VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp@ 92516

Last change on this file since 92516 was 92516, checked in by vboxsync, 3 years ago

VMM/CPUM,NEM: The fNestedPagingAndFullGuestExec condition in cpumR3InitCpuIdAndMsrs does not apply to NEM, as we don't do the shadow page tables there and are unlikely to have to deal with funny instructions. Any NEM restrictions should be applied selectively where needed. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 342.1 KB
Line 
1/* $Id: CPUMR3CpuId.cpp 92516 2021-11-19 20:52:38Z vboxsync $ */
2/** @file
3 * CPUM - CPU ID part.
4 */
5
6/*
7 * Copyright (C) 2013-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/dbgf.h>
25#include <VBox/vmm/hm.h>
26#include <VBox/vmm/nem.h>
27#include <VBox/vmm/ssm.h>
28#include "CPUMInternal.h"
29#include <VBox/vmm/vmcc.h>
30#include <VBox/sup.h>
31
32#include <VBox/err.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/ctype.h>
35#include <iprt/mem.h>
36#include <iprt/string.h>
37
38
39/*********************************************************************************************************************************
40* Defined Constants And Macros *
41*********************************************************************************************************************************/
42/** For sanity and avoid wasting hyper heap on buggy config / saved state. */
43#define CPUM_CPUID_MAX_LEAVES 2048
44/** Max size we accept for the XSAVE area.
45 * @see CPUMCTX::abXSave */
46#define CPUM_MAX_XSAVE_AREA_SIZE (0x4000 - 0x300)
47/* Min size we accept for the XSAVE area. */
48#define CPUM_MIN_XSAVE_AREA_SIZE 0x240
49
50
51/*********************************************************************************************************************************
52* Global Variables *
53*********************************************************************************************************************************/
54/**
55 * The intel pentium family.
56 */
57static const CPUMMICROARCH g_aenmIntelFamily06[] =
58{
59 /* [ 0(0x00)] = */ kCpumMicroarch_Intel_P6, /* Pentium Pro A-step (says sandpile.org). */
60 /* [ 1(0x01)] = */ kCpumMicroarch_Intel_P6, /* Pentium Pro */
61 /* [ 2(0x02)] = */ kCpumMicroarch_Intel_Unknown,
62 /* [ 3(0x03)] = */ kCpumMicroarch_Intel_P6_II, /* PII Klamath */
63 /* [ 4(0x04)] = */ kCpumMicroarch_Intel_Unknown,
64 /* [ 5(0x05)] = */ kCpumMicroarch_Intel_P6_II, /* PII Deschutes */
65 /* [ 6(0x06)] = */ kCpumMicroarch_Intel_P6_II, /* Celeron Mendocino. */
66 /* [ 7(0x07)] = */ kCpumMicroarch_Intel_P6_III, /* PIII Katmai. */
67 /* [ 8(0x08)] = */ kCpumMicroarch_Intel_P6_III, /* PIII Coppermine (includes Celeron). */
68 /* [ 9(0x09)] = */ kCpumMicroarch_Intel_P6_M_Banias, /* Pentium/Celeron M Banias. */
69 /* [10(0x0a)] = */ kCpumMicroarch_Intel_P6_III, /* PIII Xeon */
70 /* [11(0x0b)] = */ kCpumMicroarch_Intel_P6_III, /* PIII Tualatin (includes Celeron). */
71 /* [12(0x0c)] = */ kCpumMicroarch_Intel_Unknown,
72 /* [13(0x0d)] = */ kCpumMicroarch_Intel_P6_M_Dothan, /* Pentium/Celeron M Dothan. */
73 /* [14(0x0e)] = */ kCpumMicroarch_Intel_Core_Yonah, /* Core Yonah (Enhanced Pentium M). */
74 /* [15(0x0f)] = */ kCpumMicroarch_Intel_Core2_Merom, /* Merom */
75 /* [16(0x10)] = */ kCpumMicroarch_Intel_Unknown,
76 /* [17(0x11)] = */ kCpumMicroarch_Intel_Unknown,
77 /* [18(0x12)] = */ kCpumMicroarch_Intel_Unknown,
78 /* [19(0x13)] = */ kCpumMicroarch_Intel_Unknown,
79 /* [20(0x14)] = */ kCpumMicroarch_Intel_Unknown,
80 /* [21(0x15)] = */ kCpumMicroarch_Intel_P6_M_Dothan, /* Tolapai - System-on-a-chip. */
81 /* [22(0x16)] = */ kCpumMicroarch_Intel_Core2_Merom,
82 /* [23(0x17)] = */ kCpumMicroarch_Intel_Core2_Penryn,
83 /* [24(0x18)] = */ kCpumMicroarch_Intel_Unknown,
84 /* [25(0x19)] = */ kCpumMicroarch_Intel_Unknown,
85 /* [26(0x1a)] = */ kCpumMicroarch_Intel_Core7_Nehalem, /* Nehalem-EP */
86 /* [27(0x1b)] = */ kCpumMicroarch_Intel_Unknown,
87 /* [28(0x1c)] = */ kCpumMicroarch_Intel_Atom_Bonnell, /* Diamonville, Pineview, */
88 /* [29(0x1d)] = */ kCpumMicroarch_Intel_Core2_Penryn,
89 /* [30(0x1e)] = */ kCpumMicroarch_Intel_Core7_Nehalem, /* Clarksfield, Lynnfield, Jasper Forest. */
90 /* [31(0x1f)] = */ kCpumMicroarch_Intel_Core7_Nehalem, /* Only listed by sandpile.org. 2 cores ABD/HVD, whatever that means. */
91 /* [32(0x20)] = */ kCpumMicroarch_Intel_Unknown,
92 /* [33(0x21)] = */ kCpumMicroarch_Intel_Unknown,
93 /* [34(0x22)] = */ kCpumMicroarch_Intel_Unknown,
94 /* [35(0x23)] = */ kCpumMicroarch_Intel_Unknown,
95 /* [36(0x24)] = */ kCpumMicroarch_Intel_Unknown,
96 /* [37(0x25)] = */ kCpumMicroarch_Intel_Core7_Westmere, /* Arrandale, Clarksdale. */
97 /* [38(0x26)] = */ kCpumMicroarch_Intel_Atom_Lincroft,
98 /* [39(0x27)] = */ kCpumMicroarch_Intel_Atom_Saltwell,
99 /* [40(0x28)] = */ kCpumMicroarch_Intel_Unknown,
100 /* [41(0x29)] = */ kCpumMicroarch_Intel_Unknown,
101 /* [42(0x2a)] = */ kCpumMicroarch_Intel_Core7_SandyBridge,
102 /* [43(0x2b)] = */ kCpumMicroarch_Intel_Unknown,
103 /* [44(0x2c)] = */ kCpumMicroarch_Intel_Core7_Westmere, /* Gulftown, Westmere-EP. */
104 /* [45(0x2d)] = */ kCpumMicroarch_Intel_Core7_SandyBridge, /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP. */
105 /* [46(0x2e)] = */ kCpumMicroarch_Intel_Core7_Nehalem, /* Beckton (Xeon). */
106 /* [47(0x2f)] = */ kCpumMicroarch_Intel_Core7_Westmere, /* Westmere-EX. */
107 /* [48(0x30)] = */ kCpumMicroarch_Intel_Unknown,
108 /* [49(0x31)] = */ kCpumMicroarch_Intel_Unknown,
109 /* [50(0x32)] = */ kCpumMicroarch_Intel_Unknown,
110 /* [51(0x33)] = */ kCpumMicroarch_Intel_Unknown,
111 /* [52(0x34)] = */ kCpumMicroarch_Intel_Unknown,
112 /* [53(0x35)] = */ kCpumMicroarch_Intel_Atom_Saltwell, /* ?? */
113 /* [54(0x36)] = */ kCpumMicroarch_Intel_Atom_Saltwell, /* Cedarview, ++ */
114 /* [55(0x37)] = */ kCpumMicroarch_Intel_Atom_Silvermont,
115 /* [56(0x38)] = */ kCpumMicroarch_Intel_Unknown,
116 /* [57(0x39)] = */ kCpumMicroarch_Intel_Unknown,
117 /* [58(0x3a)] = */ kCpumMicroarch_Intel_Core7_IvyBridge,
118 /* [59(0x3b)] = */ kCpumMicroarch_Intel_Unknown,
119 /* [60(0x3c)] = */ kCpumMicroarch_Intel_Core7_Haswell,
120 /* [61(0x3d)] = */ kCpumMicroarch_Intel_Core7_Broadwell,
121 /* [62(0x3e)] = */ kCpumMicroarch_Intel_Core7_IvyBridge,
122 /* [63(0x3f)] = */ kCpumMicroarch_Intel_Core7_Haswell,
123 /* [64(0x40)] = */ kCpumMicroarch_Intel_Unknown,
124 /* [65(0x41)] = */ kCpumMicroarch_Intel_Unknown,
125 /* [66(0x42)] = */ kCpumMicroarch_Intel_Unknown,
126 /* [67(0x43)] = */ kCpumMicroarch_Intel_Unknown,
127 /* [68(0x44)] = */ kCpumMicroarch_Intel_Unknown,
128 /* [69(0x45)] = */ kCpumMicroarch_Intel_Core7_Haswell,
129 /* [70(0x46)] = */ kCpumMicroarch_Intel_Core7_Haswell,
130 /* [71(0x47)] = */ kCpumMicroarch_Intel_Core7_Broadwell, /* i7-5775C */
131 /* [72(0x48)] = */ kCpumMicroarch_Intel_Unknown,
132 /* [73(0x49)] = */ kCpumMicroarch_Intel_Unknown,
133 /* [74(0x4a)] = */ kCpumMicroarch_Intel_Atom_Silvermont,
134 /* [75(0x4b)] = */ kCpumMicroarch_Intel_Unknown,
135 /* [76(0x4c)] = */ kCpumMicroarch_Intel_Atom_Airmount,
136 /* [77(0x4d)] = */ kCpumMicroarch_Intel_Atom_Silvermont,
137 /* [78(0x4e)] = */ kCpumMicroarch_Intel_Core7_Skylake,
138 /* [79(0x4f)] = */ kCpumMicroarch_Intel_Core7_Broadwell, /* Broadwell-E */
139 /* [80(0x50)] = */ kCpumMicroarch_Intel_Unknown,
140 /* [81(0x51)] = */ kCpumMicroarch_Intel_Unknown,
141 /* [82(0x52)] = */ kCpumMicroarch_Intel_Unknown,
142 /* [83(0x53)] = */ kCpumMicroarch_Intel_Unknown,
143 /* [84(0x54)] = */ kCpumMicroarch_Intel_Unknown,
144 /* [85(0x55)] = */ kCpumMicroarch_Intel_Core7_Skylake, /* server cpu; skylake <= 4, cascade lake > 5 */
145 /* [86(0x56)] = */ kCpumMicroarch_Intel_Core7_Broadwell, /* Xeon D-1540, Broadwell-DE */
146 /* [87(0x57)] = */ kCpumMicroarch_Intel_Phi_KnightsLanding,
147 /* [88(0x58)] = */ kCpumMicroarch_Intel_Unknown,
148 /* [89(0x59)] = */ kCpumMicroarch_Intel_Unknown,
149 /* [90(0x5a)] = */ kCpumMicroarch_Intel_Atom_Silvermont, /* Moorefield */
150 /* [91(0x5b)] = */ kCpumMicroarch_Intel_Unknown,
151 /* [92(0x5c)] = */ kCpumMicroarch_Intel_Atom_Goldmont, /* Apollo Lake */
152 /* [93(0x5d)] = */ kCpumMicroarch_Intel_Atom_Silvermont, /* x3-C3230 */
153 /* [94(0x5e)] = */ kCpumMicroarch_Intel_Core7_Skylake, /* i7-6700K */
154 /* [95(0x5f)] = */ kCpumMicroarch_Intel_Atom_Goldmont, /* Denverton */
155 /* [96(0x60)] = */ kCpumMicroarch_Intel_Unknown,
156 /* [97(0x61)] = */ kCpumMicroarch_Intel_Unknown,
157 /* [98(0x62)] = */ kCpumMicroarch_Intel_Unknown,
158 /* [99(0x63)] = */ kCpumMicroarch_Intel_Unknown,
159 /*[100(0x64)] = */ kCpumMicroarch_Intel_Unknown,
160 /*[101(0x65)] = */ kCpumMicroarch_Intel_Atom_Silvermont, /* SoFIA */
161 /*[102(0x66)] = */ kCpumMicroarch_Intel_Core7_CannonLake, /* unconfirmed */
162 /*[103(0x67)] = */ kCpumMicroarch_Intel_Unknown,
163 /*[104(0x68)] = */ kCpumMicroarch_Intel_Unknown,
164 /*[105(0x69)] = */ kCpumMicroarch_Intel_Unknown,
165 /*[106(0x6a)] = */ kCpumMicroarch_Intel_Core7_IceLake, /* unconfirmed server */
166 /*[107(0x6b)] = */ kCpumMicroarch_Intel_Unknown,
167 /*[108(0x6c)] = */ kCpumMicroarch_Intel_Core7_IceLake, /* unconfirmed server */
168 /*[109(0x6d)] = */ kCpumMicroarch_Intel_Unknown,
169 /*[110(0x6e)] = */ kCpumMicroarch_Intel_Atom_Airmount, /* or silvermount? */
170 /*[111(0x6f)] = */ kCpumMicroarch_Intel_Unknown,
171 /*[112(0x70)] = */ kCpumMicroarch_Intel_Unknown,
172 /*[113(0x71)] = */ kCpumMicroarch_Intel_Unknown,
173 /*[114(0x72)] = */ kCpumMicroarch_Intel_Unknown,
174 /*[115(0x73)] = */ kCpumMicroarch_Intel_Unknown,
175 /*[116(0x74)] = */ kCpumMicroarch_Intel_Unknown,
176 /*[117(0x75)] = */ kCpumMicroarch_Intel_Atom_Airmount, /* or silvermount? */
177 /*[118(0x76)] = */ kCpumMicroarch_Intel_Unknown,
178 /*[119(0x77)] = */ kCpumMicroarch_Intel_Unknown,
179 /*[120(0x78)] = */ kCpumMicroarch_Intel_Unknown,
180 /*[121(0x79)] = */ kCpumMicroarch_Intel_Unknown,
181 /*[122(0x7a)] = */ kCpumMicroarch_Intel_Atom_GoldmontPlus,
182 /*[123(0x7b)] = */ kCpumMicroarch_Intel_Unknown,
183 /*[124(0x7c)] = */ kCpumMicroarch_Intel_Unknown,
184 /*[125(0x7d)] = */ kCpumMicroarch_Intel_Core7_IceLake, /* unconfirmed */
185 /*[126(0x7e)] = */ kCpumMicroarch_Intel_Core7_IceLake, /* unconfirmed */
186 /*[127(0x7f)] = */ kCpumMicroarch_Intel_Unknown,
187 /*[128(0x80)] = */ kCpumMicroarch_Intel_Unknown,
188 /*[129(0x81)] = */ kCpumMicroarch_Intel_Unknown,
189 /*[130(0x82)] = */ kCpumMicroarch_Intel_Unknown,
190 /*[131(0x83)] = */ kCpumMicroarch_Intel_Unknown,
191 /*[132(0x84)] = */ kCpumMicroarch_Intel_Unknown,
192 /*[133(0x85)] = */ kCpumMicroarch_Intel_Phi_KnightsMill,
193 /*[134(0x86)] = */ kCpumMicroarch_Intel_Unknown,
194 /*[135(0x87)] = */ kCpumMicroarch_Intel_Unknown,
195 /*[136(0x88)] = */ kCpumMicroarch_Intel_Unknown,
196 /*[137(0x89)] = */ kCpumMicroarch_Intel_Unknown,
197 /*[138(0x8a)] = */ kCpumMicroarch_Intel_Unknown,
198 /*[139(0x8b)] = */ kCpumMicroarch_Intel_Unknown,
199 /*[140(0x8c)] = */ kCpumMicroarch_Intel_Core7_TigerLake, /* 11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz (bird) */
200 /*[141(0x8d)] = */ kCpumMicroarch_Intel_Core7_TigerLake, /* unconfirmed */
201 /*[142(0x8e)] = */ kCpumMicroarch_Intel_Core7_KabyLake, /* Stepping >= 0xB is Whiskey Lake, 0xA is CoffeeLake. */
202 /*[143(0x8f)] = */ kCpumMicroarch_Intel_Core7_SapphireRapids,
203 /*[144(0x90)] = */ kCpumMicroarch_Intel_Unknown,
204 /*[145(0x91)] = */ kCpumMicroarch_Intel_Unknown,
205 /*[146(0x92)] = */ kCpumMicroarch_Intel_Unknown,
206 /*[147(0x93)] = */ kCpumMicroarch_Intel_Unknown,
207 /*[148(0x94)] = */ kCpumMicroarch_Intel_Unknown,
208 /*[149(0x95)] = */ kCpumMicroarch_Intel_Unknown,
209 /*[150(0x96)] = */ kCpumMicroarch_Intel_Unknown,
210 /*[151(0x97)] = */ kCpumMicroarch_Intel_Core7_AlderLake, /* unconfirmed, unreleased */
211 /*[152(0x98)] = */ kCpumMicroarch_Intel_Unknown,
212 /*[153(0x99)] = */ kCpumMicroarch_Intel_Unknown,
213 /*[154(0x9a)] = */ kCpumMicroarch_Intel_Core7_AlderLake, /* unconfirmed, unreleased */
214 /*[155(0x9b)] = */ kCpumMicroarch_Intel_Unknown,
215 /*[156(0x9c)] = */ kCpumMicroarch_Intel_Unknown,
216 /*[157(0x9d)] = */ kCpumMicroarch_Intel_Unknown,
217 /*[158(0x9e)] = */ kCpumMicroarch_Intel_Core7_KabyLake, /* Stepping >= 0xB is Whiskey Lake, 0xA is CoffeeLake. */
218 /*[159(0x9f)] = */ kCpumMicroarch_Intel_Unknown,
219 /*[160(0xa0)] = */ kCpumMicroarch_Intel_Unknown,
220 /*[161(0xa1)] = */ kCpumMicroarch_Intel_Unknown,
221 /*[162(0xa2)] = */ kCpumMicroarch_Intel_Unknown,
222 /*[163(0xa3)] = */ kCpumMicroarch_Intel_Unknown,
223 /*[164(0xa4)] = */ kCpumMicroarch_Intel_Unknown,
224 /*[165(0xa5)] = */ kCpumMicroarch_Intel_Core7_CometLake, /* unconfirmed */
225 /*[166(0xa6)] = */ kCpumMicroarch_Intel_Unknown,
226 /*[167(0xa7)] = */ kCpumMicroarch_Intel_Core7_CypressCove, /* 14nm backport, unconfirmed */
227};
228AssertCompile(RT_ELEMENTS(g_aenmIntelFamily06) == 0xa7+1);
229
230
231/**
232 * Figures out the (sub-)micro architecture given a bit of CPUID info.
233 *
234 * @returns Micro architecture.
235 * @param enmVendor The CPU vendor.
236 * @param bFamily The CPU family.
237 * @param bModel The CPU model.
238 * @param bStepping The CPU stepping.
239 */
240VMMR3DECL(CPUMMICROARCH) CPUMR3CpuIdDetermineMicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
241 uint8_t bModel, uint8_t bStepping)
242{
243 if (enmVendor == CPUMCPUVENDOR_AMD)
244 {
245 switch (bFamily)
246 {
247 case 0x02: return kCpumMicroarch_AMD_Am286; /* Not really kosher... */
248 case 0x03: return kCpumMicroarch_AMD_Am386;
249 case 0x23: return kCpumMicroarch_AMD_Am386; /* SX*/
250 case 0x04: return bModel < 14 ? kCpumMicroarch_AMD_Am486 : kCpumMicroarch_AMD_Am486Enh;
251 case 0x05: return bModel < 6 ? kCpumMicroarch_AMD_K5 : kCpumMicroarch_AMD_K6; /* Genode LX is 0x0a, lump it with K6. */
252 case 0x06:
253 switch (bModel)
254 {
255 case 0: return kCpumMicroarch_AMD_K7_Palomino;
256 case 1: return kCpumMicroarch_AMD_K7_Palomino;
257 case 2: return kCpumMicroarch_AMD_K7_Palomino;
258 case 3: return kCpumMicroarch_AMD_K7_Spitfire;
259 case 4: return kCpumMicroarch_AMD_K7_Thunderbird;
260 case 6: return kCpumMicroarch_AMD_K7_Palomino;
261 case 7: return kCpumMicroarch_AMD_K7_Morgan;
262 case 8: return kCpumMicroarch_AMD_K7_Thoroughbred;
263 case 10: return kCpumMicroarch_AMD_K7_Barton; /* Thorton too. */
264 }
265 return kCpumMicroarch_AMD_K7_Unknown;
266 case 0x0f:
267 /*
268 * This family is a friggin mess. Trying my best to make some
269 * sense out of it. Too much happened in the 0x0f family to
270 * lump it all together as K8 (130nm->90nm->65nm, AMD-V, ++).
271 *
272 * Emperical CPUID.01h.EAX evidence from revision guides, wikipedia,
273 * cpu-world.com, and other places:
274 * - 130nm:
275 * - ClawHammer: F7A/SH-CG, F5A/-CG, F4A/-CG, F50/-B0, F48/-C0, F58/-C0,
276 * - SledgeHammer: F50/SH-B0, F48/-C0, F58/-C0, F4A/-CG, F5A/-CG, F7A/-CG, F51/-B3
277 * - Newcastle: FC0/DH-CG (erratum #180: FE0/DH-CG), FF0/DH-CG
278 * - Dublin: FC0/-CG, FF0/-CG, F82/CH-CG, F4A/-CG, F48/SH-C0,
279 * - Odessa: FC0/DH-CG (erratum #180: FE0/DH-CG)
280 * - Paris: FF0/DH-CG, FC0/DH-CG (erratum #180: FE0/DH-CG),
281 * - 90nm:
282 * - Winchester: 10FF0/DH-D0, 20FF0/DH-E3.
283 * - Oakville: 10FC0/DH-D0.
284 * - Georgetown: 10FC0/DH-D0.
285 * - Sonora: 10FC0/DH-D0.
286 * - Venus: 20F71/SH-E4
287 * - Troy: 20F51/SH-E4
288 * - Athens: 20F51/SH-E4
289 * - San Diego: 20F71/SH-E4.
290 * - Lancaster: 20F42/SH-E5
291 * - Newark: 20F42/SH-E5.
292 * - Albany: 20FC2/DH-E6.
293 * - Roma: 20FC2/DH-E6.
294 * - Venice: 20FF0/DH-E3, 20FC2/DH-E6, 20FF2/DH-E6.
295 * - Palermo: 10FC0/DH-D0, 20FF0/DH-E3, 20FC0/DH-E3, 20FC2/DH-E6, 20FF2/DH-E6
296 * - 90nm introducing Dual core:
297 * - Denmark: 20F30/JH-E1, 20F32/JH-E6
298 * - Italy: 20F10/JH-E1, 20F12/JH-E6
299 * - Egypt: 20F10/JH-E1, 20F12/JH-E6
300 * - Toledo: 20F32/JH-E6, 30F72/DH-E6 (single code variant).
301 * - Manchester: 20FB1/BH-E4, 30FF2/BH-E4.
302 * - 90nm 2nd gen opteron ++, AMD-V introduced (might be missing in some cheaper models):
303 * - Santa Ana: 40F32/JH-F2, /-F3
304 * - Santa Rosa: 40F12/JH-F2, 40F13/JH-F3
305 * - Windsor: 40F32/JH-F2, 40F33/JH-F3, C0F13/JH-F3, 40FB2/BH-F2, ??20FB1/BH-E4??.
306 * - Manila: 50FF2/DH-F2, 40FF2/DH-F2
307 * - Orleans: 40FF2/DH-F2, 50FF2/DH-F2, 50FF3/DH-F3.
308 * - Keene: 40FC2/DH-F2.
309 * - Richmond: 40FC2/DH-F2
310 * - Taylor: 40F82/BH-F2
311 * - Trinidad: 40F82/BH-F2
312 *
313 * - 65nm:
314 * - Brisbane: 60FB1/BH-G1, 60FB2/BH-G2.
315 * - Tyler: 60F81/BH-G1, 60F82/BH-G2.
316 * - Sparta: 70FF1/DH-G1, 70FF2/DH-G2.
317 * - Lima: 70FF1/DH-G1, 70FF2/DH-G2.
318 * - Sherman: /-G1, 70FC2/DH-G2.
319 * - Huron: 70FF2/DH-G2.
320 */
321 if (bModel < 0x10)
322 return kCpumMicroarch_AMD_K8_130nm;
323 if (bModel >= 0x60 && bModel < 0x80)
324 return kCpumMicroarch_AMD_K8_65nm;
325 if (bModel >= 0x40)
326 return kCpumMicroarch_AMD_K8_90nm_AMDV;
327 switch (bModel)
328 {
329 case 0x21:
330 case 0x23:
331 case 0x2b:
332 case 0x2f:
333 case 0x37:
334 case 0x3f:
335 return kCpumMicroarch_AMD_K8_90nm_DualCore;
336 }
337 return kCpumMicroarch_AMD_K8_90nm;
338 case 0x10:
339 return kCpumMicroarch_AMD_K10;
340 case 0x11:
341 return kCpumMicroarch_AMD_K10_Lion;
342 case 0x12:
343 return kCpumMicroarch_AMD_K10_Llano;
344 case 0x14:
345 return kCpumMicroarch_AMD_Bobcat;
346 case 0x15:
347 switch (bModel)
348 {
349 case 0x00: return kCpumMicroarch_AMD_15h_Bulldozer; /* Any? prerelease? */
350 case 0x01: return kCpumMicroarch_AMD_15h_Bulldozer; /* Opteron 4200, FX-81xx. */
351 case 0x02: return kCpumMicroarch_AMD_15h_Piledriver; /* Opteron 4300, FX-83xx. */
352 case 0x10: return kCpumMicroarch_AMD_15h_Piledriver; /* A10-5800K for e.g. */
353 case 0x11: /* ?? */
354 case 0x12: /* ?? */
355 case 0x13: return kCpumMicroarch_AMD_15h_Piledriver; /* A10-6800K for e.g. */
356 }
357 return kCpumMicroarch_AMD_15h_Unknown;
358 case 0x16:
359 return kCpumMicroarch_AMD_Jaguar;
360 case 0x17:
361 return kCpumMicroarch_AMD_Zen_Ryzen;
362 }
363 return kCpumMicroarch_AMD_Unknown;
364 }
365
366 if (enmVendor == CPUMCPUVENDOR_INTEL)
367 {
368 switch (bFamily)
369 {
370 case 3:
371 return kCpumMicroarch_Intel_80386;
372 case 4:
373 return kCpumMicroarch_Intel_80486;
374 case 5:
375 return kCpumMicroarch_Intel_P5;
376 case 6:
377 if (bModel < RT_ELEMENTS(g_aenmIntelFamily06))
378 {
379 CPUMMICROARCH enmMicroArch = g_aenmIntelFamily06[bModel];
380 if (enmMicroArch == kCpumMicroarch_Intel_Core7_KabyLake)
381 {
382 if (bStepping >= 0xa && bStepping <= 0xc)
383 enmMicroArch = kCpumMicroarch_Intel_Core7_CoffeeLake;
384 else if (bStepping >= 0xc)
385 enmMicroArch = kCpumMicroarch_Intel_Core7_WhiskeyLake;
386 }
387 else if ( enmMicroArch == kCpumMicroarch_Intel_Core7_Skylake
388 && bModel == 0x55
389 && bStepping >= 5)
390 enmMicroArch = kCpumMicroarch_Intel_Core7_CascadeLake;
391 return enmMicroArch;
392 }
393 return kCpumMicroarch_Intel_Atom_Unknown;
394 case 15:
395 switch (bModel)
396 {
397 case 0: return kCpumMicroarch_Intel_NB_Willamette;
398 case 1: return kCpumMicroarch_Intel_NB_Willamette;
399 case 2: return kCpumMicroarch_Intel_NB_Northwood;
400 case 3: return kCpumMicroarch_Intel_NB_Prescott;
401 case 4: return kCpumMicroarch_Intel_NB_Prescott2M; /* ?? */
402 case 5: return kCpumMicroarch_Intel_NB_Unknown; /*??*/
403 case 6: return kCpumMicroarch_Intel_NB_CedarMill;
404 case 7: return kCpumMicroarch_Intel_NB_Gallatin;
405 default: return kCpumMicroarch_Intel_NB_Unknown;
406 }
407 break;
408 /* The following are not kosher but kind of follow intuitively from 6, 5 & 4. */
409 case 0:
410 return kCpumMicroarch_Intel_8086;
411 case 1:
412 return kCpumMicroarch_Intel_80186;
413 case 2:
414 return kCpumMicroarch_Intel_80286;
415 }
416 return kCpumMicroarch_Intel_Unknown;
417 }
418
419 if (enmVendor == CPUMCPUVENDOR_VIA)
420 {
421 switch (bFamily)
422 {
423 case 5:
424 switch (bModel)
425 {
426 case 1: return kCpumMicroarch_Centaur_C6;
427 case 4: return kCpumMicroarch_Centaur_C6;
428 case 8: return kCpumMicroarch_Centaur_C2;
429 case 9: return kCpumMicroarch_Centaur_C3;
430 }
431 break;
432
433 case 6:
434 switch (bModel)
435 {
436 case 5: return kCpumMicroarch_VIA_C3_M2;
437 case 6: return kCpumMicroarch_VIA_C3_C5A;
438 case 7: return bStepping < 8 ? kCpumMicroarch_VIA_C3_C5B : kCpumMicroarch_VIA_C3_C5C;
439 case 8: return kCpumMicroarch_VIA_C3_C5N;
440 case 9: return bStepping < 8 ? kCpumMicroarch_VIA_C3_C5XL : kCpumMicroarch_VIA_C3_C5P;
441 case 10: return kCpumMicroarch_VIA_C7_C5J;
442 case 15: return kCpumMicroarch_VIA_Isaiah;
443 }
444 break;
445 }
446 return kCpumMicroarch_VIA_Unknown;
447 }
448
449 if (enmVendor == CPUMCPUVENDOR_SHANGHAI)
450 {
451 switch (bFamily)
452 {
453 case 6:
454 case 7:
455 return kCpumMicroarch_Shanghai_Wudaokou;
456 default:
457 break;
458 }
459 return kCpumMicroarch_Shanghai_Unknown;
460 }
461
462 if (enmVendor == CPUMCPUVENDOR_CYRIX)
463 {
464 switch (bFamily)
465 {
466 case 4:
467 switch (bModel)
468 {
469 case 9: return kCpumMicroarch_Cyrix_5x86;
470 }
471 break;
472
473 case 5:
474 switch (bModel)
475 {
476 case 2: return kCpumMicroarch_Cyrix_M1;
477 case 4: return kCpumMicroarch_Cyrix_MediaGX;
478 case 5: return kCpumMicroarch_Cyrix_MediaGXm;
479 }
480 break;
481
482 case 6:
483 switch (bModel)
484 {
485 case 0: return kCpumMicroarch_Cyrix_M2;
486 }
487 break;
488
489 }
490 return kCpumMicroarch_Cyrix_Unknown;
491 }
492
493 if (enmVendor == CPUMCPUVENDOR_HYGON)
494 {
495 switch (bFamily)
496 {
497 case 0x18:
498 return kCpumMicroarch_Hygon_Dhyana;
499 default:
500 break;
501 }
502 return kCpumMicroarch_Hygon_Unknown;
503 }
504
505 return kCpumMicroarch_Unknown;
506}
507
508
509/**
510 * Translates a microarchitecture enum value to the corresponding string
511 * constant.
512 *
513 * @returns Read-only string constant (omits "kCpumMicroarch_" prefix). Returns
514 * NULL if the value is invalid.
515 *
516 * @param enmMicroarch The enum value to convert.
517 */
518VMMR3DECL(const char *) CPUMR3MicroarchName(CPUMMICROARCH enmMicroarch)
519{
520 switch (enmMicroarch)
521 {
522#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("kCpumMicroarch_") - 1)
523 CASE_RET_STR(kCpumMicroarch_Intel_8086);
524 CASE_RET_STR(kCpumMicroarch_Intel_80186);
525 CASE_RET_STR(kCpumMicroarch_Intel_80286);
526 CASE_RET_STR(kCpumMicroarch_Intel_80386);
527 CASE_RET_STR(kCpumMicroarch_Intel_80486);
528 CASE_RET_STR(kCpumMicroarch_Intel_P5);
529
530 CASE_RET_STR(kCpumMicroarch_Intel_P6);
531 CASE_RET_STR(kCpumMicroarch_Intel_P6_II);
532 CASE_RET_STR(kCpumMicroarch_Intel_P6_III);
533
534 CASE_RET_STR(kCpumMicroarch_Intel_P6_M_Banias);
535 CASE_RET_STR(kCpumMicroarch_Intel_P6_M_Dothan);
536 CASE_RET_STR(kCpumMicroarch_Intel_Core_Yonah);
537
538 CASE_RET_STR(kCpumMicroarch_Intel_Core2_Merom);
539 CASE_RET_STR(kCpumMicroarch_Intel_Core2_Penryn);
540
541 CASE_RET_STR(kCpumMicroarch_Intel_Core7_Nehalem);
542 CASE_RET_STR(kCpumMicroarch_Intel_Core7_Westmere);
543 CASE_RET_STR(kCpumMicroarch_Intel_Core7_SandyBridge);
544 CASE_RET_STR(kCpumMicroarch_Intel_Core7_IvyBridge);
545 CASE_RET_STR(kCpumMicroarch_Intel_Core7_Haswell);
546 CASE_RET_STR(kCpumMicroarch_Intel_Core7_Broadwell);
547 CASE_RET_STR(kCpumMicroarch_Intel_Core7_Skylake);
548 CASE_RET_STR(kCpumMicroarch_Intel_Core7_KabyLake);
549 CASE_RET_STR(kCpumMicroarch_Intel_Core7_CoffeeLake);
550 CASE_RET_STR(kCpumMicroarch_Intel_Core7_WhiskeyLake);
551 CASE_RET_STR(kCpumMicroarch_Intel_Core7_CascadeLake);
552 CASE_RET_STR(kCpumMicroarch_Intel_Core7_CannonLake);
553 CASE_RET_STR(kCpumMicroarch_Intel_Core7_CometLake);
554 CASE_RET_STR(kCpumMicroarch_Intel_Core7_IceLake);
555 CASE_RET_STR(kCpumMicroarch_Intel_Core7_RocketLake);
556 CASE_RET_STR(kCpumMicroarch_Intel_Core7_TigerLake);
557 CASE_RET_STR(kCpumMicroarch_Intel_Core7_AlderLake);
558 CASE_RET_STR(kCpumMicroarch_Intel_Core7_SapphireRapids);
559
560 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Bonnell);
561 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Lincroft);
562 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Saltwell);
563 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Silvermont);
564 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Airmount);
565 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Goldmont);
566 CASE_RET_STR(kCpumMicroarch_Intel_Atom_GoldmontPlus);
567 CASE_RET_STR(kCpumMicroarch_Intel_Atom_Unknown);
568
569 CASE_RET_STR(kCpumMicroarch_Intel_Phi_KnightsFerry);
570 CASE_RET_STR(kCpumMicroarch_Intel_Phi_KnightsCorner);
571 CASE_RET_STR(kCpumMicroarch_Intel_Phi_KnightsLanding);
572 CASE_RET_STR(kCpumMicroarch_Intel_Phi_KnightsHill);
573 CASE_RET_STR(kCpumMicroarch_Intel_Phi_KnightsMill);
574
575 CASE_RET_STR(kCpumMicroarch_Intel_NB_Willamette);
576 CASE_RET_STR(kCpumMicroarch_Intel_NB_Northwood);
577 CASE_RET_STR(kCpumMicroarch_Intel_NB_Prescott);
578 CASE_RET_STR(kCpumMicroarch_Intel_NB_Prescott2M);
579 CASE_RET_STR(kCpumMicroarch_Intel_NB_CedarMill);
580 CASE_RET_STR(kCpumMicroarch_Intel_NB_Gallatin);
581 CASE_RET_STR(kCpumMicroarch_Intel_NB_Unknown);
582
583 CASE_RET_STR(kCpumMicroarch_Intel_Unknown);
584
585 CASE_RET_STR(kCpumMicroarch_AMD_Am286);
586 CASE_RET_STR(kCpumMicroarch_AMD_Am386);
587 CASE_RET_STR(kCpumMicroarch_AMD_Am486);
588 CASE_RET_STR(kCpumMicroarch_AMD_Am486Enh);
589 CASE_RET_STR(kCpumMicroarch_AMD_K5);
590 CASE_RET_STR(kCpumMicroarch_AMD_K6);
591
592 CASE_RET_STR(kCpumMicroarch_AMD_K7_Palomino);
593 CASE_RET_STR(kCpumMicroarch_AMD_K7_Spitfire);
594 CASE_RET_STR(kCpumMicroarch_AMD_K7_Thunderbird);
595 CASE_RET_STR(kCpumMicroarch_AMD_K7_Morgan);
596 CASE_RET_STR(kCpumMicroarch_AMD_K7_Thoroughbred);
597 CASE_RET_STR(kCpumMicroarch_AMD_K7_Barton);
598 CASE_RET_STR(kCpumMicroarch_AMD_K7_Unknown);
599
600 CASE_RET_STR(kCpumMicroarch_AMD_K8_130nm);
601 CASE_RET_STR(kCpumMicroarch_AMD_K8_90nm);
602 CASE_RET_STR(kCpumMicroarch_AMD_K8_90nm_DualCore);
603 CASE_RET_STR(kCpumMicroarch_AMD_K8_90nm_AMDV);
604 CASE_RET_STR(kCpumMicroarch_AMD_K8_65nm);
605
606 CASE_RET_STR(kCpumMicroarch_AMD_K10);
607 CASE_RET_STR(kCpumMicroarch_AMD_K10_Lion);
608 CASE_RET_STR(kCpumMicroarch_AMD_K10_Llano);
609 CASE_RET_STR(kCpumMicroarch_AMD_Bobcat);
610 CASE_RET_STR(kCpumMicroarch_AMD_Jaguar);
611
612 CASE_RET_STR(kCpumMicroarch_AMD_15h_Bulldozer);
613 CASE_RET_STR(kCpumMicroarch_AMD_15h_Piledriver);
614 CASE_RET_STR(kCpumMicroarch_AMD_15h_Steamroller);
615 CASE_RET_STR(kCpumMicroarch_AMD_15h_Excavator);
616 CASE_RET_STR(kCpumMicroarch_AMD_15h_Unknown);
617
618 CASE_RET_STR(kCpumMicroarch_AMD_16h_First);
619
620 CASE_RET_STR(kCpumMicroarch_AMD_Zen_Ryzen);
621
622 CASE_RET_STR(kCpumMicroarch_AMD_Unknown);
623
624 CASE_RET_STR(kCpumMicroarch_Hygon_Dhyana);
625 CASE_RET_STR(kCpumMicroarch_Hygon_Unknown);
626
627 CASE_RET_STR(kCpumMicroarch_Centaur_C6);
628 CASE_RET_STR(kCpumMicroarch_Centaur_C2);
629 CASE_RET_STR(kCpumMicroarch_Centaur_C3);
630 CASE_RET_STR(kCpumMicroarch_VIA_C3_M2);
631 CASE_RET_STR(kCpumMicroarch_VIA_C3_C5A);
632 CASE_RET_STR(kCpumMicroarch_VIA_C3_C5B);
633 CASE_RET_STR(kCpumMicroarch_VIA_C3_C5C);
634 CASE_RET_STR(kCpumMicroarch_VIA_C3_C5N);
635 CASE_RET_STR(kCpumMicroarch_VIA_C3_C5XL);
636 CASE_RET_STR(kCpumMicroarch_VIA_C3_C5P);
637 CASE_RET_STR(kCpumMicroarch_VIA_C7_C5J);
638 CASE_RET_STR(kCpumMicroarch_VIA_Isaiah);
639 CASE_RET_STR(kCpumMicroarch_VIA_Unknown);
640
641 CASE_RET_STR(kCpumMicroarch_Shanghai_Wudaokou);
642 CASE_RET_STR(kCpumMicroarch_Shanghai_Unknown);
643
644 CASE_RET_STR(kCpumMicroarch_Cyrix_5x86);
645 CASE_RET_STR(kCpumMicroarch_Cyrix_M1);
646 CASE_RET_STR(kCpumMicroarch_Cyrix_MediaGX);
647 CASE_RET_STR(kCpumMicroarch_Cyrix_MediaGXm);
648 CASE_RET_STR(kCpumMicroarch_Cyrix_M2);
649 CASE_RET_STR(kCpumMicroarch_Cyrix_Unknown);
650
651 CASE_RET_STR(kCpumMicroarch_NEC_V20);
652 CASE_RET_STR(kCpumMicroarch_NEC_V30);
653
654 CASE_RET_STR(kCpumMicroarch_Unknown);
655
656#undef CASE_RET_STR
657 case kCpumMicroarch_Invalid:
658 case kCpumMicroarch_Intel_End:
659 case kCpumMicroarch_Intel_Core2_End:
660 case kCpumMicroarch_Intel_Core7_End:
661 case kCpumMicroarch_Intel_Atom_End:
662 case kCpumMicroarch_Intel_P6_Core_Atom_End:
663 case kCpumMicroarch_Intel_Phi_End:
664 case kCpumMicroarch_Intel_NB_End:
665 case kCpumMicroarch_AMD_K7_End:
666 case kCpumMicroarch_AMD_K8_End:
667 case kCpumMicroarch_AMD_15h_End:
668 case kCpumMicroarch_AMD_16h_End:
669 case kCpumMicroarch_AMD_Zen_End:
670 case kCpumMicroarch_AMD_End:
671 case kCpumMicroarch_Hygon_End:
672 case kCpumMicroarch_VIA_End:
673 case kCpumMicroarch_Shanghai_End:
674 case kCpumMicroarch_Cyrix_End:
675 case kCpumMicroarch_NEC_End:
676 case kCpumMicroarch_32BitHack:
677 break;
678 /* no default! */
679 }
680
681 return NULL;
682}
683
684
685/**
686 * Determins the host CPU MXCSR mask.
687 *
688 * @returns MXCSR mask.
689 */
690VMMR3DECL(uint32_t) CPUMR3DeterminHostMxCsrMask(void)
691{
692 if ( ASMHasCpuId()
693 && ASMIsValidStdRange(ASMCpuId_EAX(0))
694 && ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_FXSR)
695 {
696 uint8_t volatile abBuf[sizeof(X86FXSTATE) + 64];
697 PX86FXSTATE pState = (PX86FXSTATE)&abBuf[64 - ((uintptr_t)&abBuf[0] & 63)];
698 RT_ZERO(*pState);
699 ASMFxSave(pState);
700 if (pState->MXCSR_MASK == 0)
701 return 0xffbf;
702 return pState->MXCSR_MASK;
703 }
704 return 0;
705}
706
707
708/**
709 * Gets a matching leaf in the CPUID leaf array.
710 *
711 * @returns Pointer to the matching leaf, or NULL if not found.
712 * @param paLeaves The CPUID leaves to search. This is sorted.
713 * @param cLeaves The number of leaves in the array.
714 * @param uLeaf The leaf to locate.
715 * @param uSubLeaf The subleaf to locate. Pass 0 if no sub-leaves.
716 */
717static PCPUMCPUIDLEAF cpumR3CpuIdGetLeaf(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf)
718{
719 /* Lazy bird does linear lookup here since this is only used for the
720 occational CPUID overrides. */
721 for (uint32_t i = 0; i < cLeaves; i++)
722 if ( paLeaves[i].uLeaf == uLeaf
723 && paLeaves[i].uSubLeaf == (uSubLeaf & paLeaves[i].fSubLeafMask))
724 return &paLeaves[i];
725 return NULL;
726}
727
728
729#ifndef IN_VBOX_CPU_REPORT
730/**
731 * Gets a matching leaf in the CPUID leaf array, converted to a CPUMCPUID.
732 *
733 * @returns true if found, false it not.
734 * @param paLeaves The CPUID leaves to search. This is sorted.
735 * @param cLeaves The number of leaves in the array.
736 * @param uLeaf The leaf to locate.
737 * @param uSubLeaf The subleaf to locate. Pass 0 if no sub-leaves.
738 * @param pLegacy The legacy output leaf.
739 */
740static bool cpumR3CpuIdGetLeafLegacy(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf,
741 PCPUMCPUID pLegacy)
742{
743 PCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, uLeaf, uSubLeaf);
744 if (pLeaf)
745 {
746 pLegacy->uEax = pLeaf->uEax;
747 pLegacy->uEbx = pLeaf->uEbx;
748 pLegacy->uEcx = pLeaf->uEcx;
749 pLegacy->uEdx = pLeaf->uEdx;
750 return true;
751 }
752 return false;
753}
754#endif /* IN_VBOX_CPU_REPORT */
755
756
757/**
758 * Ensures that the CPUID leaf array can hold one more leaf.
759 *
760 * @returns Pointer to the CPUID leaf array (*ppaLeaves) on success. NULL on
761 * failure.
762 * @param pVM The cross context VM structure. If NULL, use
763 * the process heap, otherwise the VM's hyper heap.
764 * @param ppaLeaves Pointer to the variable holding the array pointer
765 * (input/output).
766 * @param cLeaves The current array size.
767 *
768 * @remarks This function will automatically update the R0 and RC pointers when
769 * using the hyper heap, which means @a ppaLeaves and @a cLeaves must
770 * be the corresponding VM's CPUID arrays (which is asserted).
771 */
772static PCPUMCPUIDLEAF cpumR3CpuIdEnsureSpace(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t cLeaves)
773{
774 /*
775 * If pVM is not specified, we're on the regular heap and can waste a
776 * little space to speed things up.
777 */
778 uint32_t cAllocated;
779 if (!pVM)
780 {
781 cAllocated = RT_ALIGN(cLeaves, 16);
782 if (cLeaves + 1 > cAllocated)
783 {
784 void *pvNew = RTMemRealloc(*ppaLeaves, (cAllocated + 16) * sizeof(**ppaLeaves));
785 if (pvNew)
786 *ppaLeaves = (PCPUMCPUIDLEAF)pvNew;
787 else
788 {
789 RTMemFree(*ppaLeaves);
790 *ppaLeaves = NULL;
791 }
792 }
793 }
794 /*
795 * Otherwise, we're on the hyper heap and are probably just inserting
796 * one or two leaves and should conserve space.
797 */
798 else
799 {
800#ifdef IN_VBOX_CPU_REPORT
801 AssertReleaseFailed();
802#else
803 Assert(ppaLeaves == &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
804 Assert(*ppaLeaves == pVM->cpum.s.GuestInfo.aCpuIdLeaves);
805 Assert(cLeaves == pVM->cpum.s.GuestInfo.cCpuIdLeaves);
806
807 if (cLeaves + 1 <= RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves))
808 { }
809 else
810 {
811 *ppaLeaves = NULL;
812 LogRel(("CPUM: cpumR3CpuIdEnsureSpace: Out of CPUID space!\n"));
813 }
814#endif
815 }
816 return *ppaLeaves;
817}
818
819
820/**
821 * Append a CPUID leaf or sub-leaf.
822 *
823 * ASSUMES linear insertion order, so we'll won't need to do any searching or
824 * replace anything. Use cpumR3CpuIdInsert() for those cases.
825 *
826 * @returns VINF_SUCCESS or VERR_NO_MEMORY. On error, *ppaLeaves is freed, so
827 * the caller need do no more work.
828 * @param ppaLeaves Pointer to the pointer to the array of sorted
829 * CPUID leaves and sub-leaves.
830 * @param pcLeaves Where we keep the leaf count for *ppaLeaves.
831 * @param uLeaf The leaf we're adding.
832 * @param uSubLeaf The sub-leaf number.
833 * @param fSubLeafMask The sub-leaf mask.
834 * @param uEax The EAX value.
835 * @param uEbx The EBX value.
836 * @param uEcx The ECX value.
837 * @param uEdx The EDX value.
838 * @param fFlags The flags.
839 */
840static int cpumR3CollectCpuIdInfoAddOne(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves,
841 uint32_t uLeaf, uint32_t uSubLeaf, uint32_t fSubLeafMask,
842 uint32_t uEax, uint32_t uEbx, uint32_t uEcx, uint32_t uEdx, uint32_t fFlags)
843{
844 if (!cpumR3CpuIdEnsureSpace(NULL /* pVM */, ppaLeaves, *pcLeaves))
845 return VERR_NO_MEMORY;
846
847 PCPUMCPUIDLEAF pNew = &(*ppaLeaves)[*pcLeaves];
848 Assert( *pcLeaves == 0
849 || pNew[-1].uLeaf < uLeaf
850 || (pNew[-1].uLeaf == uLeaf && pNew[-1].uSubLeaf < uSubLeaf) );
851
852 pNew->uLeaf = uLeaf;
853 pNew->uSubLeaf = uSubLeaf;
854 pNew->fSubLeafMask = fSubLeafMask;
855 pNew->uEax = uEax;
856 pNew->uEbx = uEbx;
857 pNew->uEcx = uEcx;
858 pNew->uEdx = uEdx;
859 pNew->fFlags = fFlags;
860
861 *pcLeaves += 1;
862 return VINF_SUCCESS;
863}
864
865
866/**
867 * Checks that we've updated the CPUID leaves array correctly.
868 *
869 * This is a no-op in non-strict builds.
870 *
871 * @param paLeaves The leaves array.
872 * @param cLeaves The number of leaves.
873 */
874static void cpumR3CpuIdAssertOrder(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves)
875{
876#ifdef VBOX_STRICT
877 for (uint32_t i = 1; i < cLeaves; i++)
878 if (paLeaves[i].uLeaf != paLeaves[i - 1].uLeaf)
879 AssertMsg(paLeaves[i].uLeaf > paLeaves[i - 1].uLeaf, ("%#x vs %#x\n", paLeaves[i].uLeaf, paLeaves[i - 1].uLeaf));
880 else
881 {
882 AssertMsg(paLeaves[i].uSubLeaf > paLeaves[i - 1].uSubLeaf,
883 ("%#x: %#x vs %#x\n", paLeaves[i].uLeaf, paLeaves[i].uSubLeaf, paLeaves[i - 1].uSubLeaf));
884 AssertMsg(paLeaves[i].fSubLeafMask == paLeaves[i - 1].fSubLeafMask,
885 ("%#x/%#x: %#x vs %#x\n", paLeaves[i].uLeaf, paLeaves[i].uSubLeaf, paLeaves[i].fSubLeafMask, paLeaves[i - 1].fSubLeafMask));
886 AssertMsg(paLeaves[i].fFlags == paLeaves[i - 1].fFlags,
887 ("%#x/%#x: %#x vs %#x\n", paLeaves[i].uLeaf, paLeaves[i].uSubLeaf, paLeaves[i].fFlags, paLeaves[i - 1].fFlags));
888 }
889#else
890 NOREF(paLeaves);
891 NOREF(cLeaves);
892#endif
893}
894
895
896/**
897 * Inserts a CPU ID leaf, replacing any existing ones.
898 *
899 * When inserting a simple leaf where we already got a series of sub-leaves with
900 * the same leaf number (eax), the simple leaf will replace the whole series.
901 *
902 * When pVM is NULL, this ASSUMES that the leaves array is still on the normal
903 * host-context heap and has only been allocated/reallocated by the
904 * cpumR3CpuIdEnsureSpace function.
905 *
906 * @returns VBox status code.
907 * @param pVM The cross context VM structure. If NULL, use
908 * the process heap, otherwise the VM's hyper heap.
909 * @param ppaLeaves Pointer to the pointer to the array of sorted
910 * CPUID leaves and sub-leaves. Must be NULL if using
911 * the hyper heap.
912 * @param pcLeaves Where we keep the leaf count for *ppaLeaves. Must
913 * be NULL if using the hyper heap.
914 * @param pNewLeaf Pointer to the data of the new leaf we're about to
915 * insert.
916 */
917static int cpumR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCPUMCPUIDLEAF pNewLeaf)
918{
919 /*
920 * Validate input parameters if we are using the hyper heap and use the VM's CPUID arrays.
921 */
922 if (pVM)
923 {
924 AssertReturn(!ppaLeaves, VERR_INVALID_PARAMETER);
925 AssertReturn(!pcLeaves, VERR_INVALID_PARAMETER);
926 AssertReturn(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3 == pVM->cpum.s.GuestInfo.aCpuIdLeaves, VERR_INVALID_PARAMETER);
927
928 ppaLeaves = &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
929 pcLeaves = &pVM->cpum.s.GuestInfo.cCpuIdLeaves;
930 }
931
932 PCPUMCPUIDLEAF paLeaves = *ppaLeaves;
933 uint32_t cLeaves = *pcLeaves;
934
935 /*
936 * Validate the new leaf a little.
937 */
938 AssertLogRelMsgReturn(!(pNewLeaf->fFlags & ~CPUMCPUIDLEAF_F_VALID_MASK),
939 ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fFlags),
940 VERR_INVALID_FLAGS);
941 AssertLogRelMsgReturn(pNewLeaf->fSubLeafMask != 0 || pNewLeaf->uSubLeaf == 0,
942 ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fSubLeafMask),
943 VERR_INVALID_PARAMETER);
944 AssertLogRelMsgReturn(RT_IS_POWER_OF_TWO(pNewLeaf->fSubLeafMask + 1),
945 ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fSubLeafMask),
946 VERR_INVALID_PARAMETER);
947 AssertLogRelMsgReturn((pNewLeaf->fSubLeafMask & pNewLeaf->uSubLeaf) == pNewLeaf->uSubLeaf,
948 ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fSubLeafMask),
949 VERR_INVALID_PARAMETER);
950
951 /*
952 * Find insertion point. The lazy bird uses the same excuse as in
953 * cpumR3CpuIdGetLeaf(), but optimizes for linear insertion (saved state).
954 */
955 uint32_t i;
956 if ( cLeaves > 0
957 && paLeaves[cLeaves - 1].uLeaf < pNewLeaf->uLeaf)
958 {
959 /* Add at end. */
960 i = cLeaves;
961 }
962 else if ( cLeaves > 0
963 && paLeaves[cLeaves - 1].uLeaf == pNewLeaf->uLeaf)
964 {
965 /* Either replacing the last leaf or dealing with sub-leaves. Spool
966 back to the first sub-leaf to pretend we did the linear search. */
967 i = cLeaves - 1;
968 while ( i > 0
969 && paLeaves[i - 1].uLeaf == pNewLeaf->uLeaf)
970 i--;
971 }
972 else
973 {
974 /* Linear search from the start. */
975 i = 0;
976 while ( i < cLeaves
977 && paLeaves[i].uLeaf < pNewLeaf->uLeaf)
978 i++;
979 }
980 if ( i < cLeaves
981 && paLeaves[i].uLeaf == pNewLeaf->uLeaf)
982 {
983 if (paLeaves[i].fSubLeafMask != pNewLeaf->fSubLeafMask)
984 {
985 /*
986 * The sub-leaf mask differs, replace all existing leaves with the
987 * same leaf number.
988 */
989 uint32_t c = 1;
990 while ( i + c < cLeaves
991 && paLeaves[i + c].uLeaf == pNewLeaf->uLeaf)
992 c++;
993 if (c > 1 && i + c < cLeaves)
994 {
995 memmove(&paLeaves[i + c], &paLeaves[i + 1], (cLeaves - i - c) * sizeof(paLeaves[0]));
996 *pcLeaves = cLeaves -= c - 1;
997 }
998
999 paLeaves[i] = *pNewLeaf;
1000 cpumR3CpuIdAssertOrder(*ppaLeaves, *pcLeaves);
1001 return VINF_SUCCESS;
1002 }
1003
1004 /* Find sub-leaf insertion point. */
1005 while ( i < cLeaves
1006 && paLeaves[i].uSubLeaf < pNewLeaf->uSubLeaf
1007 && paLeaves[i].uLeaf == pNewLeaf->uLeaf)
1008 i++;
1009
1010 /*
1011 * If we've got an exactly matching leaf, replace it.
1012 */
1013 if ( i < cLeaves
1014 && paLeaves[i].uLeaf == pNewLeaf->uLeaf
1015 && paLeaves[i].uSubLeaf == pNewLeaf->uSubLeaf)
1016 {
1017 paLeaves[i] = *pNewLeaf;
1018 cpumR3CpuIdAssertOrder(*ppaLeaves, *pcLeaves);
1019 return VINF_SUCCESS;
1020 }
1021 }
1022
1023 /*
1024 * Adding a new leaf at 'i'.
1025 */
1026 AssertLogRelReturn(cLeaves < CPUM_CPUID_MAX_LEAVES, VERR_TOO_MANY_CPUID_LEAVES);
1027 paLeaves = cpumR3CpuIdEnsureSpace(pVM, ppaLeaves, cLeaves);
1028 if (!paLeaves)
1029 return VERR_NO_MEMORY;
1030
1031 if (i < cLeaves)
1032 memmove(&paLeaves[i + 1], &paLeaves[i], (cLeaves - i) * sizeof(paLeaves[0]));
1033 *pcLeaves += 1;
1034 paLeaves[i] = *pNewLeaf;
1035
1036 cpumR3CpuIdAssertOrder(*ppaLeaves, *pcLeaves);
1037 return VINF_SUCCESS;
1038}
1039
1040
1041#ifndef IN_VBOX_CPU_REPORT
1042/**
1043 * Removes a range of CPUID leaves.
1044 *
1045 * This will not reallocate the array.
1046 *
1047 * @param paLeaves The array of sorted CPUID leaves and sub-leaves.
1048 * @param pcLeaves Where we keep the leaf count for @a paLeaves.
1049 * @param uFirst The first leaf.
1050 * @param uLast The last leaf.
1051 */
1052static void cpumR3CpuIdRemoveRange(PCPUMCPUIDLEAF paLeaves, uint32_t *pcLeaves, uint32_t uFirst, uint32_t uLast)
1053{
1054 uint32_t cLeaves = *pcLeaves;
1055
1056 Assert(uFirst <= uLast);
1057
1058 /*
1059 * Find the first one.
1060 */
1061 uint32_t iFirst = 0;
1062 while ( iFirst < cLeaves
1063 && paLeaves[iFirst].uLeaf < uFirst)
1064 iFirst++;
1065
1066 /*
1067 * Find the end (last + 1).
1068 */
1069 uint32_t iEnd = iFirst;
1070 while ( iEnd < cLeaves
1071 && paLeaves[iEnd].uLeaf <= uLast)
1072 iEnd++;
1073
1074 /*
1075 * Adjust the array if anything needs removing.
1076 */
1077 if (iFirst < iEnd)
1078 {
1079 if (iEnd < cLeaves)
1080 memmove(&paLeaves[iFirst], &paLeaves[iEnd], (cLeaves - iEnd) * sizeof(paLeaves[0]));
1081 *pcLeaves = cLeaves -= (iEnd - iFirst);
1082 }
1083
1084 cpumR3CpuIdAssertOrder(paLeaves, *pcLeaves);
1085}
1086#endif /* IN_VBOX_CPU_REPORT */
1087
1088
1089/**
1090 * Checks if ECX make a difference when reading a given CPUID leaf.
1091 *
1092 * @returns @c true if it does, @c false if it doesn't.
1093 * @param uLeaf The leaf we're reading.
1094 * @param pcSubLeaves Number of sub-leaves accessible via ECX.
1095 * @param pfFinalEcxUnchanged Whether ECX is passed thru when going beyond the
1096 * final sub-leaf (for leaf 0xb only).
1097 */
1098static bool cpumR3IsEcxRelevantForCpuIdLeaf(uint32_t uLeaf, uint32_t *pcSubLeaves, bool *pfFinalEcxUnchanged)
1099{
1100 *pfFinalEcxUnchanged = false;
1101
1102 uint32_t auCur[4];
1103 uint32_t auPrev[4];
1104 ASMCpuIdExSlow(uLeaf, 0, 0, 0, &auPrev[0], &auPrev[1], &auPrev[2], &auPrev[3]);
1105
1106 /* Look for sub-leaves. */
1107 uint32_t uSubLeaf = 1;
1108 for (;;)
1109 {
1110 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
1111 if (memcmp(auCur, auPrev, sizeof(auCur)))
1112 break;
1113
1114 /* Advance / give up. */
1115 uSubLeaf++;
1116 if (uSubLeaf >= 64)
1117 {
1118 *pcSubLeaves = 1;
1119 return false;
1120 }
1121 }
1122
1123 /* Count sub-leaves. */
1124 uint32_t cMinLeaves = uLeaf == 0xd ? 64 : 0;
1125 uint32_t cRepeats = 0;
1126 uSubLeaf = 0;
1127 for (;;)
1128 {
1129 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
1130
1131 /* Figuring out when to stop isn't entirely straight forward as we need
1132 to cover undocumented behavior up to a point and implementation shortcuts. */
1133
1134 /* 1. Look for more than 4 repeating value sets. */
1135 if ( auCur[0] == auPrev[0]
1136 && auCur[1] == auPrev[1]
1137 && ( auCur[2] == auPrev[2]
1138 || ( auCur[2] == uSubLeaf
1139 && auPrev[2] == uSubLeaf - 1) )
1140 && auCur[3] == auPrev[3])
1141 {
1142 if ( uLeaf != 0xd
1143 || uSubLeaf >= 64
1144 || ( auCur[0] == 0
1145 && auCur[1] == 0
1146 && auCur[2] == 0
1147 && auCur[3] == 0
1148 && auPrev[2] == 0) )
1149 cRepeats++;
1150 if (cRepeats > 4 && uSubLeaf >= cMinLeaves)
1151 break;
1152 }
1153 else
1154 cRepeats = 0;
1155
1156 /* 2. Look for zero values. */
1157 if ( auCur[0] == 0
1158 && auCur[1] == 0
1159 && (auCur[2] == 0 || auCur[2] == uSubLeaf)
1160 && (auCur[3] == 0 || uLeaf == 0xb /* edx is fixed */)
1161 && uSubLeaf >= cMinLeaves)
1162 {
1163 cRepeats = 0;
1164 break;
1165 }
1166
1167 /* 3. Leaf 0xb level type 0 check. */
1168 if ( uLeaf == 0xb
1169 && (auCur[2] & 0xff00) == 0
1170 && (auPrev[2] & 0xff00) == 0)
1171 {
1172 cRepeats = 0;
1173 break;
1174 }
1175
1176 /* 99. Give up. */
1177 if (uSubLeaf >= 128)
1178 {
1179#ifndef IN_VBOX_CPU_REPORT
1180 /* Ok, limit it according to the documentation if possible just to
1181 avoid annoying users with these detection issues. */
1182 uint32_t cDocLimit = UINT32_MAX;
1183 if (uLeaf == 0x4)
1184 cDocLimit = 4;
1185 else if (uLeaf == 0x7)
1186 cDocLimit = 1;
1187 else if (uLeaf == 0xd)
1188 cDocLimit = 63;
1189 else if (uLeaf == 0xf)
1190 cDocLimit = 2;
1191 if (cDocLimit != UINT32_MAX)
1192 {
1193 *pfFinalEcxUnchanged = auCur[2] == uSubLeaf && uLeaf == 0xb;
1194 *pcSubLeaves = cDocLimit + 3;
1195 return true;
1196 }
1197#endif
1198 *pcSubLeaves = UINT32_MAX;
1199 return true;
1200 }
1201
1202 /* Advance. */
1203 uSubLeaf++;
1204 memcpy(auPrev, auCur, sizeof(auCur));
1205 }
1206
1207 /* Standard exit. */
1208 *pfFinalEcxUnchanged = auCur[2] == uSubLeaf && uLeaf == 0xb;
1209 *pcSubLeaves = uSubLeaf + 1 - cRepeats;
1210 if (*pcSubLeaves == 0)
1211 *pcSubLeaves = 1;
1212 return true;
1213}
1214
1215
1216/**
1217 * Gets a CPU ID leaf.
1218 *
1219 * @returns VBox status code.
1220 * @param pVM The cross context VM structure.
1221 * @param pLeaf Where to store the found leaf.
1222 * @param uLeaf The leaf to locate.
1223 * @param uSubLeaf The subleaf to locate. Pass 0 if no sub-leaves.
1224 */
1225VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf)
1226{
1227 PCPUMCPUIDLEAF pcLeaf = cpumR3CpuIdGetLeaf(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves,
1228 uLeaf, uSubLeaf);
1229 if (pcLeaf)
1230 {
1231 memcpy(pLeaf, pcLeaf, sizeof(*pLeaf));
1232 return VINF_SUCCESS;
1233 }
1234
1235 return VERR_NOT_FOUND;
1236}
1237
1238
1239/**
1240 * Gets all the leaves.
1241 *
1242 * This only works after the CPUID leaves have been initialized. The interface
1243 * is intended for NEM and configuring CPUID leaves for the native hypervisor.
1244 *
1245 * @returns Pointer to the array of leaves. NULL on failure.
1246 * @param pVM The cross context VM structure.
1247 * @param pcLeaves Where to return the number of leaves.
1248 */
1249VMMR3_INT_DECL(PCCPUMCPUIDLEAF) CPUMR3CpuIdGetPtr(PVM pVM, uint32_t *pcLeaves)
1250{
1251 *pcLeaves = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1252 return pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
1253}
1254
1255
1256/**
1257 * Inserts a CPU ID leaf, replacing any existing ones.
1258 *
1259 * @returns VBox status code.
1260 * @param pVM The cross context VM structure.
1261 * @param pNewLeaf Pointer to the leaf being inserted.
1262 */
1263VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf)
1264{
1265 /*
1266 * Validate parameters.
1267 */
1268 AssertReturn(pVM, VERR_INVALID_PARAMETER);
1269 AssertReturn(pNewLeaf, VERR_INVALID_PARAMETER);
1270
1271 /*
1272 * Disallow replacing CPU ID leaves that this API currently cannot manage.
1273 * These leaves have dependencies on saved-states, see PATMCpuidReplacement().
1274 * If you want to modify these leaves, use CPUMSetGuestCpuIdFeature().
1275 */
1276 if ( pNewLeaf->uLeaf == UINT32_C(0x00000000) /* Standard */
1277 || pNewLeaf->uLeaf == UINT32_C(0x00000001)
1278 || pNewLeaf->uLeaf == UINT32_C(0x80000000) /* Extended */
1279 || pNewLeaf->uLeaf == UINT32_C(0x80000001)
1280 || pNewLeaf->uLeaf == UINT32_C(0xc0000000) /* Centaur */
1281 || pNewLeaf->uLeaf == UINT32_C(0xc0000001) )
1282 {
1283 return VERR_NOT_SUPPORTED;
1284 }
1285
1286 return cpumR3CpuIdInsert(pVM, NULL /* ppaLeaves */, NULL /* pcLeaves */, pNewLeaf);
1287}
1288
1289
1290/**
1291 * Collects CPUID leaves and sub-leaves, returning a sorted array of them.
1292 *
1293 * @returns VBox status code.
1294 * @param ppaLeaves Where to return the array pointer on success.
1295 * Use RTMemFree to release.
1296 * @param pcLeaves Where to return the size of the array on
1297 * success.
1298 */
1299VMMR3DECL(int) CPUMR3CpuIdCollectLeaves(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
1300{
1301 *ppaLeaves = NULL;
1302 *pcLeaves = 0;
1303
1304 /*
1305 * Try out various candidates. This must be sorted!
1306 */
1307 static struct { uint32_t uMsr; bool fSpecial; } const s_aCandidates[] =
1308 {
1309 { UINT32_C(0x00000000), false },
1310 { UINT32_C(0x10000000), false },
1311 { UINT32_C(0x20000000), false },
1312 { UINT32_C(0x30000000), false },
1313 { UINT32_C(0x40000000), false },
1314 { UINT32_C(0x50000000), false },
1315 { UINT32_C(0x60000000), false },
1316 { UINT32_C(0x70000000), false },
1317 { UINT32_C(0x80000000), false },
1318 { UINT32_C(0x80860000), false },
1319 { UINT32_C(0x8ffffffe), true },
1320 { UINT32_C(0x8fffffff), true },
1321 { UINT32_C(0x90000000), false },
1322 { UINT32_C(0xa0000000), false },
1323 { UINT32_C(0xb0000000), false },
1324 { UINT32_C(0xc0000000), false },
1325 { UINT32_C(0xd0000000), false },
1326 { UINT32_C(0xe0000000), false },
1327 { UINT32_C(0xf0000000), false },
1328 };
1329
1330 for (uint32_t iOuter = 0; iOuter < RT_ELEMENTS(s_aCandidates); iOuter++)
1331 {
1332 uint32_t uLeaf = s_aCandidates[iOuter].uMsr;
1333 uint32_t uEax, uEbx, uEcx, uEdx;
1334 ASMCpuIdExSlow(uLeaf, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
1335
1336 /*
1337 * Does EAX look like a typical leaf count value?
1338 */
1339 if ( uEax > uLeaf
1340 && uEax - uLeaf < UINT32_C(0xff)) /* Adjust 0xff limit when exceeded by real HW. */
1341 {
1342 /* Yes, dump them. */
1343 uint32_t cLeaves = uEax - uLeaf + 1;
1344 while (cLeaves-- > 0)
1345 {
1346 ASMCpuIdExSlow(uLeaf, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
1347
1348 uint32_t fFlags = 0;
1349
1350 /* There are currently three known leaves containing an APIC ID
1351 that needs EMT specific attention */
1352 if (uLeaf == 1)
1353 fFlags |= CPUMCPUIDLEAF_F_CONTAINS_APIC_ID;
1354 else if (uLeaf == 0xb && uEcx != 0)
1355 fFlags |= CPUMCPUIDLEAF_F_CONTAINS_APIC_ID;
1356 else if ( uLeaf == UINT32_C(0x8000001e)
1357 && ( uEax
1358 || uEbx
1359 || uEdx
1360 || ASMIsAmdCpuEx((*ppaLeaves)[0].uEbx, (*ppaLeaves)[0].uEcx, (*ppaLeaves)[0].uEdx)
1361 || ASMIsHygonCpuEx((*ppaLeaves)[0].uEbx, (*ppaLeaves)[0].uEcx, (*ppaLeaves)[0].uEdx)) )
1362 fFlags |= CPUMCPUIDLEAF_F_CONTAINS_APIC_ID;
1363
1364 /* The APIC bit is per-VCpu and needs flagging. */
1365 if (uLeaf == 1)
1366 fFlags |= CPUMCPUIDLEAF_F_CONTAINS_APIC;
1367 else if ( uLeaf == UINT32_C(0x80000001)
1368 && ( (uEdx & X86_CPUID_AMD_FEATURE_EDX_APIC)
1369 || ASMIsAmdCpuEx((*ppaLeaves)[0].uEbx, (*ppaLeaves)[0].uEcx, (*ppaLeaves)[0].uEdx)
1370 || ASMIsHygonCpuEx((*ppaLeaves)[0].uEbx, (*ppaLeaves)[0].uEcx, (*ppaLeaves)[0].uEdx)) )
1371 fFlags |= CPUMCPUIDLEAF_F_CONTAINS_APIC;
1372
1373 /* Check three times here to reduce the chance of CPU migration
1374 resulting in false positives with things like the APIC ID. */
1375 uint32_t cSubLeaves;
1376 bool fFinalEcxUnchanged;
1377 if ( cpumR3IsEcxRelevantForCpuIdLeaf(uLeaf, &cSubLeaves, &fFinalEcxUnchanged)
1378 && cpumR3IsEcxRelevantForCpuIdLeaf(uLeaf, &cSubLeaves, &fFinalEcxUnchanged)
1379 && cpumR3IsEcxRelevantForCpuIdLeaf(uLeaf, &cSubLeaves, &fFinalEcxUnchanged))
1380 {
1381 if (cSubLeaves > (uLeaf == 0xd ? 68U : 16U))
1382 {
1383 /* This shouldn't happen. But in case it does, file all
1384 relevant details in the release log. */
1385 LogRel(("CPUM: VERR_CPUM_TOO_MANY_CPUID_SUBLEAVES! uLeaf=%#x cSubLeaves=%#x\n", uLeaf, cSubLeaves));
1386 LogRel(("------------------ dump of problematic sub-leaves -----------------\n"));
1387 for (uint32_t uSubLeaf = 0; uSubLeaf < 128; uSubLeaf++)
1388 {
1389 uint32_t auTmp[4];
1390 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &auTmp[0], &auTmp[1], &auTmp[2], &auTmp[3]);
1391 LogRel(("CPUM: %#010x, %#010x => %#010x %#010x %#010x %#010x\n",
1392 uLeaf, uSubLeaf, auTmp[0], auTmp[1], auTmp[2], auTmp[3]));
1393 }
1394 LogRel(("----------------- dump of what we've found so far -----------------\n"));
1395 for (uint32_t i = 0 ; i < *pcLeaves; i++)
1396 LogRel(("CPUM: %#010x, %#010x/%#010x => %#010x %#010x %#010x %#010x\n",
1397 (*ppaLeaves)[i].uLeaf, (*ppaLeaves)[i].uSubLeaf, (*ppaLeaves)[i].fSubLeafMask,
1398 (*ppaLeaves)[i].uEax, (*ppaLeaves)[i].uEbx, (*ppaLeaves)[i].uEcx, (*ppaLeaves)[i].uEdx));
1399 LogRel(("\nPlease create a defect on virtualbox.org and attach this log file!\n\n"));
1400 return VERR_CPUM_TOO_MANY_CPUID_SUBLEAVES;
1401 }
1402
1403 if (fFinalEcxUnchanged)
1404 fFlags |= CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES;
1405
1406 for (uint32_t uSubLeaf = 0; uSubLeaf < cSubLeaves; uSubLeaf++)
1407 {
1408 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &uEax, &uEbx, &uEcx, &uEdx);
1409 int rc = cpumR3CollectCpuIdInfoAddOne(ppaLeaves, pcLeaves,
1410 uLeaf, uSubLeaf, UINT32_MAX, uEax, uEbx, uEcx, uEdx, fFlags);
1411 if (RT_FAILURE(rc))
1412 return rc;
1413 }
1414 }
1415 else
1416 {
1417 int rc = cpumR3CollectCpuIdInfoAddOne(ppaLeaves, pcLeaves,
1418 uLeaf, 0, 0, uEax, uEbx, uEcx, uEdx, fFlags);
1419 if (RT_FAILURE(rc))
1420 return rc;
1421 }
1422
1423 /* next */
1424 uLeaf++;
1425 }
1426 }
1427 /*
1428 * Special CPUIDs needs special handling as they don't follow the
1429 * leaf count principle used above.
1430 */
1431 else if (s_aCandidates[iOuter].fSpecial)
1432 {
1433 bool fKeep = false;
1434 if (uLeaf == 0x8ffffffe && uEax == UINT32_C(0x00494544))
1435 fKeep = true;
1436 else if ( uLeaf == 0x8fffffff
1437 && RT_C_IS_PRINT(RT_BYTE1(uEax))
1438 && RT_C_IS_PRINT(RT_BYTE2(uEax))
1439 && RT_C_IS_PRINT(RT_BYTE3(uEax))
1440 && RT_C_IS_PRINT(RT_BYTE4(uEax))
1441 && RT_C_IS_PRINT(RT_BYTE1(uEbx))
1442 && RT_C_IS_PRINT(RT_BYTE2(uEbx))
1443 && RT_C_IS_PRINT(RT_BYTE3(uEbx))
1444 && RT_C_IS_PRINT(RT_BYTE4(uEbx))
1445 && RT_C_IS_PRINT(RT_BYTE1(uEcx))
1446 && RT_C_IS_PRINT(RT_BYTE2(uEcx))
1447 && RT_C_IS_PRINT(RT_BYTE3(uEcx))
1448 && RT_C_IS_PRINT(RT_BYTE4(uEcx))
1449 && RT_C_IS_PRINT(RT_BYTE1(uEdx))
1450 && RT_C_IS_PRINT(RT_BYTE2(uEdx))
1451 && RT_C_IS_PRINT(RT_BYTE3(uEdx))
1452 && RT_C_IS_PRINT(RT_BYTE4(uEdx)) )
1453 fKeep = true;
1454 if (fKeep)
1455 {
1456 int rc = cpumR3CollectCpuIdInfoAddOne(ppaLeaves, pcLeaves,
1457 uLeaf, 0, 0, uEax, uEbx, uEcx, uEdx, 0);
1458 if (RT_FAILURE(rc))
1459 return rc;
1460 }
1461 }
1462 }
1463
1464 cpumR3CpuIdAssertOrder(*ppaLeaves, *pcLeaves);
1465 return VINF_SUCCESS;
1466}
1467
1468
1469/**
1470 * Determines the method the CPU uses to handle unknown CPUID leaves.
1471 *
1472 * @returns VBox status code.
1473 * @param penmUnknownMethod Where to return the method.
1474 * @param pDefUnknown Where to return default unknown values. This
1475 * will be set, even if the resulting method
1476 * doesn't actually needs it.
1477 */
1478VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown)
1479{
1480 uint32_t uLastStd = ASMCpuId_EAX(0);
1481 uint32_t uLastExt = ASMCpuId_EAX(0x80000000);
1482 if (!ASMIsValidExtRange(uLastExt))
1483 uLastExt = 0x80000000;
1484
1485 uint32_t auChecks[] =
1486 {
1487 uLastStd + 1,
1488 uLastStd + 5,
1489 uLastStd + 8,
1490 uLastStd + 32,
1491 uLastStd + 251,
1492 uLastExt + 1,
1493 uLastExt + 8,
1494 uLastExt + 15,
1495 uLastExt + 63,
1496 uLastExt + 255,
1497 0x7fbbffcc,
1498 0x833f7872,
1499 0xefff2353,
1500 0x35779456,
1501 0x1ef6d33e,
1502 };
1503
1504 static const uint32_t s_auValues[] =
1505 {
1506 0xa95d2156,
1507 0x00000001,
1508 0x00000002,
1509 0x00000008,
1510 0x00000000,
1511 0x55773399,
1512 0x93401769,
1513 0x12039587,
1514 };
1515
1516 /*
1517 * Simple method, all zeros.
1518 */
1519 *penmUnknownMethod = CPUMUNKNOWNCPUID_DEFAULTS;
1520 pDefUnknown->uEax = 0;
1521 pDefUnknown->uEbx = 0;
1522 pDefUnknown->uEcx = 0;
1523 pDefUnknown->uEdx = 0;
1524
1525 /*
1526 * Intel has been observed returning the last standard leaf.
1527 */
1528 uint32_t auLast[4];
1529 ASMCpuIdExSlow(uLastStd, 0, 0, 0, &auLast[0], &auLast[1], &auLast[2], &auLast[3]);
1530
1531 uint32_t cChecks = RT_ELEMENTS(auChecks);
1532 while (cChecks > 0)
1533 {
1534 uint32_t auCur[4];
1535 ASMCpuIdExSlow(auChecks[cChecks - 1], 0, 0, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
1536 if (memcmp(auCur, auLast, sizeof(auCur)))
1537 break;
1538 cChecks--;
1539 }
1540 if (cChecks == 0)
1541 {
1542 /* Now, what happens when the input changes? Esp. ECX. */
1543 uint32_t cTotal = 0;
1544 uint32_t cSame = 0;
1545 uint32_t cLastWithEcx = 0;
1546 uint32_t cNeither = 0;
1547 uint32_t cValues = RT_ELEMENTS(s_auValues);
1548 while (cValues > 0)
1549 {
1550 uint32_t uValue = s_auValues[cValues - 1];
1551 uint32_t auLastWithEcx[4];
1552 ASMCpuIdExSlow(uLastStd, uValue, uValue, uValue,
1553 &auLastWithEcx[0], &auLastWithEcx[1], &auLastWithEcx[2], &auLastWithEcx[3]);
1554
1555 cChecks = RT_ELEMENTS(auChecks);
1556 while (cChecks > 0)
1557 {
1558 uint32_t auCur[4];
1559 ASMCpuIdExSlow(auChecks[cChecks - 1], uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
1560 if (!memcmp(auCur, auLast, sizeof(auCur)))
1561 {
1562 cSame++;
1563 if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
1564 cLastWithEcx++;
1565 }
1566 else if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
1567 cLastWithEcx++;
1568 else
1569 cNeither++;
1570 cTotal++;
1571 cChecks--;
1572 }
1573 cValues--;
1574 }
1575
1576 Log(("CPUM: cNeither=%d cSame=%d cLastWithEcx=%d cTotal=%d\n", cNeither, cSame, cLastWithEcx, cTotal));
1577 if (cSame == cTotal)
1578 *penmUnknownMethod = CPUMUNKNOWNCPUID_LAST_STD_LEAF;
1579 else if (cLastWithEcx == cTotal)
1580 *penmUnknownMethod = CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX;
1581 else
1582 *penmUnknownMethod = CPUMUNKNOWNCPUID_LAST_STD_LEAF;
1583 pDefUnknown->uEax = auLast[0];
1584 pDefUnknown->uEbx = auLast[1];
1585 pDefUnknown->uEcx = auLast[2];
1586 pDefUnknown->uEdx = auLast[3];
1587 return VINF_SUCCESS;
1588 }
1589
1590 /*
1591 * Unchanged register values?
1592 */
1593 cChecks = RT_ELEMENTS(auChecks);
1594 while (cChecks > 0)
1595 {
1596 uint32_t const uLeaf = auChecks[cChecks - 1];
1597 uint32_t cValues = RT_ELEMENTS(s_auValues);
1598 while (cValues > 0)
1599 {
1600 uint32_t uValue = s_auValues[cValues - 1];
1601 uint32_t auCur[4];
1602 ASMCpuIdExSlow(uLeaf, uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
1603 if ( auCur[0] != uLeaf
1604 || auCur[1] != uValue
1605 || auCur[2] != uValue
1606 || auCur[3] != uValue)
1607 break;
1608 cValues--;
1609 }
1610 if (cValues != 0)
1611 break;
1612 cChecks--;
1613 }
1614 if (cChecks == 0)
1615 {
1616 *penmUnknownMethod = CPUMUNKNOWNCPUID_PASSTHRU;
1617 return VINF_SUCCESS;
1618 }
1619
1620 /*
1621 * Just go with the simple method.
1622 */
1623 return VINF_SUCCESS;
1624}
1625
1626
1627/**
1628 * Translates a unknow CPUID leaf method into the constant name (sans prefix).
1629 *
1630 * @returns Read only name string.
1631 * @param enmUnknownMethod The method to translate.
1632 */
1633VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod)
1634{
1635 switch (enmUnknownMethod)
1636 {
1637 case CPUMUNKNOWNCPUID_DEFAULTS: return "DEFAULTS";
1638 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: return "LAST_STD_LEAF";
1639 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: return "LAST_STD_LEAF_WITH_ECX";
1640 case CPUMUNKNOWNCPUID_PASSTHRU: return "PASSTHRU";
1641
1642 case CPUMUNKNOWNCPUID_INVALID:
1643 case CPUMUNKNOWNCPUID_END:
1644 case CPUMUNKNOWNCPUID_32BIT_HACK:
1645 break;
1646 }
1647 return "Invalid-unknown-CPUID-method";
1648}
1649
1650
1651/**
1652 * Detect the CPU vendor give n the
1653 *
1654 * @returns The vendor.
1655 * @param uEAX EAX from CPUID(0).
1656 * @param uEBX EBX from CPUID(0).
1657 * @param uECX ECX from CPUID(0).
1658 * @param uEDX EDX from CPUID(0).
1659 */
1660VMMR3DECL(CPUMCPUVENDOR) CPUMR3CpuIdDetectVendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
1661{
1662 if (ASMIsValidStdRange(uEAX))
1663 {
1664 if (ASMIsAmdCpuEx(uEBX, uECX, uEDX))
1665 return CPUMCPUVENDOR_AMD;
1666
1667 if (ASMIsIntelCpuEx(uEBX, uECX, uEDX))
1668 return CPUMCPUVENDOR_INTEL;
1669
1670 if (ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX))
1671 return CPUMCPUVENDOR_VIA;
1672
1673 if (ASMIsShanghaiCpuEx(uEBX, uECX, uEDX))
1674 return CPUMCPUVENDOR_SHANGHAI;
1675
1676 if ( uEBX == UINT32_C(0x69727943) /* CyrixInstead */
1677 && uECX == UINT32_C(0x64616574)
1678 && uEDX == UINT32_C(0x736E4978))
1679 return CPUMCPUVENDOR_CYRIX;
1680
1681 if (ASMIsHygonCpuEx(uEBX, uECX, uEDX))
1682 return CPUMCPUVENDOR_HYGON;
1683
1684 /* "Geode by NSC", example: family 5, model 9. */
1685
1686 /** @todo detect the other buggers... */
1687 }
1688
1689 return CPUMCPUVENDOR_UNKNOWN;
1690}
1691
1692
1693/**
1694 * Translates a CPU vendor enum value into the corresponding string constant.
1695 *
1696 * The named can be prefixed with 'CPUMCPUVENDOR_' to construct a valid enum
1697 * value name. This can be useful when generating code.
1698 *
1699 * @returns Read only name string.
1700 * @param enmVendor The CPU vendor value.
1701 */
1702VMMR3DECL(const char *) CPUMR3CpuVendorName(CPUMCPUVENDOR enmVendor)
1703{
1704 switch (enmVendor)
1705 {
1706 case CPUMCPUVENDOR_INTEL: return "INTEL";
1707 case CPUMCPUVENDOR_AMD: return "AMD";
1708 case CPUMCPUVENDOR_VIA: return "VIA";
1709 case CPUMCPUVENDOR_CYRIX: return "CYRIX";
1710 case CPUMCPUVENDOR_SHANGHAI: return "SHANGHAI";
1711 case CPUMCPUVENDOR_HYGON: return "HYGON";
1712 case CPUMCPUVENDOR_UNKNOWN: return "UNKNOWN";
1713
1714 case CPUMCPUVENDOR_INVALID:
1715 case CPUMCPUVENDOR_32BIT_HACK:
1716 break;
1717 }
1718 return "Invalid-cpu-vendor";
1719}
1720
1721
1722static PCCPUMCPUIDLEAF cpumR3CpuIdFindLeaf(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf)
1723{
1724 /* Could do binary search, doing linear now because I'm lazy. */
1725 PCCPUMCPUIDLEAF pLeaf = paLeaves;
1726 while (cLeaves-- > 0)
1727 {
1728 if (pLeaf->uLeaf == uLeaf)
1729 return pLeaf;
1730 pLeaf++;
1731 }
1732 return NULL;
1733}
1734
1735
1736static PCCPUMCPUIDLEAF cpumR3CpuIdFindLeafEx(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf)
1737{
1738 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, uLeaf);
1739 if ( !pLeaf
1740 || pLeaf->uSubLeaf != (uSubLeaf & pLeaf->fSubLeafMask))
1741 return pLeaf;
1742
1743 /* Linear sub-leaf search. Lazy as usual. */
1744 cLeaves -= pLeaf - paLeaves;
1745 while ( cLeaves-- > 0
1746 && pLeaf->uLeaf == uLeaf)
1747 {
1748 if (pLeaf->uSubLeaf == (uSubLeaf & pLeaf->fSubLeafMask))
1749 return pLeaf;
1750 pLeaf++;
1751 }
1752
1753 return NULL;
1754}
1755
1756
1757static void cpumR3ExplodeVmxFeatures(PCVMXMSRS pVmxMsrs, PCPUMFEATURES pFeatures)
1758{
1759 Assert(pVmxMsrs);
1760 Assert(pFeatures);
1761 Assert(pFeatures->fVmx);
1762
1763 /* Basic information. */
1764 {
1765 uint64_t const u64Basic = pVmxMsrs->u64Basic;
1766 pFeatures->fVmxInsOutInfo = RT_BF_GET(u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
1767 }
1768
1769 /* Pin-based VM-execution controls. */
1770 {
1771 uint32_t const fPinCtls = pVmxMsrs->PinCtls.n.allowed1;
1772 pFeatures->fVmxExtIntExit = RT_BOOL(fPinCtls & VMX_PIN_CTLS_EXT_INT_EXIT);
1773 pFeatures->fVmxNmiExit = RT_BOOL(fPinCtls & VMX_PIN_CTLS_NMI_EXIT);
1774 pFeatures->fVmxVirtNmi = RT_BOOL(fPinCtls & VMX_PIN_CTLS_VIRT_NMI);
1775 pFeatures->fVmxPreemptTimer = RT_BOOL(fPinCtls & VMX_PIN_CTLS_PREEMPT_TIMER);
1776 pFeatures->fVmxPostedInt = RT_BOOL(fPinCtls & VMX_PIN_CTLS_POSTED_INT);
1777 }
1778
1779 /* Processor-based VM-execution controls. */
1780 {
1781 uint32_t const fProcCtls = pVmxMsrs->ProcCtls.n.allowed1;
1782 pFeatures->fVmxIntWindowExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT);
1783 pFeatures->fVmxTscOffsetting = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING);
1784 pFeatures->fVmxHltExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_HLT_EXIT);
1785 pFeatures->fVmxInvlpgExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_INVLPG_EXIT);
1786 pFeatures->fVmxMwaitExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MWAIT_EXIT);
1787 pFeatures->fVmxRdpmcExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_RDPMC_EXIT);
1788 pFeatures->fVmxRdtscExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_RDTSC_EXIT);
1789 pFeatures->fVmxCr3LoadExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT);
1790 pFeatures->fVmxCr3StoreExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT);
1791 pFeatures->fVmxTertiaryExecCtls = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS);
1792 pFeatures->fVmxCr8LoadExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT);
1793 pFeatures->fVmxCr8StoreExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT);
1794 pFeatures->fVmxUseTprShadow = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1795 pFeatures->fVmxNmiWindowExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT);
1796 pFeatures->fVmxMovDRxExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
1797 pFeatures->fVmxUncondIoExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT);
1798 pFeatures->fVmxUseIoBitmaps = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS);
1799 pFeatures->fVmxMonitorTrapFlag = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
1800 pFeatures->fVmxUseMsrBitmaps = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS);
1801 pFeatures->fVmxMonitorExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MONITOR_EXIT);
1802 pFeatures->fVmxPauseExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_PAUSE_EXIT);
1803 pFeatures->fVmxSecondaryExecCtls = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
1804 }
1805
1806 /* Secondary processor-based VM-execution controls. */
1807 {
1808 uint32_t const fProcCtls2 = pFeatures->fVmxSecondaryExecCtls ? pVmxMsrs->ProcCtls2.n.allowed1 : 0;
1809 pFeatures->fVmxVirtApicAccess = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
1810 pFeatures->fVmxEpt = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_EPT);
1811 pFeatures->fVmxDescTableExit = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_DESC_TABLE_EXIT);
1812 pFeatures->fVmxRdtscp = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDTSCP);
1813 pFeatures->fVmxVirtX2ApicMode = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
1814 pFeatures->fVmxVpid = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VPID);
1815 pFeatures->fVmxWbinvdExit = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_WBINVD_EXIT);
1816 pFeatures->fVmxUnrestrictedGuest = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
1817 pFeatures->fVmxApicRegVirt = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT);
1818 pFeatures->fVmxVirtIntDelivery = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
1819 pFeatures->fVmxPauseLoopExit = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
1820 pFeatures->fVmxRdrandExit = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDRAND_EXIT);
1821 pFeatures->fVmxInvpcid = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_INVPCID);
1822 pFeatures->fVmxVmFunc = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VMFUNC);
1823 pFeatures->fVmxVmcsShadowing = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING);
1824 pFeatures->fVmxRdseedExit = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDSEED_EXIT);
1825 pFeatures->fVmxPml = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_PML);
1826 pFeatures->fVmxEptXcptVe = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_EPT_XCPT_VE);
1827 pFeatures->fVmxConcealVmxFromPt = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_CONCEAL_VMX_FROM_PT);
1828 pFeatures->fVmxXsavesXrstors = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_XSAVES_XRSTORS);
1829 pFeatures->fVmxModeBasedExecuteEpt = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_MODE_BASED_EPT_PERM);
1830 pFeatures->fVmxSppEpt = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_SPP_EPT);
1831 pFeatures->fVmxPtEpt = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_PT_EPT);
1832 pFeatures->fVmxUseTscScaling = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_TSC_SCALING);
1833 pFeatures->fVmxUserWaitPause = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_USER_WAIT_PAUSE);
1834 pFeatures->fVmxEnclvExit = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_ENCLV_EXIT);
1835 }
1836
1837 /* Tertiary processor-based VM-execution controls. */
1838 {
1839 uint64_t const fProcCtls3 = pFeatures->fVmxTertiaryExecCtls ? pVmxMsrs->u64ProcCtls3 : 0;
1840 pFeatures->fVmxLoadIwKeyExit = RT_BOOL(fProcCtls3 & VMX_PROC_CTLS3_LOADIWKEY_EXIT);
1841 }
1842
1843 /* VM-exit controls. */
1844 {
1845 uint32_t const fExitCtls = pVmxMsrs->ExitCtls.n.allowed1;
1846 pFeatures->fVmxExitSaveDebugCtls = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG);
1847 pFeatures->fVmxHostAddrSpaceSize = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1848 pFeatures->fVmxExitAckExtInt = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT);
1849 pFeatures->fVmxExitSavePatMsr = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR);
1850 pFeatures->fVmxExitLoadPatMsr = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR);
1851 pFeatures->fVmxExitSaveEferMsr = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR);
1852 pFeatures->fVmxExitLoadEferMsr = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR);
1853 pFeatures->fVmxSavePreemptTimer = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1854 }
1855
1856 /* VM-entry controls. */
1857 {
1858 uint32_t const fEntryCtls = pVmxMsrs->EntryCtls.n.allowed1;
1859 pFeatures->fVmxEntryLoadDebugCtls = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG);
1860 pFeatures->fVmxIa32eModeGuest = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
1861 pFeatures->fVmxEntryLoadEferMsr = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR);
1862 pFeatures->fVmxEntryLoadPatMsr = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR);
1863 }
1864
1865 /* Miscellaneous data. */
1866 {
1867 uint32_t const fMiscData = pVmxMsrs->u64Misc;
1868 pFeatures->fVmxExitSaveEferLma = RT_BOOL(fMiscData & VMX_MISC_EXIT_SAVE_EFER_LMA);
1869 pFeatures->fVmxPt = RT_BOOL(fMiscData & VMX_MISC_INTEL_PT);
1870 pFeatures->fVmxVmwriteAll = RT_BOOL(fMiscData & VMX_MISC_VMWRITE_ALL);
1871 pFeatures->fVmxEntryInjectSoftInt = RT_BOOL(fMiscData & VMX_MISC_ENTRY_INJECT_SOFT_INT);
1872 }
1873}
1874
1875
1876int cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs, PCPUMFEATURES pFeatures)
1877{
1878 Assert(pMsrs);
1879 RT_ZERO(*pFeatures);
1880 if (cLeaves >= 2)
1881 {
1882 AssertLogRelReturn(paLeaves[0].uLeaf == 0, VERR_CPUM_IPE_1);
1883 AssertLogRelReturn(paLeaves[1].uLeaf == 1, VERR_CPUM_IPE_1);
1884 PCCPUMCPUIDLEAF const pStd0Leaf = cpumR3CpuIdFindLeafEx(paLeaves, cLeaves, 0, 0);
1885 AssertLogRelReturn(pStd0Leaf, VERR_CPUM_IPE_1);
1886 PCCPUMCPUIDLEAF const pStd1Leaf = cpumR3CpuIdFindLeafEx(paLeaves, cLeaves, 1, 0);
1887 AssertLogRelReturn(pStd1Leaf, VERR_CPUM_IPE_1);
1888
1889 pFeatures->enmCpuVendor = CPUMR3CpuIdDetectVendorEx(pStd0Leaf->uEax,
1890 pStd0Leaf->uEbx,
1891 pStd0Leaf->uEcx,
1892 pStd0Leaf->uEdx);
1893 pFeatures->uFamily = ASMGetCpuFamily(pStd1Leaf->uEax);
1894 pFeatures->uModel = ASMGetCpuModel(pStd1Leaf->uEax, pFeatures->enmCpuVendor == CPUMCPUVENDOR_INTEL);
1895 pFeatures->uStepping = ASMGetCpuStepping(pStd1Leaf->uEax);
1896 pFeatures->enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx((CPUMCPUVENDOR)pFeatures->enmCpuVendor,
1897 pFeatures->uFamily,
1898 pFeatures->uModel,
1899 pFeatures->uStepping);
1900
1901 PCCPUMCPUIDLEAF const pExtLeaf8 = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000008);
1902 if (pExtLeaf8)
1903 {
1904 pFeatures->cMaxPhysAddrWidth = pExtLeaf8->uEax & 0xff;
1905 pFeatures->cMaxLinearAddrWidth = (pExtLeaf8->uEax >> 8) & 0xff;
1906 }
1907 else if (pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_PSE36)
1908 {
1909 pFeatures->cMaxPhysAddrWidth = 36;
1910 pFeatures->cMaxLinearAddrWidth = 36;
1911 }
1912 else
1913 {
1914 pFeatures->cMaxPhysAddrWidth = 32;
1915 pFeatures->cMaxLinearAddrWidth = 32;
1916 }
1917
1918 /* Standard features. */
1919 pFeatures->fMsr = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_MSR);
1920 pFeatures->fApic = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_APIC);
1921 pFeatures->fX2Apic = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_X2APIC);
1922 pFeatures->fPse = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_PSE);
1923 pFeatures->fPse36 = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_PSE36);
1924 pFeatures->fPae = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_PAE);
1925 pFeatures->fPge = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_PGE);
1926 pFeatures->fPat = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_PAT);
1927 pFeatures->fFxSaveRstor = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_FXSR);
1928 pFeatures->fXSaveRstor = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_XSAVE);
1929 pFeatures->fOpSysXSaveRstor = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_OSXSAVE);
1930 pFeatures->fMmx = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_MMX);
1931 pFeatures->fSse = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_SSE);
1932 pFeatures->fSse2 = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_SSE2);
1933 pFeatures->fSse3 = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_SSE3);
1934 pFeatures->fSsse3 = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_SSSE3);
1935 pFeatures->fSse41 = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_SSE4_1);
1936 pFeatures->fSse42 = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_SSE4_2);
1937 pFeatures->fAvx = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_AVX);
1938 pFeatures->fTsc = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_TSC);
1939 pFeatures->fSysEnter = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_SEP);
1940 pFeatures->fHypervisorPresent = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_HVP);
1941 pFeatures->fMonitorMWait = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR);
1942 pFeatures->fMovCmpXchg16b = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_CX16);
1943 pFeatures->fClFlush = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_CLFSH);
1944 pFeatures->fPcid = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_PCID);
1945 pFeatures->fVmx = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_VMX);
1946 if (pFeatures->fVmx)
1947 cpumR3ExplodeVmxFeatures(&pMsrs->hwvirt.vmx, pFeatures);
1948
1949 /* Structured extended features. */
1950 PCCPUMCPUIDLEAF const pSxfLeaf0 = cpumR3CpuIdFindLeafEx(paLeaves, cLeaves, 7, 0);
1951 if (pSxfLeaf0)
1952 {
1953 pFeatures->fFsGsBase = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE);
1954 pFeatures->fAvx2 = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_AVX2);
1955 pFeatures->fAvx512Foundation = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_AVX512F);
1956 pFeatures->fClFlushOpt = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT);
1957 pFeatures->fInvpcid = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_INVPCID);
1958
1959 pFeatures->fIbpb = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB);
1960 pFeatures->fIbrs = pFeatures->fIbpb;
1961 pFeatures->fStibp = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_STIBP);
1962 pFeatures->fFlushCmd = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD);
1963 pFeatures->fArchCap = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP);
1964 pFeatures->fMdsClear = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR);
1965 }
1966
1967 /* MWAIT/MONITOR leaf. */
1968 PCCPUMCPUIDLEAF const pMWaitLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 5);
1969 if (pMWaitLeaf)
1970 pFeatures->fMWaitExtensions = (pMWaitLeaf->uEcx & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
1971 == (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
1972
1973 /* Extended features. */
1974 PCCPUMCPUIDLEAF const pExtLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000001);
1975 if (pExtLeaf)
1976 {
1977 pFeatures->fLongMode = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
1978 pFeatures->fSysCall = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL);
1979 pFeatures->fNoExecute = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_NX);
1980 pFeatures->fLahfSahf = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
1981 pFeatures->fRdTscP = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
1982 pFeatures->fMovCr8In32Bit = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_CMPL);
1983 pFeatures->f3DNow = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_3DNOW);
1984 pFeatures->f3DNowPrefetch = (pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF)
1985 || (pExtLeaf->uEdx & ( X86_CPUID_EXT_FEATURE_EDX_LONG_MODE
1986 | X86_CPUID_AMD_FEATURE_EDX_3DNOW));
1987 }
1988
1989 /* VMX (VMXON, VMCS region and related data structures) physical address width (depends on long-mode). */
1990 pFeatures->cVmxMaxPhysAddrWidth = pFeatures->fLongMode ? pFeatures->cMaxPhysAddrWidth : 32;
1991
1992 if ( pExtLeaf
1993 && ( pFeatures->enmCpuVendor == CPUMCPUVENDOR_AMD
1994 || pFeatures->enmCpuVendor == CPUMCPUVENDOR_HYGON))
1995 {
1996 /* AMD features. */
1997 pFeatures->fMsr |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_MSR);
1998 pFeatures->fApic |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_APIC);
1999 pFeatures->fPse |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PSE);
2000 pFeatures->fPse36 |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PSE36);
2001 pFeatures->fPae |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PAE);
2002 pFeatures->fPge |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PGE);
2003 pFeatures->fPat |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PAT);
2004 pFeatures->fFxSaveRstor |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_FXSR);
2005 pFeatures->fMmx |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_MMX);
2006 pFeatures->fTsc |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_TSC);
2007 pFeatures->fIbpb |= pExtLeaf8 && (pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_IBPB);
2008 pFeatures->fAmdMmxExts = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_AXMMX);
2009 pFeatures->fXop = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_XOP);
2010 pFeatures->fSvm = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM);
2011 if (pFeatures->fSvm)
2012 {
2013 PCCPUMCPUIDLEAF pSvmLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x8000000a);
2014 AssertLogRelReturn(pSvmLeaf, VERR_CPUM_IPE_1);
2015 pFeatures->fSvmNestedPaging = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING);
2016 pFeatures->fSvmLbrVirt = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT);
2017 pFeatures->fSvmSvmLock = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_SVM_LOCK);
2018 pFeatures->fSvmNextRipSave = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
2019 pFeatures->fSvmTscRateMsr = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR);
2020 pFeatures->fSvmVmcbClean = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN);
2021 pFeatures->fSvmFlusbByAsid = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID);
2022 pFeatures->fSvmDecodeAssists = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS);
2023 pFeatures->fSvmPauseFilter = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
2024 pFeatures->fSvmPauseFilterThreshold = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
2025 pFeatures->fSvmAvic = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_AVIC);
2026 pFeatures->fSvmVirtVmsaveVmload = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD);
2027 pFeatures->fSvmVGif = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_VGIF);
2028 pFeatures->fSvmGmet = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_GMET);
2029 pFeatures->uSvmMaxAsid = pSvmLeaf->uEbx;
2030 }
2031 }
2032
2033 /*
2034 * Quirks.
2035 */
2036 pFeatures->fLeakyFxSR = pExtLeaf
2037 && (pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
2038 && ( ( pFeatures->enmCpuVendor == CPUMCPUVENDOR_AMD
2039 && pFeatures->uFamily >= 6 /* K7 and up */)
2040 || pFeatures->enmCpuVendor == CPUMCPUVENDOR_HYGON);
2041
2042 /*
2043 * Max extended (/FPU) state.
2044 */
2045 pFeatures->cbMaxExtendedState = pFeatures->fFxSaveRstor ? sizeof(X86FXSTATE) : sizeof(X86FPUSTATE);
2046 if (pFeatures->fXSaveRstor)
2047 {
2048 PCCPUMCPUIDLEAF const pXStateLeaf0 = cpumR3CpuIdFindLeafEx(paLeaves, cLeaves, 13, 0);
2049 if (pXStateLeaf0)
2050 {
2051 if ( pXStateLeaf0->uEcx >= sizeof(X86FXSTATE)
2052 && pXStateLeaf0->uEcx <= CPUM_MAX_XSAVE_AREA_SIZE
2053 && RT_ALIGN_32(pXStateLeaf0->uEcx, 8) == pXStateLeaf0->uEcx
2054 && pXStateLeaf0->uEbx >= sizeof(X86FXSTATE)
2055 && pXStateLeaf0->uEbx <= pXStateLeaf0->uEcx
2056 && RT_ALIGN_32(pXStateLeaf0->uEbx, 8) == pXStateLeaf0->uEbx)
2057 {
2058 pFeatures->cbMaxExtendedState = pXStateLeaf0->uEcx;
2059
2060 /* (paranoia:) */
2061 PCCPUMCPUIDLEAF const pXStateLeaf1 = cpumR3CpuIdFindLeafEx(paLeaves, cLeaves, 13, 1);
2062 if ( pXStateLeaf1
2063 && pXStateLeaf1->uEbx > pFeatures->cbMaxExtendedState
2064 && pXStateLeaf1->uEbx <= CPUM_MAX_XSAVE_AREA_SIZE
2065 && (pXStateLeaf1->uEcx || pXStateLeaf1->uEdx) )
2066 pFeatures->cbMaxExtendedState = pXStateLeaf1->uEbx;
2067 }
2068 else
2069 AssertLogRelMsgFailedStmt(("Unexpected max/cur XSAVE area sizes: %#x/%#x\n", pXStateLeaf0->uEcx, pXStateLeaf0->uEbx),
2070 pFeatures->fXSaveRstor = 0);
2071 }
2072 else
2073 AssertLogRelMsgFailedStmt(("Expected leaf eax=0xd/ecx=0 with the XSAVE/XRSTOR feature!\n"),
2074 pFeatures->fXSaveRstor = 0);
2075 }
2076 }
2077 else
2078 AssertLogRelReturn(cLeaves == 0, VERR_CPUM_IPE_1);
2079 return VINF_SUCCESS;
2080}
2081
2082
2083/*
2084 *
2085 * Init related code.
2086 * Init related code.
2087 * Init related code.
2088 *
2089 *
2090 */
2091#ifndef IN_VBOX_CPU_REPORT
2092
2093
2094/**
2095 * Gets an exactly matching leaf + sub-leaf in the CPUID leaf array.
2096 *
2097 * This ignores the fSubLeafMask.
2098 *
2099 * @returns Pointer to the matching leaf, or NULL if not found.
2100 * @param pCpum The CPUM instance data.
2101 * @param uLeaf The leaf to locate.
2102 * @param uSubLeaf The subleaf to locate.
2103 */
2104static PCPUMCPUIDLEAF cpumR3CpuIdGetExactLeaf(PCPUM pCpum, uint32_t uLeaf, uint32_t uSubLeaf)
2105{
2106 uint64_t uNeedle = RT_MAKE_U64(uSubLeaf, uLeaf);
2107 PCPUMCPUIDLEAF paLeaves = pCpum->GuestInfo.paCpuIdLeavesR3;
2108 uint32_t iEnd = pCpum->GuestInfo.cCpuIdLeaves;
2109 if (iEnd)
2110 {
2111 uint32_t iBegin = 0;
2112 for (;;)
2113 {
2114 uint32_t const i = (iEnd - iBegin) / 2 + iBegin;
2115 uint64_t const uCur = RT_MAKE_U64(paLeaves[i].uSubLeaf, paLeaves[i].uLeaf);
2116 if (uNeedle < uCur)
2117 {
2118 if (i > iBegin)
2119 iEnd = i;
2120 else
2121 break;
2122 }
2123 else if (uNeedle > uCur)
2124 {
2125 if (i + 1 < iEnd)
2126 iBegin = i + 1;
2127 else
2128 break;
2129 }
2130 else
2131 return &paLeaves[i];
2132 }
2133 }
2134 return NULL;
2135}
2136
2137
2138/**
2139 * Loads MSR range overrides.
2140 *
2141 * This must be called before the MSR ranges are moved from the normal heap to
2142 * the hyper heap!
2143 *
2144 * @returns VBox status code (VMSetError called).
2145 * @param pVM The cross context VM structure.
2146 * @param pMsrNode The CFGM node with the MSR overrides.
2147 */
2148static int cpumR3LoadMsrOverrides(PVM pVM, PCFGMNODE pMsrNode)
2149{
2150 for (PCFGMNODE pNode = CFGMR3GetFirstChild(pMsrNode); pNode; pNode = CFGMR3GetNextChild(pNode))
2151 {
2152 /*
2153 * Assemble a valid MSR range.
2154 */
2155 CPUMMSRRANGE MsrRange;
2156 MsrRange.offCpumCpu = 0;
2157 MsrRange.fReserved = 0;
2158
2159 int rc = CFGMR3GetName(pNode, MsrRange.szName, sizeof(MsrRange.szName));
2160 if (RT_FAILURE(rc))
2161 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry (name is probably too long): %Rrc\n", rc);
2162
2163 rc = CFGMR3QueryU32(pNode, "First", &MsrRange.uFirst);
2164 if (RT_FAILURE(rc))
2165 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying mandatory 'First' value: %Rrc\n",
2166 MsrRange.szName, rc);
2167
2168 rc = CFGMR3QueryU32Def(pNode, "Last", &MsrRange.uLast, MsrRange.uFirst);
2169 if (RT_FAILURE(rc))
2170 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Last' value: %Rrc\n",
2171 MsrRange.szName, rc);
2172
2173 char szType[32];
2174 rc = CFGMR3QueryStringDef(pNode, "Type", szType, sizeof(szType), "FixedValue");
2175 if (RT_FAILURE(rc))
2176 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Type' value: %Rrc\n",
2177 MsrRange.szName, rc);
2178 if (!RTStrICmp(szType, "FixedValue"))
2179 {
2180 MsrRange.enmRdFn = kCpumMsrRdFn_FixedValue;
2181 MsrRange.enmWrFn = kCpumMsrWrFn_IgnoreWrite;
2182
2183 rc = CFGMR3QueryU64Def(pNode, "Value", &MsrRange.uValue, 0);
2184 if (RT_FAILURE(rc))
2185 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Value' value: %Rrc\n",
2186 MsrRange.szName, rc);
2187
2188 rc = CFGMR3QueryU64Def(pNode, "WrGpMask", &MsrRange.fWrGpMask, 0);
2189 if (RT_FAILURE(rc))
2190 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrGpMask' value: %Rrc\n",
2191 MsrRange.szName, rc);
2192
2193 rc = CFGMR3QueryU64Def(pNode, "WrIgnMask", &MsrRange.fWrIgnMask, 0);
2194 if (RT_FAILURE(rc))
2195 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrIgnMask' value: %Rrc\n",
2196 MsrRange.szName, rc);
2197 }
2198 else
2199 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
2200 "Invalid MSR entry '%s': Unknown type '%s'\n", MsrRange.szName, szType);
2201
2202 /*
2203 * Insert the range into the table (replaces/splits/shrinks existing
2204 * MSR ranges).
2205 */
2206 rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
2207 &MsrRange);
2208 if (RT_FAILURE(rc))
2209 return VMSetError(pVM, rc, RT_SRC_POS, "Error adding MSR entry '%s': %Rrc\n", MsrRange.szName, rc);
2210 }
2211
2212 return VINF_SUCCESS;
2213}
2214
2215
2216/**
2217 * Loads CPUID leaf overrides.
2218 *
2219 * This must be called before the CPUID leaves are moved from the normal
2220 * heap to the hyper heap!
2221 *
2222 * @returns VBox status code (VMSetError called).
2223 * @param pVM The cross context VM structure.
2224 * @param pParentNode The CFGM node with the CPUID leaves.
2225 * @param pszLabel How to label the overrides we're loading.
2226 */
2227static int cpumR3LoadCpuIdOverrides(PVM pVM, PCFGMNODE pParentNode, const char *pszLabel)
2228{
2229 for (PCFGMNODE pNode = CFGMR3GetFirstChild(pParentNode); pNode; pNode = CFGMR3GetNextChild(pNode))
2230 {
2231 /*
2232 * Get the leaf and subleaf numbers.
2233 */
2234 char szName[128];
2235 int rc = CFGMR3GetName(pNode, szName, sizeof(szName));
2236 if (RT_FAILURE(rc))
2237 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry (name is probably too long): %Rrc\n", pszLabel, rc);
2238
2239 /* The leaf number is either specified directly or thru the node name. */
2240 uint32_t uLeaf;
2241 rc = CFGMR3QueryU32(pNode, "Leaf", &uLeaf);
2242 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
2243 {
2244 rc = RTStrToUInt32Full(szName, 16, &uLeaf);
2245 if (rc != VINF_SUCCESS)
2246 return VMSetError(pVM, VERR_INVALID_NAME, RT_SRC_POS,
2247 "Invalid %s entry: Invalid leaf number: '%s' \n", pszLabel, szName);
2248 }
2249 else if (RT_FAILURE(rc))
2250 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'Leaf' value: %Rrc\n",
2251 pszLabel, szName, rc);
2252
2253 uint32_t uSubLeaf;
2254 rc = CFGMR3QueryU32Def(pNode, "SubLeaf", &uSubLeaf, 0);
2255 if (RT_FAILURE(rc))
2256 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeaf' value: %Rrc\n",
2257 pszLabel, szName, rc);
2258
2259 uint32_t fSubLeafMask;
2260 rc = CFGMR3QueryU32Def(pNode, "SubLeafMask", &fSubLeafMask, 0);
2261 if (RT_FAILURE(rc))
2262 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeafMask' value: %Rrc\n",
2263 pszLabel, szName, rc);
2264
2265 /*
2266 * Look up the specified leaf, since the output register values
2267 * defaults to any existing values. This allows overriding a single
2268 * register, without needing to know the other values.
2269 */
2270 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, uLeaf, uSubLeaf);
2271 CPUMCPUIDLEAF Leaf;
2272 if (pLeaf)
2273 Leaf = *pLeaf;
2274 else
2275 RT_ZERO(Leaf);
2276 Leaf.uLeaf = uLeaf;
2277 Leaf.uSubLeaf = uSubLeaf;
2278 Leaf.fSubLeafMask = fSubLeafMask;
2279
2280 rc = CFGMR3QueryU32Def(pNode, "eax", &Leaf.uEax, Leaf.uEax);
2281 if (RT_FAILURE(rc))
2282 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'eax' value: %Rrc\n",
2283 pszLabel, szName, rc);
2284 rc = CFGMR3QueryU32Def(pNode, "ebx", &Leaf.uEbx, Leaf.uEbx);
2285 if (RT_FAILURE(rc))
2286 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ebx' value: %Rrc\n",
2287 pszLabel, szName, rc);
2288 rc = CFGMR3QueryU32Def(pNode, "ecx", &Leaf.uEcx, Leaf.uEcx);
2289 if (RT_FAILURE(rc))
2290 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ecx' value: %Rrc\n",
2291 pszLabel, szName, rc);
2292 rc = CFGMR3QueryU32Def(pNode, "edx", &Leaf.uEdx, Leaf.uEdx);
2293 if (RT_FAILURE(rc))
2294 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'edx' value: %Rrc\n",
2295 pszLabel, szName, rc);
2296
2297 /*
2298 * Insert the leaf into the table (replaces existing ones).
2299 */
2300 rc = cpumR3CpuIdInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, &pVM->cpum.s.GuestInfo.cCpuIdLeaves,
2301 &Leaf);
2302 if (RT_FAILURE(rc))
2303 return VMSetError(pVM, rc, RT_SRC_POS, "Error adding CPUID leaf entry '%s': %Rrc\n", szName, rc);
2304 }
2305
2306 return VINF_SUCCESS;
2307}
2308
2309
2310
2311/**
2312 * Fetches overrides for a CPUID leaf.
2313 *
2314 * @returns VBox status code.
2315 * @param pLeaf The leaf to load the overrides into.
2316 * @param pCfgNode The CFGM node containing the overrides
2317 * (/CPUM/HostCPUID/ or /CPUM/CPUID/).
2318 * @param iLeaf The CPUID leaf number.
2319 */
2320static int cpumR3CpuIdFetchLeafOverride(PCPUMCPUID pLeaf, PCFGMNODE pCfgNode, uint32_t iLeaf)
2321{
2322 PCFGMNODE pLeafNode = CFGMR3GetChildF(pCfgNode, "%RX32", iLeaf);
2323 if (pLeafNode)
2324 {
2325 uint32_t u32;
2326 int rc = CFGMR3QueryU32(pLeafNode, "eax", &u32);
2327 if (RT_SUCCESS(rc))
2328 pLeaf->uEax = u32;
2329 else
2330 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
2331
2332 rc = CFGMR3QueryU32(pLeafNode, "ebx", &u32);
2333 if (RT_SUCCESS(rc))
2334 pLeaf->uEbx = u32;
2335 else
2336 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
2337
2338 rc = CFGMR3QueryU32(pLeafNode, "ecx", &u32);
2339 if (RT_SUCCESS(rc))
2340 pLeaf->uEcx = u32;
2341 else
2342 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
2343
2344 rc = CFGMR3QueryU32(pLeafNode, "edx", &u32);
2345 if (RT_SUCCESS(rc))
2346 pLeaf->uEdx = u32;
2347 else
2348 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
2349
2350 }
2351 return VINF_SUCCESS;
2352}
2353
2354
2355/**
2356 * Load the overrides for a set of CPUID leaves.
2357 *
2358 * @returns VBox status code.
2359 * @param paLeaves The leaf array.
2360 * @param cLeaves The number of leaves.
2361 * @param uStart The start leaf number.
2362 * @param pCfgNode The CFGM node containing the overrides
2363 * (/CPUM/HostCPUID/ or /CPUM/CPUID/).
2364 */
2365static int cpumR3CpuIdInitLoadOverrideSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode)
2366{
2367 for (uint32_t i = 0; i < cLeaves; i++)
2368 {
2369 int rc = cpumR3CpuIdFetchLeafOverride(&paLeaves[i], pCfgNode, uStart + i);
2370 if (RT_FAILURE(rc))
2371 return rc;
2372 }
2373
2374 return VINF_SUCCESS;
2375}
2376
2377
2378/**
2379 * Installs the CPUID leaves and explods the data into structures like
2380 * GuestFeatures and CPUMCTX::aoffXState.
2381 *
2382 * @returns VBox status code.
2383 * @param pVM The cross context VM structure.
2384 * @param pCpum The CPUM part of @a VM.
2385 * @param paLeaves The leaves. These will be copied (but not freed).
2386 * @param cLeaves The number of leaves.
2387 * @param pMsrs The MSRs.
2388 */
2389static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCpum, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs)
2390{
2391 cpumR3CpuIdAssertOrder(paLeaves, cLeaves);
2392
2393 /*
2394 * Install the CPUID information.
2395 */
2396 AssertLogRelMsgReturn(cLeaves <= RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves),
2397 ("cLeaves=%u - max %u\n", cLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves)),
2398 VERR_CPUM_IPE_1); /** @todo better status! */
2399 if (paLeaves != pCpum->GuestInfo.aCpuIdLeaves)
2400 memcpy(pCpum->GuestInfo.aCpuIdLeaves, paLeaves, cLeaves * sizeof(paLeaves[0]));
2401 pCpum->GuestInfo.paCpuIdLeavesR3 = pCpum->GuestInfo.aCpuIdLeaves;
2402 pCpum->GuestInfo.cCpuIdLeaves = cLeaves;
2403
2404 /*
2405 * Update the default CPUID leaf if necessary.
2406 */
2407 switch (pCpum->GuestInfo.enmUnknownCpuIdMethod)
2408 {
2409 case CPUMUNKNOWNCPUID_LAST_STD_LEAF:
2410 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX:
2411 {
2412 /* We don't use CPUID(0).eax here because of the NT hack that only
2413 changes that value without actually removing any leaves. */
2414 uint32_t i = 0;
2415 if ( pCpum->GuestInfo.cCpuIdLeaves > 0
2416 && pCpum->GuestInfo.paCpuIdLeavesR3[0].uLeaf <= UINT32_C(0xff))
2417 {
2418 while ( i + 1 < pCpum->GuestInfo.cCpuIdLeaves
2419 && pCpum->GuestInfo.paCpuIdLeavesR3[i + 1].uLeaf <= UINT32_C(0xff))
2420 i++;
2421 pCpum->GuestInfo.DefCpuId.uEax = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEax;
2422 pCpum->GuestInfo.DefCpuId.uEbx = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEbx;
2423 pCpum->GuestInfo.DefCpuId.uEcx = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEcx;
2424 pCpum->GuestInfo.DefCpuId.uEdx = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEdx;
2425 }
2426 break;
2427 }
2428 default:
2429 break;
2430 }
2431
2432 /*
2433 * Explode the guest CPU features.
2434 */
2435 int rc = cpumR3CpuIdExplodeFeatures(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, pMsrs,
2436 &pCpum->GuestFeatures);
2437 AssertLogRelRCReturn(rc, rc);
2438
2439 /*
2440 * Adjust the scalable bus frequency according to the CPUID information
2441 * we're now using.
2442 */
2443 if (CPUMMICROARCH_IS_INTEL_CORE7(pVM->cpum.s.GuestFeatures.enmMicroarch))
2444 pCpum->GuestInfo.uScalableBusFreq = pCpum->GuestFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge
2445 ? UINT64_C(100000000) /* 100MHz */
2446 : UINT64_C(133333333); /* 133MHz */
2447
2448 /*
2449 * Populate the legacy arrays. Currently used for everything, later only
2450 * for patch manager.
2451 */
2452 struct { PCPUMCPUID paCpuIds; uint32_t cCpuIds, uBase; } aOldRanges[] =
2453 {
2454 { pCpum->aGuestCpuIdPatmStd, RT_ELEMENTS(pCpum->aGuestCpuIdPatmStd), 0x00000000 },
2455 { pCpum->aGuestCpuIdPatmExt, RT_ELEMENTS(pCpum->aGuestCpuIdPatmExt), 0x80000000 },
2456 { pCpum->aGuestCpuIdPatmCentaur, RT_ELEMENTS(pCpum->aGuestCpuIdPatmCentaur), 0xc0000000 },
2457 };
2458 for (uint32_t i = 0; i < RT_ELEMENTS(aOldRanges); i++)
2459 {
2460 uint32_t cLeft = aOldRanges[i].cCpuIds;
2461 uint32_t uLeaf = aOldRanges[i].uBase + cLeft;
2462 PCPUMCPUID pLegacyLeaf = &aOldRanges[i].paCpuIds[cLeft];
2463 while (cLeft-- > 0)
2464 {
2465 uLeaf--;
2466 pLegacyLeaf--;
2467
2468 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(pCpum, uLeaf, 0 /* uSubLeaf */);
2469 if (pLeaf)
2470 {
2471 pLegacyLeaf->uEax = pLeaf->uEax;
2472 pLegacyLeaf->uEbx = pLeaf->uEbx;
2473 pLegacyLeaf->uEcx = pLeaf->uEcx;
2474 pLegacyLeaf->uEdx = pLeaf->uEdx;
2475 }
2476 else
2477 *pLegacyLeaf = pCpum->GuestInfo.DefCpuId;
2478 }
2479 }
2480
2481 /*
2482 * Configure XSAVE offsets according to the CPUID info and set the feature flags.
2483 */
2484 PVMCPU pVCpu0 = pVM->apCpusR3[0];
2485 AssertCompile(sizeof(pVCpu0->cpum.s.Guest.abXState) == CPUM_MAX_XSAVE_AREA_SIZE);
2486 memset(&pVCpu0->cpum.s.Guest.aoffXState[0], 0xff, sizeof(pVCpu0->cpum.s.Guest.aoffXState));
2487 pVCpu0->cpum.s.Guest.aoffXState[XSAVE_C_X87_BIT] = 0;
2488 pVCpu0->cpum.s.Guest.aoffXState[XSAVE_C_SSE_BIT] = 0;
2489 for (uint32_t iComponent = XSAVE_C_SSE_BIT + 1; iComponent < 63; iComponent++)
2490 if (pCpum->fXStateGuestMask & RT_BIT_64(iComponent))
2491 {
2492 PCPUMCPUIDLEAF pSubLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0xd, iComponent);
2493 AssertLogRelMsgReturn(pSubLeaf, ("iComponent=%#x\n", iComponent), VERR_CPUM_IPE_1);
2494 AssertLogRelMsgReturn(pSubLeaf->fSubLeafMask >= iComponent, ("iComponent=%#x\n", iComponent), VERR_CPUM_IPE_1);
2495 AssertLogRelMsgReturn( pSubLeaf->uEax > 0
2496 && pSubLeaf->uEbx >= CPUM_MIN_XSAVE_AREA_SIZE
2497 && pSubLeaf->uEax <= pCpum->GuestFeatures.cbMaxExtendedState
2498 && pSubLeaf->uEbx <= pCpum->GuestFeatures.cbMaxExtendedState
2499 && pSubLeaf->uEbx + pSubLeaf->uEax <= pCpum->GuestFeatures.cbMaxExtendedState,
2500 ("iComponent=%#x eax=%#x ebx=%#x cbMax=%#x\n", iComponent, pSubLeaf->uEax, pSubLeaf->uEbx,
2501 pCpum->GuestFeatures.cbMaxExtendedState),
2502 VERR_CPUM_IPE_1);
2503 pVCpu0->cpum.s.Guest.aoffXState[iComponent] = pSubLeaf->uEbx;
2504 }
2505
2506 /* Copy the CPU #0 data to the other CPUs. */
2507 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
2508 {
2509 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
2510 memcpy(&pVCpu->cpum.s.Guest.aoffXState[0], &pVCpu0->cpum.s.Guest.aoffXState[0], sizeof(pVCpu0->cpum.s.Guest.aoffXState));
2511 }
2512
2513 return VINF_SUCCESS;
2514}
2515
2516
2517/** @name Instruction Set Extension Options
2518 * @{ */
2519/** Configuration option type (extended boolean, really). */
2520typedef uint8_t CPUMISAEXTCFG;
2521/** Always disable the extension. */
2522#define CPUMISAEXTCFG_DISABLED false
2523/** Enable the extension if it's supported by the host CPU. */
2524#define CPUMISAEXTCFG_ENABLED_SUPPORTED true
2525/** Enable the extension if it's supported by the host CPU, but don't let
2526 * the portable CPUID feature disable it. */
2527#define CPUMISAEXTCFG_ENABLED_PORTABLE UINT8_C(127)
2528/** Always enable the extension. */
2529#define CPUMISAEXTCFG_ENABLED_ALWAYS UINT8_C(255)
2530/** @} */
2531
2532/**
2533 * CPUID Configuration (from CFGM).
2534 *
2535 * @remarks The members aren't document since we would only be duplicating the
2536 * \@cfgm entries in cpumR3CpuIdReadConfig.
2537 */
2538typedef struct CPUMCPUIDCONFIG
2539{
2540 bool fNt4LeafLimit;
2541 bool fInvariantTsc;
2542 bool fForceVme;
2543 bool fNestedHWVirt;
2544
2545 CPUMISAEXTCFG enmCmpXchg16b;
2546 CPUMISAEXTCFG enmMonitor;
2547 CPUMISAEXTCFG enmMWaitExtensions;
2548 CPUMISAEXTCFG enmSse41;
2549 CPUMISAEXTCFG enmSse42;
2550 CPUMISAEXTCFG enmAvx;
2551 CPUMISAEXTCFG enmAvx2;
2552 CPUMISAEXTCFG enmXSave;
2553 CPUMISAEXTCFG enmAesNi;
2554 CPUMISAEXTCFG enmPClMul;
2555 CPUMISAEXTCFG enmPopCnt;
2556 CPUMISAEXTCFG enmMovBe;
2557 CPUMISAEXTCFG enmRdRand;
2558 CPUMISAEXTCFG enmRdSeed;
2559 CPUMISAEXTCFG enmCLFlushOpt;
2560 CPUMISAEXTCFG enmFsGsBase;
2561 CPUMISAEXTCFG enmPcid;
2562 CPUMISAEXTCFG enmInvpcid;
2563 CPUMISAEXTCFG enmFlushCmdMsr;
2564 CPUMISAEXTCFG enmMdsClear;
2565 CPUMISAEXTCFG enmArchCapMsr;
2566
2567 CPUMISAEXTCFG enmAbm;
2568 CPUMISAEXTCFG enmSse4A;
2569 CPUMISAEXTCFG enmMisAlnSse;
2570 CPUMISAEXTCFG enm3dNowPrf;
2571 CPUMISAEXTCFG enmAmdExtMmx;
2572
2573 uint32_t uMaxStdLeaf;
2574 uint32_t uMaxExtLeaf;
2575 uint32_t uMaxCentaurLeaf;
2576 uint32_t uMaxIntelFamilyModelStep;
2577 char szCpuName[128];
2578} CPUMCPUIDCONFIG;
2579/** Pointer to CPUID config (from CFGM). */
2580typedef CPUMCPUIDCONFIG *PCPUMCPUIDCONFIG;
2581
2582
2583/**
2584 * Mini CPU selection support for making Mac OS X happy.
2585 *
2586 * Executes the /CPUM/MaxIntelFamilyModelStep config.
2587 *
2588 * @param pCpum The CPUM instance data.
2589 * @param pConfig The CPUID configuration we've read from CFGM.
2590 */
2591static void cpumR3CpuIdLimitIntelFamModStep(PCPUM pCpum, PCPUMCPUIDCONFIG pConfig)
2592{
2593 if (pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
2594 {
2595 PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
2596 uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pStdFeatureLeaf->uEax),
2597 ASMGetCpuModelIntel(pStdFeatureLeaf->uEax),
2598 ASMGetCpuFamily(pStdFeatureLeaf->uEax),
2599 0);
2600 uint32_t uMaxIntelFamilyModelStep = pConfig->uMaxIntelFamilyModelStep;
2601 if (pConfig->uMaxIntelFamilyModelStep < uCurIntelFamilyModelStep)
2602 {
2603 uint32_t uNew = pStdFeatureLeaf->uEax & UINT32_C(0xf0003000);
2604 uNew |= RT_BYTE1(uMaxIntelFamilyModelStep) & 0xf; /* stepping */
2605 uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) & 0xf) << 4; /* 4 low model bits */
2606 uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) >> 4) << 16; /* 4 high model bits */
2607 uNew |= (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf) << 8; /* 4 low family bits */
2608 if (RT_BYTE3(uMaxIntelFamilyModelStep) > 0xf) /* 8 high family bits, using intel's suggested calculation. */
2609 uNew |= ( (RT_BYTE3(uMaxIntelFamilyModelStep) - (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf)) & 0xff ) << 20;
2610 LogRel(("CPU: CPUID(0).EAX %#x -> %#x (uMaxIntelFamilyModelStep=%#x, uCurIntelFamilyModelStep=%#x\n",
2611 pStdFeatureLeaf->uEax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));
2612 pStdFeatureLeaf->uEax = uNew;
2613 }
2614 }
2615}
2616
2617
2618
2619/**
2620 * Limit it the number of entries, zapping the remainder.
2621 *
2622 * The limits are masking off stuff about power saving and similar, this
2623 * is perhaps a bit crudely done as there is probably some relatively harmless
2624 * info too in these leaves (like words about having a constant TSC).
2625 *
2626 * @param pCpum The CPUM instance data.
2627 * @param pConfig The CPUID configuration we've read from CFGM.
2628 */
2629static void cpumR3CpuIdLimitLeaves(PCPUM pCpum, PCPUMCPUIDCONFIG pConfig)
2630{
2631 /*
2632 * Standard leaves.
2633 */
2634 uint32_t uSubLeaf = 0;
2635 PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0, uSubLeaf);
2636 if (pCurLeaf)
2637 {
2638 uint32_t uLimit = pCurLeaf->uEax;
2639 if (uLimit <= UINT32_C(0x000fffff))
2640 {
2641 if (uLimit > pConfig->uMaxStdLeaf)
2642 {
2643 pCurLeaf->uEax = uLimit = pConfig->uMaxStdLeaf;
2644 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2645 uLimit + 1, UINT32_C(0x000fffff));
2646 }
2647
2648 /* NT4 hack, no zapping of extra leaves here. */
2649 if (pConfig->fNt4LeafLimit && uLimit > 3)
2650 pCurLeaf->uEax = uLimit = 3;
2651
2652 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x00000000), ++uSubLeaf)) != NULL)
2653 pCurLeaf->uEax = uLimit;
2654 }
2655 else
2656 {
2657 LogRel(("CPUID: Invalid standard range: %#x\n", uLimit));
2658 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2659 UINT32_C(0x00000000), UINT32_C(0x0fffffff));
2660 }
2661 }
2662
2663 /*
2664 * Extended leaves.
2665 */
2666 uSubLeaf = 0;
2667 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000000), uSubLeaf);
2668 if (pCurLeaf)
2669 {
2670 uint32_t uLimit = pCurLeaf->uEax;
2671 if ( uLimit >= UINT32_C(0x80000000)
2672 && uLimit <= UINT32_C(0x800fffff))
2673 {
2674 if (uLimit > pConfig->uMaxExtLeaf)
2675 {
2676 pCurLeaf->uEax = uLimit = pConfig->uMaxExtLeaf;
2677 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2678 uLimit + 1, UINT32_C(0x800fffff));
2679 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000000), ++uSubLeaf)) != NULL)
2680 pCurLeaf->uEax = uLimit;
2681 }
2682 }
2683 else
2684 {
2685 LogRel(("CPUID: Invalid extended range: %#x\n", uLimit));
2686 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2687 UINT32_C(0x80000000), UINT32_C(0x8ffffffd));
2688 }
2689 }
2690
2691 /*
2692 * Centaur leaves (VIA).
2693 */
2694 uSubLeaf = 0;
2695 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0xc0000000), uSubLeaf);
2696 if (pCurLeaf)
2697 {
2698 uint32_t uLimit = pCurLeaf->uEax;
2699 if ( uLimit >= UINT32_C(0xc0000000)
2700 && uLimit <= UINT32_C(0xc00fffff))
2701 {
2702 if (uLimit > pConfig->uMaxCentaurLeaf)
2703 {
2704 pCurLeaf->uEax = uLimit = pConfig->uMaxCentaurLeaf;
2705 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2706 uLimit + 1, UINT32_C(0xcfffffff));
2707 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0xc0000000), ++uSubLeaf)) != NULL)
2708 pCurLeaf->uEax = uLimit;
2709 }
2710 }
2711 else
2712 {
2713 LogRel(("CPUID: Invalid centaur range: %#x\n", uLimit));
2714 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2715 UINT32_C(0xc0000000), UINT32_C(0xcfffffff));
2716 }
2717 }
2718}
2719
2720
2721/**
2722 * Clears a CPUID leaf and all sub-leaves (to zero).
2723 *
2724 * @param pCpum The CPUM instance data.
2725 * @param uLeaf The leaf to clear.
2726 */
2727static void cpumR3CpuIdZeroLeaf(PCPUM pCpum, uint32_t uLeaf)
2728{
2729 uint32_t uSubLeaf = 0;
2730 PCPUMCPUIDLEAF pCurLeaf;
2731 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, uLeaf, uSubLeaf)) != NULL)
2732 {
2733 pCurLeaf->uEax = 0;
2734 pCurLeaf->uEbx = 0;
2735 pCurLeaf->uEcx = 0;
2736 pCurLeaf->uEdx = 0;
2737 uSubLeaf++;
2738 }
2739}
2740
2741
2742/**
2743 * Used by cpumR3CpuIdSanitize to ensure that we don't have any sub-leaves for
2744 * the given leaf.
2745 *
2746 * @returns pLeaf.
2747 * @param pCpum The CPUM instance data.
2748 * @param pLeaf The leaf to ensure is alone with it's EAX input value.
2749 */
2750static PCPUMCPUIDLEAF cpumR3CpuIdMakeSingleLeaf(PCPUM pCpum, PCPUMCPUIDLEAF pLeaf)
2751{
2752 Assert((uintptr_t)(pLeaf - pCpum->GuestInfo.paCpuIdLeavesR3) < pCpum->GuestInfo.cCpuIdLeaves);
2753 if (pLeaf->fSubLeafMask != 0)
2754 {
2755 /*
2756 * Figure out how many sub-leaves in need of removal (we'll keep the first).
2757 * Log everything while we're at it.
2758 */
2759 LogRel(("CPUM:\n"
2760 "CPUM: Unexpected CPUID sub-leaves for leaf %#x; fSubLeafMask=%#x\n", pLeaf->uLeaf, pLeaf->fSubLeafMask));
2761 PCPUMCPUIDLEAF pLast = &pCpum->GuestInfo.paCpuIdLeavesR3[pCpum->GuestInfo.cCpuIdLeaves - 1];
2762 PCPUMCPUIDLEAF pSubLeaf = pLeaf;
2763 for (;;)
2764 {
2765 LogRel(("CPUM: %08x/%08x: %08x %08x %08x %08x; flags=%#x mask=%#x\n",
2766 pSubLeaf->uLeaf, pSubLeaf->uSubLeaf,
2767 pSubLeaf->uEax, pSubLeaf->uEbx, pSubLeaf->uEcx, pSubLeaf->uEdx,
2768 pSubLeaf->fFlags, pSubLeaf->fSubLeafMask));
2769 if (pSubLeaf == pLast || pSubLeaf[1].uLeaf != pLeaf->uLeaf)
2770 break;
2771 pSubLeaf++;
2772 }
2773 LogRel(("CPUM:\n"));
2774
2775 /*
2776 * Remove the offending sub-leaves.
2777 */
2778 if (pSubLeaf != pLeaf)
2779 {
2780 if (pSubLeaf != pLast)
2781 memmove(pLeaf + 1, pSubLeaf + 1, (uintptr_t)pLast - (uintptr_t)pSubLeaf);
2782 pCpum->GuestInfo.cCpuIdLeaves -= (uint32_t)(pSubLeaf - pLeaf);
2783 }
2784
2785 /*
2786 * Convert the first sub-leaf into a single leaf.
2787 */
2788 pLeaf->uSubLeaf = 0;
2789 pLeaf->fSubLeafMask = 0;
2790 }
2791 return pLeaf;
2792}
2793
2794
2795/**
2796 * Sanitizes and adjust the CPUID leaves.
2797 *
2798 * Drop features that aren't virtualized (or virtualizable). Adjust information
2799 * and capabilities to fit the virtualized hardware. Remove information the
2800 * guest shouldn't have (because it's wrong in the virtual world or because it
2801 * gives away host details) or that we don't have documentation for and no idea
2802 * what means.
2803 *
2804 * @returns VBox status code.
2805 * @param pVM The cross context VM structure (for cCpus).
2806 * @param pCpum The CPUM instance data.
2807 * @param pConfig The CPUID configuration we've read from CFGM.
2808 */
2809static int cpumR3CpuIdSanitize(PVM pVM, PCPUM pCpum, PCPUMCPUIDCONFIG pConfig)
2810{
2811#define PORTABLE_CLEAR_BITS_WHEN(Lvl, a_pLeafReg, FeatNm, fMask, uValue) \
2812 if ( pCpum->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fMask)) == (uValue) ) \
2813 { \
2814 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: %#x -> 0\n", (a_pLeafReg) & (fMask))); \
2815 (a_pLeafReg) &= ~(uint32_t)(fMask); \
2816 }
2817#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, a_pLeafReg, FeatNm, fBitMask) \
2818 if ( pCpum->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fBitMask)) ) \
2819 { \
2820 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
2821 (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
2822 }
2823#define PORTABLE_DISABLE_FEATURE_BIT_CFG(Lvl, a_pLeafReg, FeatNm, fBitMask, enmConfig) \
2824 if ( pCpum->u8PortableCpuIdLevel >= (Lvl) \
2825 && ((a_pLeafReg) & (fBitMask)) \
2826 && (enmConfig) != CPUMISAEXTCFG_ENABLED_PORTABLE ) \
2827 { \
2828 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
2829 (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
2830 }
2831 Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID);
2832
2833 /* The CPUID entries we start with here isn't necessarily the ones of the host, so we
2834 must consult HostFeatures when processing CPUMISAEXTCFG variables. */
2835 PCCPUMFEATURES pHstFeat = &pCpum->HostFeatures;
2836#define PASSTHRU_FEATURE(enmConfig, fHostFeature, fConst) \
2837 ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) ? (fConst) : 0)
2838#define PASSTHRU_FEATURE_EX(enmConfig, fHostFeature, fAndExpr, fConst) \
2839 ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) && (fAndExpr) ? (fConst) : 0)
2840#define PASSTHRU_FEATURE_TODO(enmConfig, fConst) ((enmConfig) ? (fConst) : 0)
2841
2842 /* Cpuid 1:
2843 * EAX: CPU model, family and stepping.
2844 *
2845 * ECX + EDX: Supported features. Only report features we can support.
2846 * Note! When enabling new features the Synthetic CPU and Portable CPUID
2847 * options may require adjusting (i.e. stripping what was enabled).
2848 *
2849 * EBX: Branding, CLFLUSH line size, logical processors per package and
2850 * initial APIC ID.
2851 */
2852 PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0); /* Note! Must refetch when used later. */
2853 AssertLogRelReturn(pStdFeatureLeaf, VERR_CPUM_IPE_2);
2854 pStdFeatureLeaf = cpumR3CpuIdMakeSingleLeaf(pCpum, pStdFeatureLeaf);
2855
2856 pStdFeatureLeaf->uEdx &= X86_CPUID_FEATURE_EDX_FPU
2857 | X86_CPUID_FEATURE_EDX_VME
2858 | X86_CPUID_FEATURE_EDX_DE
2859 | X86_CPUID_FEATURE_EDX_PSE
2860 | X86_CPUID_FEATURE_EDX_TSC
2861 | X86_CPUID_FEATURE_EDX_MSR
2862 //| X86_CPUID_FEATURE_EDX_PAE - set later if configured.
2863 | X86_CPUID_FEATURE_EDX_MCE
2864 | X86_CPUID_FEATURE_EDX_CX8
2865 //| X86_CPUID_FEATURE_EDX_APIC - set by the APIC device if present.
2866 //| RT_BIT_32(10) - not defined
2867 /* Note! we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see @bugref{1757}) */
2868 //| X86_CPUID_FEATURE_EDX_SEP
2869 | X86_CPUID_FEATURE_EDX_MTRR
2870 | X86_CPUID_FEATURE_EDX_PGE
2871 | X86_CPUID_FEATURE_EDX_MCA
2872 | X86_CPUID_FEATURE_EDX_CMOV
2873 | X86_CPUID_FEATURE_EDX_PAT /* 16 */
2874 | X86_CPUID_FEATURE_EDX_PSE36
2875 //| X86_CPUID_FEATURE_EDX_PSN - no serial number.
2876 | X86_CPUID_FEATURE_EDX_CLFSH
2877 //| RT_BIT_32(20) - not defined
2878 //| X86_CPUID_FEATURE_EDX_DS - no debug store.
2879 //| X86_CPUID_FEATURE_EDX_ACPI - not supported (not DevAcpi, right?).
2880 | X86_CPUID_FEATURE_EDX_MMX
2881 | X86_CPUID_FEATURE_EDX_FXSR
2882 | X86_CPUID_FEATURE_EDX_SSE
2883 | X86_CPUID_FEATURE_EDX_SSE2
2884 //| X86_CPUID_FEATURE_EDX_SS - no self snoop.
2885 | X86_CPUID_FEATURE_EDX_HTT
2886 //| X86_CPUID_FEATURE_EDX_TM - no thermal monitor.
2887 //| RT_BIT_32(30) - not defined
2888 //| X86_CPUID_FEATURE_EDX_PBE - no pending break enabled.
2889 ;
2890 pStdFeatureLeaf->uEcx &= X86_CPUID_FEATURE_ECX_SSE3
2891 | PASSTHRU_FEATURE_TODO(pConfig->enmPClMul, X86_CPUID_FEATURE_ECX_PCLMUL)
2892 //| X86_CPUID_FEATURE_ECX_DTES64 - not implemented yet.
2893 /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
2894 | PASSTHRU_FEATURE_EX(pConfig->enmMonitor, pHstFeat->fMonitorMWait, pVM->cCpus == 1, X86_CPUID_FEATURE_ECX_MONITOR)
2895 //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
2896 | (pConfig->fNestedHWVirt ? X86_CPUID_FEATURE_ECX_VMX : 0)
2897 //| X86_CPUID_FEATURE_ECX_SMX - not virtualized yet.
2898 //| X86_CPUID_FEATURE_ECX_EST - no extended speed step.
2899 //| X86_CPUID_FEATURE_ECX_TM2 - no thermal monitor 2.
2900 | X86_CPUID_FEATURE_ECX_SSSE3
2901 //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
2902 //| X86_CPUID_FEATURE_ECX_FMA - not implemented yet.
2903 | PASSTHRU_FEATURE(pConfig->enmCmpXchg16b, pHstFeat->fMovCmpXchg16b, X86_CPUID_FEATURE_ECX_CX16)
2904 /* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
2905 //| X86_CPUID_FEATURE_ECX_TPRUPDATE
2906 //| X86_CPUID_FEATURE_ECX_PDCM - not implemented yet.
2907 | PASSTHRU_FEATURE(pConfig->enmPcid, pHstFeat->fPcid, X86_CPUID_FEATURE_ECX_PCID)
2908 //| X86_CPUID_FEATURE_ECX_DCA - not implemented yet.
2909 | PASSTHRU_FEATURE(pConfig->enmSse41, pHstFeat->fSse41, X86_CPUID_FEATURE_ECX_SSE4_1)
2910 | PASSTHRU_FEATURE(pConfig->enmSse42, pHstFeat->fSse42, X86_CPUID_FEATURE_ECX_SSE4_2)
2911 //| X86_CPUID_FEATURE_ECX_X2APIC - turned on later by the device if enabled.
2912 | PASSTHRU_FEATURE_TODO(pConfig->enmMovBe, X86_CPUID_FEATURE_ECX_MOVBE)
2913 | PASSTHRU_FEATURE_TODO(pConfig->enmPopCnt, X86_CPUID_FEATURE_ECX_POPCNT)
2914 //| X86_CPUID_FEATURE_ECX_TSCDEADL - not implemented yet.
2915 | PASSTHRU_FEATURE_TODO(pConfig->enmAesNi, X86_CPUID_FEATURE_ECX_AES)
2916 | PASSTHRU_FEATURE(pConfig->enmXSave, pHstFeat->fXSaveRstor, X86_CPUID_FEATURE_ECX_XSAVE)
2917 //| X86_CPUID_FEATURE_ECX_OSXSAVE - mirrors CR4.OSXSAVE state, set dynamically.
2918 | PASSTHRU_FEATURE(pConfig->enmAvx, pHstFeat->fAvx, X86_CPUID_FEATURE_ECX_AVX)
2919 //| X86_CPUID_FEATURE_ECX_F16C - not implemented yet.
2920 | PASSTHRU_FEATURE_TODO(pConfig->enmRdRand, X86_CPUID_FEATURE_ECX_RDRAND)
2921 //| X86_CPUID_FEATURE_ECX_HVP - Set explicitly later.
2922 ;
2923
2924 /* Mask out PCID unless FSGSBASE is exposed due to a bug in Windows 10 SMP guests, see @bugref{9089#c15}. */
2925 if ( !pVM->cpum.s.GuestFeatures.fFsGsBase
2926 && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_PCID))
2927 {
2928 pStdFeatureLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_PCID;
2929 LogRel(("CPUM: Disabled PCID without FSGSBASE to workaround buggy guests\n"));
2930 }
2931
2932 if (pCpum->u8PortableCpuIdLevel > 0)
2933 {
2934 PORTABLE_CLEAR_BITS_WHEN(1, pStdFeatureLeaf->uEax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
2935 PORTABLE_DISABLE_FEATURE_BIT( 1, pStdFeatureLeaf->uEcx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
2936 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, PCID, X86_CPUID_FEATURE_ECX_PCID, pConfig->enmPcid);
2937 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, SSE4_1, X86_CPUID_FEATURE_ECX_SSE4_1, pConfig->enmSse41);
2938 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, SSE4_2, X86_CPUID_FEATURE_ECX_SSE4_2, pConfig->enmSse42);
2939 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, MOVBE, X86_CPUID_FEATURE_ECX_MOVBE, pConfig->enmMovBe);
2940 PORTABLE_DISABLE_FEATURE_BIT( 1, pStdFeatureLeaf->uEcx, AES, X86_CPUID_FEATURE_ECX_AES);
2941 PORTABLE_DISABLE_FEATURE_BIT( 1, pStdFeatureLeaf->uEcx, VMX, X86_CPUID_FEATURE_ECX_VMX);
2942 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, PCLMUL, X86_CPUID_FEATURE_ECX_PCLMUL, pConfig->enmPClMul);
2943 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, POPCNT, X86_CPUID_FEATURE_ECX_POPCNT, pConfig->enmPopCnt);
2944 PORTABLE_DISABLE_FEATURE_BIT( 1, pStdFeatureLeaf->uEcx, F16C, X86_CPUID_FEATURE_ECX_F16C);
2945 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, XSAVE, X86_CPUID_FEATURE_ECX_XSAVE, pConfig->enmXSave);
2946 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, AVX, X86_CPUID_FEATURE_ECX_AVX, pConfig->enmAvx);
2947 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, RDRAND, X86_CPUID_FEATURE_ECX_RDRAND, pConfig->enmRdRand);
2948 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, CX16, X86_CPUID_FEATURE_ECX_CX16, pConfig->enmCmpXchg16b);
2949 PORTABLE_DISABLE_FEATURE_BIT( 2, pStdFeatureLeaf->uEcx, SSE3, X86_CPUID_FEATURE_ECX_SSE3);
2950 PORTABLE_DISABLE_FEATURE_BIT( 3, pStdFeatureLeaf->uEdx, SSE2, X86_CPUID_FEATURE_EDX_SSE2);
2951 PORTABLE_DISABLE_FEATURE_BIT( 3, pStdFeatureLeaf->uEdx, SSE, X86_CPUID_FEATURE_EDX_SSE);
2952 PORTABLE_DISABLE_FEATURE_BIT( 3, pStdFeatureLeaf->uEdx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
2953 PORTABLE_DISABLE_FEATURE_BIT( 3, pStdFeatureLeaf->uEdx, CMOV, X86_CPUID_FEATURE_EDX_CMOV);
2954
2955 Assert(!(pStdFeatureLeaf->uEdx & ( X86_CPUID_FEATURE_EDX_SEP
2956 | X86_CPUID_FEATURE_EDX_PSN
2957 | X86_CPUID_FEATURE_EDX_DS
2958 | X86_CPUID_FEATURE_EDX_ACPI
2959 | X86_CPUID_FEATURE_EDX_SS
2960 | X86_CPUID_FEATURE_EDX_TM
2961 | X86_CPUID_FEATURE_EDX_PBE
2962 )));
2963 Assert(!(pStdFeatureLeaf->uEcx & ( X86_CPUID_FEATURE_ECX_DTES64
2964 | X86_CPUID_FEATURE_ECX_CPLDS
2965 | X86_CPUID_FEATURE_ECX_AES
2966 | X86_CPUID_FEATURE_ECX_VMX
2967 | X86_CPUID_FEATURE_ECX_SMX
2968 | X86_CPUID_FEATURE_ECX_EST
2969 | X86_CPUID_FEATURE_ECX_TM2
2970 | X86_CPUID_FEATURE_ECX_CNTXID
2971 | X86_CPUID_FEATURE_ECX_FMA
2972 | X86_CPUID_FEATURE_ECX_TPRUPDATE
2973 | X86_CPUID_FEATURE_ECX_PDCM
2974 | X86_CPUID_FEATURE_ECX_DCA
2975 | X86_CPUID_FEATURE_ECX_OSXSAVE
2976 )));
2977 }
2978
2979 /* Set up APIC ID for CPU 0, configure multi core/threaded smp. */
2980 pStdFeatureLeaf->uEbx &= UINT32_C(0x0000ffff); /* (APIC-ID := 0 and #LogCpus := 0) */
2981
2982 /* The HTT bit is architectural and does not directly indicate hyper-threading or multiple cores;
2983 * it was set even on single-core/non-HT Northwood P4s for example. The HTT bit only means that the
2984 * information in EBX[23:16] (max number of addressable logical processor IDs) is valid.
2985 */
2986#ifdef VBOX_WITH_MULTI_CORE
2987 if (pVM->cCpus > 1)
2988 pStdFeatureLeaf->uEdx |= X86_CPUID_FEATURE_EDX_HTT; /* Force if emulating a multi-core CPU. */
2989#endif
2990 if (pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_HTT)
2991 {
2992 /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU
2993 core times the number of CPU cores per processor */
2994#ifdef VBOX_WITH_MULTI_CORE
2995 pStdFeatureLeaf->uEbx |= pVM->cCpus <= 0xff ? (pVM->cCpus << 16) : UINT32_C(0x00ff0000);
2996#else
2997 /* Single logical processor in a package. */
2998 pStdFeatureLeaf->uEbx |= (1 << 16);
2999#endif
3000 }
3001
3002 uint32_t uMicrocodeRev;
3003 int rc = SUPR3QueryMicrocodeRev(&uMicrocodeRev);
3004 if (RT_SUCCESS(rc))
3005 {
3006 LogRel(("CPUM: Microcode revision 0x%08X\n", uMicrocodeRev));
3007 }
3008 else
3009 {
3010 uMicrocodeRev = 0;
3011 LogRel(("CPUM: Failed to query microcode revision. rc=%Rrc\n", rc));
3012 }
3013
3014 /* Mask out the VME capability on certain CPUs, unless overridden by fForceVme.
3015 * VME bug was fixed in AGESA 1.0.0.6, microcode patch level 8001126.
3016 */
3017 if ( ( pVM->cpum.s.GuestFeatures.enmMicroarch == kCpumMicroarch_AMD_Zen_Ryzen
3018 /** @todo The following ASSUMES that Hygon uses the same version numbering
3019 * as AMD and that they shipped buggy firmware. */
3020 || pVM->cpum.s.GuestFeatures.enmMicroarch == kCpumMicroarch_Hygon_Dhyana)
3021 && uMicrocodeRev < 0x8001126
3022 && !pConfig->fForceVme)
3023 {
3024 /** @todo The above is a very coarse test but at the moment we don't know any better (see @bugref{8852}). */
3025 LogRel(("CPUM: Zen VME workaround engaged\n"));
3026 pStdFeatureLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_VME;
3027 }
3028
3029 /* Force standard feature bits. */
3030 if (pConfig->enmPClMul == CPUMISAEXTCFG_ENABLED_ALWAYS)
3031 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_PCLMUL;
3032 if (pConfig->enmMonitor == CPUMISAEXTCFG_ENABLED_ALWAYS)
3033 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_MONITOR;
3034 if (pConfig->enmCmpXchg16b == CPUMISAEXTCFG_ENABLED_ALWAYS)
3035 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_CX16;
3036 if (pConfig->enmSse41 == CPUMISAEXTCFG_ENABLED_ALWAYS)
3037 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_SSE4_1;
3038 if (pConfig->enmSse42 == CPUMISAEXTCFG_ENABLED_ALWAYS)
3039 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_SSE4_2;
3040 if (pConfig->enmMovBe == CPUMISAEXTCFG_ENABLED_ALWAYS)
3041 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_MOVBE;
3042 if (pConfig->enmPopCnt == CPUMISAEXTCFG_ENABLED_ALWAYS)
3043 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_POPCNT;
3044 if (pConfig->enmAesNi == CPUMISAEXTCFG_ENABLED_ALWAYS)
3045 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_AES;
3046 if (pConfig->enmXSave == CPUMISAEXTCFG_ENABLED_ALWAYS)
3047 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_XSAVE;
3048 if (pConfig->enmAvx == CPUMISAEXTCFG_ENABLED_ALWAYS)
3049 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_AVX;
3050 if (pConfig->enmRdRand == CPUMISAEXTCFG_ENABLED_ALWAYS)
3051 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_RDRAND;
3052
3053 pStdFeatureLeaf = NULL; /* Must refetch! */
3054
3055 /* Cpuid 0x80000001: (Similar, but in no way identical to 0x00000001.)
3056 * AMD:
3057 * EAX: CPU model, family and stepping.
3058 *
3059 * ECX + EDX: Supported features. Only report features we can support.
3060 * Note! When enabling new features the Synthetic CPU and Portable CPUID
3061 * options may require adjusting (i.e. stripping what was enabled).
3062 * ASSUMES that this is ALWAYS the AMD defined feature set if present.
3063 *
3064 * EBX: Branding ID and package type (or reserved).
3065 *
3066 * Intel and probably most others:
3067 * EAX: 0
3068 * EBX: 0
3069 * ECX + EDX: Subset of AMD features, mainly for AMD64 support.
3070 */
3071 PCPUMCPUIDLEAF pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0);
3072 if (pExtFeatureLeaf)
3073 {
3074 pExtFeatureLeaf = cpumR3CpuIdMakeSingleLeaf(pCpum, pExtFeatureLeaf);
3075
3076 pExtFeatureLeaf->uEdx &= X86_CPUID_AMD_FEATURE_EDX_FPU
3077 | X86_CPUID_AMD_FEATURE_EDX_VME
3078 | X86_CPUID_AMD_FEATURE_EDX_DE
3079 | X86_CPUID_AMD_FEATURE_EDX_PSE
3080 | X86_CPUID_AMD_FEATURE_EDX_TSC
3081 | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
3082 //| X86_CPUID_AMD_FEATURE_EDX_PAE - turned on when necessary
3083 //| X86_CPUID_AMD_FEATURE_EDX_MCE - not virtualized yet.
3084 | X86_CPUID_AMD_FEATURE_EDX_CX8
3085 //| X86_CPUID_AMD_FEATURE_EDX_APIC - set by the APIC device if present.
3086 //| RT_BIT_32(10) - reserved
3087 /* Note! We don't report sysenter/sysexit support due to our inability to keep the IOPL part of
3088 eflags in sync while in ring 1 (see @bugref{1757}). HM enables them later. */
3089 //| X86_CPUID_EXT_FEATURE_EDX_SYSCALL
3090 | X86_CPUID_AMD_FEATURE_EDX_MTRR
3091 | X86_CPUID_AMD_FEATURE_EDX_PGE
3092 | X86_CPUID_AMD_FEATURE_EDX_MCA
3093 | X86_CPUID_AMD_FEATURE_EDX_CMOV
3094 | X86_CPUID_AMD_FEATURE_EDX_PAT
3095 | X86_CPUID_AMD_FEATURE_EDX_PSE36
3096 //| RT_BIT_32(18) - reserved
3097 //| RT_BIT_32(19) - reserved
3098 //| X86_CPUID_EXT_FEATURE_EDX_NX - enabled later by PGM
3099 //| RT_BIT_32(21) - reserved
3100 | PASSTHRU_FEATURE(pConfig->enmAmdExtMmx, pHstFeat->fAmdMmxExts, X86_CPUID_AMD_FEATURE_EDX_AXMMX)
3101 | X86_CPUID_AMD_FEATURE_EDX_MMX
3102 | X86_CPUID_AMD_FEATURE_EDX_FXSR
3103 | X86_CPUID_AMD_FEATURE_EDX_FFXSR
3104 //| X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
3105 | X86_CPUID_EXT_FEATURE_EDX_RDTSCP
3106 //| RT_BIT_32(28) - reserved
3107 //| X86_CPUID_EXT_FEATURE_EDX_LONG_MODE - turned on when necessary
3108 | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
3109 | X86_CPUID_AMD_FEATURE_EDX_3DNOW
3110 ;
3111 pExtFeatureLeaf->uEcx &= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
3112 //| X86_CPUID_AMD_FEATURE_ECX_CMPL - set below if applicable.
3113 | (pConfig->fNestedHWVirt ? X86_CPUID_AMD_FEATURE_ECX_SVM : 0)
3114 //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
3115 /* Note: This could prevent teleporting from AMD to Intel CPUs! */
3116 | X86_CPUID_AMD_FEATURE_ECX_CR8L /* expose lock mov cr0 = mov cr8 hack for guests that can use this feature to access the TPR. */
3117 | PASSTHRU_FEATURE_TODO(pConfig->enmAbm, X86_CPUID_AMD_FEATURE_ECX_ABM)
3118 | PASSTHRU_FEATURE_TODO(pConfig->enmSse4A, X86_CPUID_AMD_FEATURE_ECX_SSE4A)
3119 | PASSTHRU_FEATURE_TODO(pConfig->enmMisAlnSse, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE)
3120 | PASSTHRU_FEATURE(pConfig->enm3dNowPrf, pHstFeat->f3DNowPrefetch, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF)
3121 //| X86_CPUID_AMD_FEATURE_ECX_OSVW
3122 //| X86_CPUID_AMD_FEATURE_ECX_IBS
3123 //| X86_CPUID_AMD_FEATURE_ECX_XOP
3124 //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
3125 //| X86_CPUID_AMD_FEATURE_ECX_WDT
3126 //| RT_BIT_32(14) - reserved
3127 //| X86_CPUID_AMD_FEATURE_ECX_LWP - not supported
3128 //| X86_CPUID_AMD_FEATURE_ECX_FMA4 - not yet virtualized.
3129 //| RT_BIT_32(17) - reserved
3130 //| RT_BIT_32(18) - reserved
3131 //| X86_CPUID_AMD_FEATURE_ECX_NODEID - not yet virtualized.
3132 //| RT_BIT_32(20) - reserved
3133 //| X86_CPUID_AMD_FEATURE_ECX_TBM - not yet virtualized.
3134 //| X86_CPUID_AMD_FEATURE_ECX_TOPOEXT - not yet virtualized.
3135 //| RT_BIT_32(23) - reserved
3136 //| RT_BIT_32(24) - reserved
3137 //| RT_BIT_32(25) - reserved
3138 //| RT_BIT_32(26) - reserved
3139 //| RT_BIT_32(27) - reserved
3140 //| RT_BIT_32(28) - reserved
3141 //| RT_BIT_32(29) - reserved
3142 //| RT_BIT_32(30) - reserved
3143 //| RT_BIT_32(31) - reserved
3144 ;
3145#ifdef VBOX_WITH_MULTI_CORE
3146 if ( pVM->cCpus > 1
3147 && ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
3148 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
3149 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL; /* CmpLegacy */
3150#endif
3151
3152 if (pCpum->u8PortableCpuIdLevel > 0)
3153 {
3154 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, CR8L, X86_CPUID_AMD_FEATURE_ECX_CR8L);
3155 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, SVM, X86_CPUID_AMD_FEATURE_ECX_SVM);
3156 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, ABM, X86_CPUID_AMD_FEATURE_ECX_ABM, pConfig->enmAbm);
3157 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, SSE4A, X86_CPUID_AMD_FEATURE_ECX_SSE4A, pConfig->enmSse4A);
3158 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, MISALNSSE, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE, pConfig->enmMisAlnSse);
3159 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, 3DNOWPRF, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF, pConfig->enm3dNowPrf);
3160 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, XOP, X86_CPUID_AMD_FEATURE_ECX_XOP);
3161 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, TBM, X86_CPUID_AMD_FEATURE_ECX_TBM);
3162 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, FMA4, X86_CPUID_AMD_FEATURE_ECX_FMA4);
3163 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEdx, AXMMX, X86_CPUID_AMD_FEATURE_EDX_AXMMX, pConfig->enmAmdExtMmx);
3164 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, 3DNOW, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
3165 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, 3DNOW_EX, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
3166 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, FFXSR, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
3167 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, RDTSCP, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
3168 PORTABLE_DISABLE_FEATURE_BIT( 2, pExtFeatureLeaf->uEcx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
3169 PORTABLE_DISABLE_FEATURE_BIT( 3, pExtFeatureLeaf->uEcx, CMOV, X86_CPUID_AMD_FEATURE_EDX_CMOV);
3170
3171 Assert(!(pExtFeatureLeaf->uEcx & ( X86_CPUID_AMD_FEATURE_ECX_SVM
3172 | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
3173 | X86_CPUID_AMD_FEATURE_ECX_OSVW
3174 | X86_CPUID_AMD_FEATURE_ECX_IBS
3175 | X86_CPUID_AMD_FEATURE_ECX_SKINIT
3176 | X86_CPUID_AMD_FEATURE_ECX_WDT
3177 | X86_CPUID_AMD_FEATURE_ECX_LWP
3178 | X86_CPUID_AMD_FEATURE_ECX_NODEID
3179 | X86_CPUID_AMD_FEATURE_ECX_TOPOEXT
3180 | UINT32_C(0xff964000)
3181 )));
3182 Assert(!(pExtFeatureLeaf->uEdx & ( RT_BIT(10)
3183 | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
3184 | RT_BIT(18)
3185 | RT_BIT(19)
3186 | RT_BIT(21)
3187 | X86_CPUID_AMD_FEATURE_EDX_AXMMX
3188 | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
3189 | RT_BIT(28)
3190 )));
3191 }
3192
3193 /* Force extended feature bits. */
3194 if (pConfig->enmAbm == CPUMISAEXTCFG_ENABLED_ALWAYS)
3195 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_ABM;
3196 if (pConfig->enmSse4A == CPUMISAEXTCFG_ENABLED_ALWAYS)
3197 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_SSE4A;
3198 if (pConfig->enmMisAlnSse == CPUMISAEXTCFG_ENABLED_ALWAYS)
3199 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_MISALNSSE;
3200 if (pConfig->enm3dNowPrf == CPUMISAEXTCFG_ENABLED_ALWAYS)
3201 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF;
3202 if (pConfig->enmAmdExtMmx == CPUMISAEXTCFG_ENABLED_ALWAYS)
3203 pExtFeatureLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_AXMMX;
3204 }
3205 pExtFeatureLeaf = NULL; /* Must refetch! */
3206
3207
3208 /* Cpuid 2:
3209 * Intel: (Nondeterministic) Cache and TLB information
3210 * AMD: Reserved
3211 * VIA: Reserved
3212 * Safe to expose.
3213 */
3214 uint32_t uSubLeaf = 0;
3215 PCPUMCPUIDLEAF pCurLeaf;
3216 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 2, uSubLeaf)) != NULL)
3217 {
3218 if ((pCurLeaf->uEax & 0xff) > 1)
3219 {
3220 LogRel(("CpuId: Std[2].al: %d -> 1\n", pCurLeaf->uEax & 0xff));
3221 pCurLeaf->uEax &= UINT32_C(0xffffff01);
3222 }
3223 uSubLeaf++;
3224 }
3225
3226 /* Cpuid 3:
3227 * Intel: EAX, EBX - reserved (transmeta uses these)
3228 * ECX, EDX - Processor Serial Number if available, otherwise reserved
3229 * AMD: Reserved
3230 * VIA: Reserved
3231 * Safe to expose
3232 */
3233 pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
3234 if (!(pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_PSN))
3235 {
3236 uSubLeaf = 0;
3237 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 3, uSubLeaf)) != NULL)
3238 {
3239 pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
3240 if (pCpum->u8PortableCpuIdLevel > 0)
3241 pCurLeaf->uEax = pCurLeaf->uEbx = 0;
3242 uSubLeaf++;
3243 }
3244 }
3245
3246 /* Cpuid 4 + ECX:
3247 * Intel: Deterministic Cache Parameters Leaf.
3248 * AMD: Reserved
3249 * VIA: Reserved
3250 * Safe to expose, except for EAX:
3251 * Bits 25-14: Maximum number of addressable IDs for logical processors sharing this cache (see note)**
3252 * Bits 31-26: Maximum number of processor cores in this physical package**
3253 * Note: These SMP values are constant regardless of ECX
3254 */
3255 uSubLeaf = 0;
3256 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 4, uSubLeaf)) != NULL)
3257 {
3258 pCurLeaf->uEax &= UINT32_C(0x00003fff); /* Clear the #maxcores, #threads-sharing-cache (both are #-1).*/
3259#ifdef VBOX_WITH_MULTI_CORE
3260 if ( pVM->cCpus > 1
3261 && pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
3262 {
3263 AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
3264 /* One logical processor with possibly multiple cores. */
3265 /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
3266 pCurLeaf->uEax |= pVM->cCpus <= 0x40 ? ((pVM->cCpus - 1) << 26) : UINT32_C(0xfc000000); /* 6 bits only -> 64 cores! */
3267 }
3268#endif
3269 uSubLeaf++;
3270 }
3271
3272 /* Cpuid 5: Monitor/mwait Leaf
3273 * Intel: ECX, EDX - reserved
3274 * EAX, EBX - Smallest and largest monitor line size
3275 * AMD: EDX - reserved
3276 * EAX, EBX - Smallest and largest monitor line size
3277 * ECX - extensions (ignored for now)
3278 * VIA: Reserved
3279 * Safe to expose
3280 */
3281 uSubLeaf = 0;
3282 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 5, uSubLeaf)) != NULL)
3283 {
3284 pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
3285 if (!(pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR))
3286 pCurLeaf->uEax = pCurLeaf->uEbx = 0;
3287
3288 pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
3289 if (pConfig->enmMWaitExtensions)
3290 {
3291 pCurLeaf->uEcx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
3292 /** @todo for now we just expose host's MWAIT C-states, although conceptually
3293 it shall be part of our power management virtualization model */
3294#if 0
3295 /* MWAIT sub C-states */
3296 pCurLeaf->uEdx =
3297 (0 << 0) /* 0 in C0 */ |
3298 (2 << 4) /* 2 in C1 */ |
3299 (2 << 8) /* 2 in C2 */ |
3300 (2 << 12) /* 2 in C3 */ |
3301 (0 << 16) /* 0 in C4 */
3302 ;
3303#endif
3304 }
3305 else
3306 pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
3307 uSubLeaf++;
3308 }
3309
3310 /* Cpuid 6: Digital Thermal Sensor and Power Management Paramenters.
3311 * Intel: Various stuff.
3312 * AMD: EAX, EBX, EDX - reserved.
3313 * ECX - Bit zero is EffFreq, indicating MSR_0000_00e7 and MSR_0000_00e8
3314 * present. Same as intel.
3315 * VIA: ??
3316 *
3317 * We clear everything here for now.
3318 */
3319 cpumR3CpuIdZeroLeaf(pCpum, 6);
3320
3321 /* Cpuid 7 + ECX: Structured Extended Feature Flags Enumeration
3322 * EAX: Number of sub leaves.
3323 * EBX+ECX+EDX: Feature flags
3324 *
3325 * We only have documentation for one sub-leaf, so clear all other (no need
3326 * to remove them as such, just set them to zero).
3327 *
3328 * Note! When enabling new features the Synthetic CPU and Portable CPUID
3329 * options may require adjusting (i.e. stripping what was enabled).
3330 */
3331 uSubLeaf = 0;
3332 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 7, uSubLeaf)) != NULL)
3333 {
3334 switch (uSubLeaf)
3335 {
3336 case 0:
3337 {
3338 pCurLeaf->uEax = 0; /* Max ECX input is 0. */
3339 pCurLeaf->uEbx &= 0
3340 | PASSTHRU_FEATURE(pConfig->enmFsGsBase, pHstFeat->fFsGsBase, X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE)
3341 //| X86_CPUID_STEXT_FEATURE_EBX_TSC_ADJUST RT_BIT(1)
3342 //| X86_CPUID_STEXT_FEATURE_EBX_SGX RT_BIT(2)
3343 //| X86_CPUID_STEXT_FEATURE_EBX_BMI1 RT_BIT(3)
3344 //| X86_CPUID_STEXT_FEATURE_EBX_HLE RT_BIT(4)
3345 | PASSTHRU_FEATURE(pConfig->enmAvx2, pHstFeat->fAvx2, X86_CPUID_STEXT_FEATURE_EBX_AVX2)
3346 | X86_CPUID_STEXT_FEATURE_EBX_FDP_EXCPTN_ONLY
3347 //| X86_CPUID_STEXT_FEATURE_EBX_SMEP RT_BIT(7)
3348 //| X86_CPUID_STEXT_FEATURE_EBX_BMI2 RT_BIT(8)
3349 //| X86_CPUID_STEXT_FEATURE_EBX_ERMS RT_BIT(9)
3350 | PASSTHRU_FEATURE(pConfig->enmInvpcid, pHstFeat->fInvpcid, X86_CPUID_STEXT_FEATURE_EBX_INVPCID)
3351 //| X86_CPUID_STEXT_FEATURE_EBX_RTM RT_BIT(11)
3352 //| X86_CPUID_STEXT_FEATURE_EBX_PQM RT_BIT(12)
3353 | X86_CPUID_STEXT_FEATURE_EBX_DEPR_FPU_CS_DS
3354 //| X86_CPUID_STEXT_FEATURE_EBX_MPE RT_BIT(14)
3355 //| X86_CPUID_STEXT_FEATURE_EBX_PQE RT_BIT(15)
3356 //| X86_CPUID_STEXT_FEATURE_EBX_AVX512F RT_BIT(16)
3357 //| RT_BIT(17) - reserved
3358 | PASSTHRU_FEATURE_TODO(pConfig->enmRdSeed, X86_CPUID_STEXT_FEATURE_EBX_RDSEED)
3359 //| X86_CPUID_STEXT_FEATURE_EBX_ADX RT_BIT(19)
3360 //| X86_CPUID_STEXT_FEATURE_EBX_SMAP RT_BIT(20)
3361 //| RT_BIT(21) - reserved
3362 //| RT_BIT(22) - reserved
3363 | PASSTHRU_FEATURE(pConfig->enmCLFlushOpt, pHstFeat->fClFlushOpt, X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT)
3364 //| RT_BIT(24) - reserved
3365 //| X86_CPUID_STEXT_FEATURE_EBX_INTEL_PT RT_BIT(25)
3366 //| X86_CPUID_STEXT_FEATURE_EBX_AVX512PF RT_BIT(26)
3367 //| X86_CPUID_STEXT_FEATURE_EBX_AVX512ER RT_BIT(27)
3368 //| X86_CPUID_STEXT_FEATURE_EBX_AVX512CD RT_BIT(28)
3369 //| X86_CPUID_STEXT_FEATURE_EBX_SHA RT_BIT(29)
3370 //| RT_BIT(30) - reserved
3371 //| RT_BIT(31) - reserved
3372 ;
3373 pCurLeaf->uEcx &= 0
3374 //| X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1 - we do not do vector functions yet.
3375 ;
3376 pCurLeaf->uEdx &= 0
3377 | PASSTHRU_FEATURE(pConfig->enmMdsClear, pHstFeat->fMdsClear, X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR)
3378 //| X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB RT_BIT(26)
3379 //| X86_CPUID_STEXT_FEATURE_EDX_STIBP RT_BIT(27)
3380 | PASSTHRU_FEATURE(pConfig->enmFlushCmdMsr, pHstFeat->fFlushCmd, X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD)
3381 | PASSTHRU_FEATURE(pConfig->enmArchCapMsr, pHstFeat->fArchCap, X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP)
3382 ;
3383
3384 /* Mask out INVPCID unless FSGSBASE is exposed due to a bug in Windows 10 SMP guests, see @bugref{9089#c15}. */
3385 if ( !pVM->cpum.s.GuestFeatures.fFsGsBase
3386 && (pCurLeaf->uEbx & X86_CPUID_STEXT_FEATURE_EBX_INVPCID))
3387 {
3388 pCurLeaf->uEbx &= ~X86_CPUID_STEXT_FEATURE_EBX_INVPCID;
3389 LogRel(("CPUM: Disabled INVPCID without FSGSBASE to work around buggy guests\n"));
3390 }
3391
3392 if (pCpum->u8PortableCpuIdLevel > 0)
3393 {
3394 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, FSGSBASE, X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE, pConfig->enmFsGsBase);
3395 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SGX, X86_CPUID_STEXT_FEATURE_EBX_SGX);
3396 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, AVX2, X86_CPUID_STEXT_FEATURE_EBX_AVX2, pConfig->enmAvx2);
3397 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SMEP, X86_CPUID_STEXT_FEATURE_EBX_SMEP);
3398 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, BMI2, X86_CPUID_STEXT_FEATURE_EBX_BMI2);
3399 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, INVPCID, X86_CPUID_STEXT_FEATURE_EBX_INVPCID, pConfig->enmInvpcid);
3400 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512F, X86_CPUID_STEXT_FEATURE_EBX_AVX512F);
3401 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, RDSEED, X86_CPUID_STEXT_FEATURE_EBX_RDSEED, pConfig->enmRdSeed);
3402 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, CLFLUSHOPT, X86_CPUID_STEXT_FEATURE_EBX_RDSEED, pConfig->enmCLFlushOpt);
3403 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512PF, X86_CPUID_STEXT_FEATURE_EBX_AVX512PF);
3404 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512ER, X86_CPUID_STEXT_FEATURE_EBX_AVX512ER);
3405 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512CD, X86_CPUID_STEXT_FEATURE_EBX_AVX512CD);
3406 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SMAP, X86_CPUID_STEXT_FEATURE_EBX_SMAP);
3407 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SHA, X86_CPUID_STEXT_FEATURE_EBX_SHA);
3408 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEcx, PREFETCHWT1, X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1);
3409 PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, FLUSH_CMD, X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD, pConfig->enmFlushCmdMsr);
3410 PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, MD_CLEAR, X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR, pConfig->enmMdsClear);
3411 PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, ARCHCAP, X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP, pConfig->enmArchCapMsr);
3412 }
3413
3414 /* Dependencies. */
3415 if (!(pCurLeaf->uEdx & X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD))
3416 pCurLeaf->uEdx &= ~X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR;
3417
3418 /* Force standard feature bits. */
3419 if (pConfig->enmFsGsBase == CPUMISAEXTCFG_ENABLED_ALWAYS)
3420 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE;
3421 if (pConfig->enmAvx2 == CPUMISAEXTCFG_ENABLED_ALWAYS)
3422 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_AVX2;
3423 if (pConfig->enmRdSeed == CPUMISAEXTCFG_ENABLED_ALWAYS)
3424 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_RDSEED;
3425 if (pConfig->enmCLFlushOpt == CPUMISAEXTCFG_ENABLED_ALWAYS)
3426 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT;
3427 if (pConfig->enmInvpcid == CPUMISAEXTCFG_ENABLED_ALWAYS)
3428 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_INVPCID;
3429 if (pConfig->enmFlushCmdMsr == CPUMISAEXTCFG_ENABLED_ALWAYS)
3430 pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD;
3431 if (pConfig->enmMdsClear == CPUMISAEXTCFG_ENABLED_ALWAYS)
3432 pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR;
3433 if (pConfig->enmArchCapMsr == CPUMISAEXTCFG_ENABLED_ALWAYS)
3434 pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP;
3435 break;
3436 }
3437
3438 default:
3439 /* Invalid index, all values are zero. */
3440 pCurLeaf->uEax = 0;
3441 pCurLeaf->uEbx = 0;
3442 pCurLeaf->uEcx = 0;
3443 pCurLeaf->uEdx = 0;
3444 break;
3445 }
3446 uSubLeaf++;
3447 }
3448
3449 /* Cpuid 8: Marked as reserved by Intel and AMD.
3450 * We zero this since we don't know what it may have been used for.
3451 */
3452 cpumR3CpuIdZeroLeaf(pCpum, 8);
3453
3454 /* Cpuid 9: Direct Cache Access (DCA) Parameters
3455 * Intel: EAX - Value of PLATFORM_DCA_CAP bits.
3456 * EBX, ECX, EDX - reserved.
3457 * AMD: Reserved
3458 * VIA: ??
3459 *
3460 * We zero this.
3461 */
3462 cpumR3CpuIdZeroLeaf(pCpum, 9);
3463
3464 /* Cpuid 0xa: Architectural Performance Monitor Features
3465 * Intel: EAX - Value of PLATFORM_DCA_CAP bits.
3466 * EBX, ECX, EDX - reserved.
3467 * AMD: Reserved
3468 * VIA: ??
3469 *
3470 * We zero this, for now at least.
3471 */
3472 cpumR3CpuIdZeroLeaf(pCpum, 10);
3473
3474 /* Cpuid 0xb+ECX: x2APIC Features / Processor Topology.
3475 * Intel: EAX - APCI ID shift right for next level.
3476 * EBX - Factory configured cores/threads at this level.
3477 * ECX - Level number (same as input) and level type (1,2,0).
3478 * EDX - Extended initial APIC ID.
3479 * AMD: Reserved
3480 * VIA: ??
3481 */
3482 uSubLeaf = 0;
3483 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 11, uSubLeaf)) != NULL)
3484 {
3485 if (pCurLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC_ID)
3486 {
3487 uint8_t bLevelType = RT_BYTE2(pCurLeaf->uEcx);
3488 if (bLevelType == 1)
3489 {
3490 /* Thread level - we don't do threads at the moment. */
3491 pCurLeaf->uEax = 0; /** @todo is this correct? Real CPUs never do 0 here, I think... */
3492 pCurLeaf->uEbx = 1;
3493 }
3494 else if (bLevelType == 2)
3495 {
3496 /* Core level. */
3497 pCurLeaf->uEax = 1; /** @todo real CPUs are supposed to be in the 4-6 range, not 1. Our APIC ID assignments are a little special... */
3498#ifdef VBOX_WITH_MULTI_CORE
3499 while (RT_BIT_32(pCurLeaf->uEax) < pVM->cCpus)
3500 pCurLeaf->uEax++;
3501#endif
3502 pCurLeaf->uEbx = pVM->cCpus;
3503 }
3504 else
3505 {
3506 AssertLogRelMsg(bLevelType == 0, ("bLevelType=%#x uSubLeaf=%#x\n", bLevelType, uSubLeaf));
3507 pCurLeaf->uEax = 0;
3508 pCurLeaf->uEbx = 0;
3509 pCurLeaf->uEcx = 0;
3510 }
3511 pCurLeaf->uEcx = (pCurLeaf->uEcx & UINT32_C(0xffffff00)) | (uSubLeaf & 0xff);
3512 pCurLeaf->uEdx = 0; /* APIC ID is filled in by CPUMGetGuestCpuId() at runtime. Init for EMT(0) as usual. */
3513 }
3514 else
3515 {
3516 pCurLeaf->uEax = 0;
3517 pCurLeaf->uEbx = 0;
3518 pCurLeaf->uEcx = 0;
3519 pCurLeaf->uEdx = 0;
3520 }
3521 uSubLeaf++;
3522 }
3523
3524 /* Cpuid 0xc: Marked as reserved by Intel and AMD.
3525 * We zero this since we don't know what it may have been used for.
3526 */
3527 cpumR3CpuIdZeroLeaf(pCpum, 12);
3528
3529 /* Cpuid 0xd + ECX: Processor Extended State Enumeration
3530 * ECX=0: EAX - Valid bits in XCR0[31:0].
3531 * EBX - Maximum state size as per current XCR0 value.
3532 * ECX - Maximum state size for all supported features.
3533 * EDX - Valid bits in XCR0[63:32].
3534 * ECX=1: EAX - Various X-features.
3535 * EBX - Maximum state size as per current XCR0|IA32_XSS value.
3536 * ECX - Valid bits in IA32_XSS[31:0].
3537 * EDX - Valid bits in IA32_XSS[63:32].
3538 * ECX=N, where N in 2..63 and indicates a bit in XCR0 and/or IA32_XSS,
3539 * if the bit invalid all four registers are set to zero.
3540 * EAX - The state size for this feature.
3541 * EBX - The state byte offset of this feature.
3542 * ECX - Bit 0 indicates whether this sub-leaf maps to a valid IA32_XSS bit (=1) or a valid XCR0 bit (=0).
3543 * EDX - Reserved, but is set to zero if invalid sub-leaf index.
3544 *
3545 * Clear them all as we don't currently implement extended CPU state.
3546 */
3547 /* Figure out the supported XCR0/XSS mask component and make sure CPUID[1].ECX[27] = CR4.OSXSAVE. */
3548 uint64_t fGuestXcr0Mask = 0;
3549 pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
3550 if (pStdFeatureLeaf && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_XSAVE))
3551 {
3552 fGuestXcr0Mask = XSAVE_C_X87 | XSAVE_C_SSE;
3553 if (pStdFeatureLeaf && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_AVX))
3554 fGuestXcr0Mask |= XSAVE_C_YMM;
3555 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 7, 0);
3556 if (pCurLeaf && (pCurLeaf->uEbx & X86_CPUID_STEXT_FEATURE_EBX_AVX512F))
3557 fGuestXcr0Mask |= XSAVE_C_ZMM_16HI | XSAVE_C_ZMM_HI256 | XSAVE_C_OPMASK;
3558 fGuestXcr0Mask &= pCpum->fXStateHostMask;
3559
3560 pStdFeatureLeaf->fFlags |= CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE;
3561 }
3562 pStdFeatureLeaf = NULL;
3563 pCpum->fXStateGuestMask = fGuestXcr0Mask;
3564
3565 /* Work the sub-leaves. */
3566 uint32_t cbXSaveMaxActual = CPUM_MIN_XSAVE_AREA_SIZE;
3567 uint32_t cbXSaveMaxReport = CPUM_MIN_XSAVE_AREA_SIZE;
3568 for (uSubLeaf = 0; uSubLeaf < 63; uSubLeaf++)
3569 {
3570 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 13, uSubLeaf);
3571 if (pCurLeaf)
3572 {
3573 if (fGuestXcr0Mask)
3574 {
3575 switch (uSubLeaf)
3576 {
3577 case 0:
3578 pCurLeaf->uEax &= RT_LO_U32(fGuestXcr0Mask);
3579 pCurLeaf->uEdx &= RT_HI_U32(fGuestXcr0Mask);
3580 AssertLogRelMsgReturn((pCurLeaf->uEax & (XSAVE_C_X87 | XSAVE_C_SSE)) == (XSAVE_C_X87 | XSAVE_C_SSE),
3581 ("CPUID(0xd/0).EAX missing mandatory X87 or SSE bits: %#RX32", pCurLeaf->uEax),
3582 VERR_CPUM_IPE_1);
3583 cbXSaveMaxActual = pCurLeaf->uEcx;
3584 AssertLogRelMsgReturn(cbXSaveMaxActual <= CPUM_MAX_XSAVE_AREA_SIZE && cbXSaveMaxActual >= CPUM_MIN_XSAVE_AREA_SIZE,
3585 ("%#x max=%#x\n", cbXSaveMaxActual, CPUM_MAX_XSAVE_AREA_SIZE), VERR_CPUM_IPE_2);
3586 AssertLogRelMsgReturn(pCurLeaf->uEbx >= CPUM_MIN_XSAVE_AREA_SIZE && pCurLeaf->uEbx <= cbXSaveMaxActual,
3587 ("ebx=%#x cbXSaveMaxActual=%#x\n", pCurLeaf->uEbx, cbXSaveMaxActual),
3588 VERR_CPUM_IPE_2);
3589 continue;
3590 case 1:
3591 pCurLeaf->uEax &= 0;
3592 pCurLeaf->uEcx &= 0;
3593 pCurLeaf->uEdx &= 0;
3594 /** @todo what about checking ebx? */
3595 continue;
3596 default:
3597 if (fGuestXcr0Mask & RT_BIT_64(uSubLeaf))
3598 {
3599 AssertLogRelMsgReturn( pCurLeaf->uEax <= cbXSaveMaxActual
3600 && pCurLeaf->uEax > 0
3601 && pCurLeaf->uEbx < cbXSaveMaxActual
3602 && pCurLeaf->uEbx >= CPUM_MIN_XSAVE_AREA_SIZE
3603 && pCurLeaf->uEbx + pCurLeaf->uEax <= cbXSaveMaxActual,
3604 ("%#x: eax=%#x ebx=%#x cbMax=%#x\n",
3605 uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, cbXSaveMaxActual),
3606 VERR_CPUM_IPE_2);
3607 AssertLogRel(!(pCurLeaf->uEcx & 1));
3608 pCurLeaf->uEcx = 0; /* Bit 0 should be zero (XCR0), the reset are reserved... */
3609 pCurLeaf->uEdx = 0; /* it's reserved... */
3610 if (pCurLeaf->uEbx + pCurLeaf->uEax > cbXSaveMaxReport)
3611 cbXSaveMaxReport = pCurLeaf->uEbx + pCurLeaf->uEax;
3612 continue;
3613 }
3614 break;
3615 }
3616 }
3617
3618 /* Clear the leaf. */
3619 pCurLeaf->uEax = 0;
3620 pCurLeaf->uEbx = 0;
3621 pCurLeaf->uEcx = 0;
3622 pCurLeaf->uEdx = 0;
3623 }
3624 }
3625
3626 /* Update the max and current feature sizes to shut up annoying Linux kernels. */
3627 if (cbXSaveMaxReport != cbXSaveMaxActual && fGuestXcr0Mask)
3628 {
3629 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 13, 0);
3630 if (pCurLeaf)
3631 {
3632 LogRel(("CPUM: Changing leaf 13[0]: EBX=%#RX32 -> %#RX32, ECX=%#RX32 -> %#RX32\n",
3633 pCurLeaf->uEbx, cbXSaveMaxReport, pCurLeaf->uEcx, cbXSaveMaxReport));
3634 pCurLeaf->uEbx = cbXSaveMaxReport;
3635 pCurLeaf->uEcx = cbXSaveMaxReport;
3636 }
3637 }
3638
3639 /* Cpuid 0xe: Marked as reserved by Intel and AMD.
3640 * We zero this since we don't know what it may have been used for.
3641 */
3642 cpumR3CpuIdZeroLeaf(pCpum, 14);
3643
3644 /* Cpuid 0xf + ECX: Platform quality of service monitoring (PQM),
3645 * also known as Intel Resource Director Technology (RDT) Monitoring
3646 * We zero this as we don't currently virtualize PQM.
3647 */
3648 cpumR3CpuIdZeroLeaf(pCpum, 15);
3649
3650 /* Cpuid 0x10 + ECX: Platform quality of service enforcement (PQE),
3651 * also known as Intel Resource Director Technology (RDT) Allocation
3652 * We zero this as we don't currently virtualize PQE.
3653 */
3654 cpumR3CpuIdZeroLeaf(pCpum, 16);
3655
3656 /* Cpuid 0x11: Marked as reserved by Intel and AMD.
3657 * We zero this since we don't know what it may have been used for.
3658 */
3659 cpumR3CpuIdZeroLeaf(pCpum, 17);
3660
3661 /* Cpuid 0x12 + ECX: SGX resource enumeration.
3662 * We zero this as we don't currently virtualize this.
3663 */
3664 cpumR3CpuIdZeroLeaf(pCpum, 18);
3665
3666 /* Cpuid 0x13: Marked as reserved by Intel and AMD.
3667 * We zero this since we don't know what it may have been used for.
3668 */
3669 cpumR3CpuIdZeroLeaf(pCpum, 19);
3670
3671 /* Cpuid 0x14 + ECX: Processor Trace (PT) capability enumeration.
3672 * We zero this as we don't currently virtualize this.
3673 */
3674 cpumR3CpuIdZeroLeaf(pCpum, 20);
3675
3676 /* Cpuid 0x15: Timestamp Counter / Core Crystal Clock info.
3677 * Intel: uTscFrequency = uCoreCrystalClockFrequency * EBX / EAX.
3678 * EAX - denominator (unsigned).
3679 * EBX - numerator (unsigned).
3680 * ECX, EDX - reserved.
3681 * AMD: Reserved / undefined / not implemented.
3682 * VIA: Reserved / undefined / not implemented.
3683 * We zero this as we don't currently virtualize this.
3684 */
3685 cpumR3CpuIdZeroLeaf(pCpum, 21);
3686
3687 /* Cpuid 0x16: Processor frequency info
3688 * Intel: EAX - Core base frequency in MHz.
3689 * EBX - Core maximum frequency in MHz.
3690 * ECX - Bus (reference) frequency in MHz.
3691 * EDX - Reserved.
3692 * AMD: Reserved / undefined / not implemented.
3693 * VIA: Reserved / undefined / not implemented.
3694 * We zero this as we don't currently virtualize this.
3695 */
3696 cpumR3CpuIdZeroLeaf(pCpum, 22);
3697
3698 /* Cpuid 0x17..0x10000000: Unknown.
3699 * We don't know these and what they mean, so remove them. */
3700 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
3701 UINT32_C(0x00000017), UINT32_C(0x0fffffff));
3702
3703
3704 /* CpuId 0x40000000..0x4fffffff: Reserved for hypervisor/emulator.
3705 * We remove all these as we're a hypervisor and must provide our own.
3706 */
3707 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
3708 UINT32_C(0x40000000), UINT32_C(0x4fffffff));
3709
3710
3711 /* Cpuid 0x80000000 is harmless. */
3712
3713 /* Cpuid 0x80000001 is handled with cpuid 1 way up above. */
3714
3715 /* Cpuid 0x80000002...0x80000004 contains the processor name and is considered harmless. */
3716
3717 /* Cpuid 0x800000005 & 0x800000006 contain information about L1, L2 & L3 cache and TLB identifiers.
3718 * Safe to pass on to the guest.
3719 *
3720 * AMD: 0x800000005 L1 cache information
3721 * 0x800000006 L2/L3 cache information
3722 * Intel: 0x800000005 reserved
3723 * 0x800000006 L2 cache information
3724 * VIA: 0x800000005 TLB and L1 cache information
3725 * 0x800000006 L2 cache information
3726 */
3727
3728 /* Cpuid 0x800000007: Advanced Power Management Information.
3729 * AMD: EAX: Processor feedback capabilities.
3730 * EBX: RAS capabilites.
3731 * ECX: Advanced power monitoring interface.
3732 * EDX: Enhanced power management capabilities.
3733 * Intel: EAX, EBX, ECX - reserved.
3734 * EDX - Invariant TSC indicator supported (bit 8), the rest is reserved.
3735 * VIA: Reserved
3736 * We let the guest see EDX_TSCINVAR (and later maybe EDX_EFRO). Actually, we should set EDX_TSCINVAR.
3737 */
3738 uSubLeaf = 0;
3739 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000007), uSubLeaf)) != NULL)
3740 {
3741 pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = 0;
3742 if ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
3743 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
3744 {
3745 /*
3746 * Older 64-bit linux kernels blindly assume that the AMD performance counters work
3747 * if X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR is set, see @bugref{7243#c85}. Exposing this
3748 * bit is now configurable.
3749 */
3750 pCurLeaf->uEdx &= 0
3751 //| X86_CPUID_AMD_ADVPOWER_EDX_TS
3752 //| X86_CPUID_AMD_ADVPOWER_EDX_FID
3753 //| X86_CPUID_AMD_ADVPOWER_EDX_VID
3754 //| X86_CPUID_AMD_ADVPOWER_EDX_TTP
3755 //| X86_CPUID_AMD_ADVPOWER_EDX_TM
3756 //| X86_CPUID_AMD_ADVPOWER_EDX_STC
3757 //| X86_CPUID_AMD_ADVPOWER_EDX_MC
3758 //| X86_CPUID_AMD_ADVPOWER_EDX_HWPSTATE
3759 | X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR
3760 //| X86_CPUID_AMD_ADVPOWER_EDX_CPB RT_BIT(9)
3761 //| X86_CPUID_AMD_ADVPOWER_EDX_EFRO RT_BIT(10)
3762 //| X86_CPUID_AMD_ADVPOWER_EDX_PFI RT_BIT(11)
3763 //| X86_CPUID_AMD_ADVPOWER_EDX_PA RT_BIT(12)
3764 | 0;
3765 }
3766 else
3767 pCurLeaf->uEdx &= X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR;
3768 if (!pConfig->fInvariantTsc)
3769 pCurLeaf->uEdx &= ~X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR;
3770 uSubLeaf++;
3771 }
3772
3773 /* Cpuid 0x80000008:
3774 * AMD: EBX, EDX - reserved
3775 * EAX: Virtual/Physical/Guest address Size
3776 * ECX: Number of cores + APICIdCoreIdSize
3777 * Intel: EAX: Virtual/Physical address Size
3778 * EBX, ECX, EDX - reserved
3779 * VIA: EAX: Virtual/Physical address Size
3780 * EBX, ECX, EDX - reserved
3781 *
3782 * We only expose the virtual+pysical address size to the guest atm.
3783 * On AMD we set the core count, but not the apic id stuff as we're
3784 * currently not doing the apic id assignments in a complatible manner.
3785 */
3786 uSubLeaf = 0;
3787 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000008), uSubLeaf)) != NULL)
3788 {
3789 pCurLeaf->uEax &= UINT32_C(0x0000ffff); /* Virtual & physical address sizes only. */
3790 pCurLeaf->uEbx = 0; /* reserved - [12] == IBPB */
3791 pCurLeaf->uEdx = 0; /* reserved */
3792
3793 /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu).
3794 * Set core count to 0, indicating 1 core. Adjust if we're in multi core mode on AMD. */
3795 pCurLeaf->uEcx = 0;
3796#ifdef VBOX_WITH_MULTI_CORE
3797 if ( pVM->cCpus > 1
3798 && ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
3799 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
3800 pCurLeaf->uEcx |= (pVM->cCpus - 1) & UINT32_C(0xff);
3801#endif
3802 uSubLeaf++;
3803 }
3804
3805 /* Cpuid 0x80000009: Reserved
3806 * We zero this since we don't know what it may have been used for.
3807 */
3808 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x80000009));
3809
3810 /* Cpuid 0x8000000a: SVM information on AMD, invalid on Intel.
3811 * AMD: EAX - SVM revision.
3812 * EBX - Number of ASIDs.
3813 * ECX - Reserved.
3814 * EDX - SVM Feature identification.
3815 */
3816 if ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
3817 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
3818 {
3819 pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0);
3820 if ( pExtFeatureLeaf
3821 && (pExtFeatureLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM))
3822 {
3823 PCPUMCPUIDLEAF pSvmFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0x8000000a, 0);
3824 if (pSvmFeatureLeaf)
3825 {
3826 pSvmFeatureLeaf->uEax = 0x1;
3827 pSvmFeatureLeaf->uEbx = 0x8000; /** @todo figure out virtual NASID. */
3828 pSvmFeatureLeaf->uEcx = 0;
3829 pSvmFeatureLeaf->uEdx &= ( X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE /** @todo Support other SVM features */
3830 | X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID
3831 | X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS);
3832 }
3833 else
3834 {
3835 /* Should never happen. */
3836 LogRel(("CPUM: Warning! Expected CPUID leaf 0x8000000a not present! SVM features not exposed to the guest\n"));
3837 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
3838 }
3839 }
3840 else
3841 {
3842 /* If SVM is not supported, this is reserved, zero out. */
3843 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
3844 }
3845 }
3846 else
3847 {
3848 /* Cpuid 0x8000000a: Reserved on Intel.
3849 * We zero this since we don't know what it may have been used for.
3850 */
3851 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
3852 }
3853
3854 /* Cpuid 0x8000000b thru 0x80000018: Reserved
3855 * We clear these as we don't know what purpose they might have. */
3856 for (uint32_t uLeaf = UINT32_C(0x8000000b); uLeaf <= UINT32_C(0x80000018); uLeaf++)
3857 cpumR3CpuIdZeroLeaf(pCpum, uLeaf);
3858
3859 /* Cpuid 0x80000019: TLB configuration
3860 * Seems to be harmless, pass them thru as is. */
3861
3862 /* Cpuid 0x8000001a: Peformance optimization identifiers.
3863 * Strip anything we don't know what is or addresses feature we don't implement. */
3864 uSubLeaf = 0;
3865 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001a), uSubLeaf)) != NULL)
3866 {
3867 pCurLeaf->uEax &= RT_BIT_32(0) /* FP128 - use 1x128-bit instead of 2x64-bit. */
3868 | RT_BIT_32(1) /* MOVU - Prefere unaligned MOV over MOVL + MOVH. */
3869 //| RT_BIT_32(2) /* FP256 - use 1x256-bit instead of 2x128-bit. */
3870 ;
3871 pCurLeaf->uEbx = 0; /* reserved */
3872 pCurLeaf->uEcx = 0; /* reserved */
3873 pCurLeaf->uEdx = 0; /* reserved */
3874 uSubLeaf++;
3875 }
3876
3877 /* Cpuid 0x8000001b: Instruct based sampling (IBS) information.
3878 * Clear this as we don't currently virtualize this feature. */
3879 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000001b));
3880
3881 /* Cpuid 0x8000001c: Lightweight profiling (LWP) information.
3882 * Clear this as we don't currently virtualize this feature. */
3883 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000001c));
3884
3885 /* Cpuid 0x8000001d+ECX: Get cache configuration descriptors.
3886 * We need to sanitize the cores per cache (EAX[25:14]).
3887 *
3888 * This is very much the same as Intel's CPUID(4) leaf, except EAX[31:26]
3889 * and EDX[2] are reserved here, and EAX[14:25] is documented having a
3890 * slightly different meaning.
3891 */
3892 uSubLeaf = 0;
3893 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001d), uSubLeaf)) != NULL)
3894 {
3895#ifdef VBOX_WITH_MULTI_CORE
3896 uint32_t cCores = ((pCurLeaf->uEax >> 14) & 0xfff) + 1;
3897 if (cCores > pVM->cCpus)
3898 cCores = pVM->cCpus;
3899 pCurLeaf->uEax &= UINT32_C(0x00003fff);
3900 pCurLeaf->uEax |= ((cCores - 1) & 0xfff) << 14;
3901#else
3902 pCurLeaf->uEax &= UINT32_C(0x00003fff);
3903#endif
3904 uSubLeaf++;
3905 }
3906
3907 /* Cpuid 0x8000001e: Get APIC / unit / node information.
3908 * If AMD, we configure it for our layout (on EMT(0)). In the multi-core
3909 * setup, we have one compute unit with all the cores in it. Single node.
3910 */
3911 uSubLeaf = 0;
3912 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001e), uSubLeaf)) != NULL)
3913 {
3914 pCurLeaf->uEax = 0; /* Extended APIC ID = EMT(0).idApic (== 0). */
3915 if (pCurLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC_ID)
3916 {
3917#ifdef VBOX_WITH_MULTI_CORE
3918 pCurLeaf->uEbx = pVM->cCpus < 0x100
3919 ? (pVM->cCpus - 1) << 8 : UINT32_C(0x0000ff00); /* Compute unit ID 0, core per unit. */
3920#else
3921 pCurLeaf->uEbx = 0; /* Compute unit ID 0, 1 core per unit. */
3922#endif
3923 pCurLeaf->uEcx = 0; /* Node ID 0, 1 node per CPU. */
3924 }
3925 else
3926 {
3927 Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_AMD);
3928 Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_HYGON);
3929 pCurLeaf->uEbx = 0; /* Reserved. */
3930 pCurLeaf->uEcx = 0; /* Reserved. */
3931 }
3932 pCurLeaf->uEdx = 0; /* Reserved. */
3933 uSubLeaf++;
3934 }
3935
3936 /* Cpuid 0x8000001f...0x8ffffffd: Unknown.
3937 * We don't know these and what they mean, so remove them. */
3938 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
3939 UINT32_C(0x8000001f), UINT32_C(0x8ffffffd));
3940
3941 /* Cpuid 0x8ffffffe: Mystery AMD K6 leaf.
3942 * Just pass it thru for now. */
3943
3944 /* Cpuid 0x8fffffff: Mystery hammer time leaf!
3945 * Just pass it thru for now. */
3946
3947 /* Cpuid 0xc0000000: Centaur stuff.
3948 * Harmless, pass it thru. */
3949
3950 /* Cpuid 0xc0000001: Centaur features.
3951 * VIA: EAX - Family, model, stepping.
3952 * EDX - Centaur extended feature flags. Nothing interesting, except may
3953 * FEMMS (bit 5), but VIA marks it as 'reserved', so never mind.
3954 * EBX, ECX - reserved.
3955 * We keep EAX but strips the rest.
3956 */
3957 uSubLeaf = 0;
3958 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0xc0000001), uSubLeaf)) != NULL)
3959 {
3960 pCurLeaf->uEbx = 0;
3961 pCurLeaf->uEcx = 0;
3962 pCurLeaf->uEdx = 0; /* Bits 0 thru 9 are documented on sandpil.org, but we don't want them, except maybe 5 (FEMMS). */
3963 uSubLeaf++;
3964 }
3965
3966 /* Cpuid 0xc0000002: Old Centaur Current Performance Data.
3967 * We only have fixed stale values, but should be harmless. */
3968
3969 /* Cpuid 0xc0000003: Reserved.
3970 * We zero this since we don't know what it may have been used for.
3971 */
3972 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0xc0000003));
3973
3974 /* Cpuid 0xc0000004: Centaur Performance Info.
3975 * We only have fixed stale values, but should be harmless. */
3976
3977
3978 /* Cpuid 0xc0000005...0xcfffffff: Unknown.
3979 * We don't know these and what they mean, so remove them. */
3980 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
3981 UINT32_C(0xc0000005), UINT32_C(0xcfffffff));
3982
3983 return VINF_SUCCESS;
3984#undef PORTABLE_DISABLE_FEATURE_BIT
3985#undef PORTABLE_CLEAR_BITS_WHEN
3986}
3987
3988
3989/**
3990 * Reads a value in /CPUM/IsaExts/ node.
3991 *
3992 * @returns VBox status code (error message raised).
3993 * @param pVM The cross context VM structure. (For errors.)
3994 * @param pIsaExts The /CPUM/IsaExts node (can be NULL).
3995 * @param pszValueName The value / extension name.
3996 * @param penmValue Where to return the choice.
3997 * @param enmDefault The default choice.
3998 */
3999static int cpumR3CpuIdReadIsaExtCfg(PVM pVM, PCFGMNODE pIsaExts, const char *pszValueName,
4000 CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault)
4001{
4002 /*
4003 * Try integer encoding first.
4004 */
4005 uint64_t uValue;
4006 int rc = CFGMR3QueryInteger(pIsaExts, pszValueName, &uValue);
4007 if (RT_SUCCESS(rc))
4008 switch (uValue)
4009 {
4010 case 0: *penmValue = CPUMISAEXTCFG_DISABLED; break;
4011 case 1: *penmValue = CPUMISAEXTCFG_ENABLED_SUPPORTED; break;
4012 case 2: *penmValue = CPUMISAEXTCFG_ENABLED_ALWAYS; break;
4013 case 9: *penmValue = CPUMISAEXTCFG_ENABLED_PORTABLE; break;
4014 default:
4015 return VMSetError(pVM, VERR_CPUM_INVALID_CONFIG_VALUE, RT_SRC_POS,
4016 "Invalid config value for '/CPUM/IsaExts/%s': %llu (expected 0/'disabled', 1/'enabled', 2/'portable', or 9/'forced')",
4017 pszValueName, uValue);
4018 }
4019 /*
4020 * If missing, use default.
4021 */
4022 else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
4023 *penmValue = enmDefault;
4024 else
4025 {
4026 if (rc == VERR_CFGM_NOT_INTEGER)
4027 {
4028 /*
4029 * Not an integer, try read it as a string.
4030 */
4031 char szValue[32];
4032 rc = CFGMR3QueryString(pIsaExts, pszValueName, szValue, sizeof(szValue));
4033 if (RT_SUCCESS(rc))
4034 {
4035 RTStrToLower(szValue);
4036 size_t cchValue = strlen(szValue);
4037#define EQ(a_str) (cchValue == sizeof(a_str) - 1U && memcmp(szValue, a_str, sizeof(a_str) - 1))
4038 if ( EQ("disabled") || EQ("disable") || EQ("off") || EQ("no"))
4039 *penmValue = CPUMISAEXTCFG_DISABLED;
4040 else if (EQ("enabled") || EQ("enable") || EQ("on") || EQ("yes"))
4041 *penmValue = CPUMISAEXTCFG_ENABLED_SUPPORTED;
4042 else if (EQ("forced") || EQ("force") || EQ("always"))
4043 *penmValue = CPUMISAEXTCFG_ENABLED_ALWAYS;
4044 else if (EQ("portable"))
4045 *penmValue = CPUMISAEXTCFG_ENABLED_PORTABLE;
4046 else if (EQ("default") || EQ("def"))
4047 *penmValue = enmDefault;
4048 else
4049 return VMSetError(pVM, VERR_CPUM_INVALID_CONFIG_VALUE, RT_SRC_POS,
4050 "Invalid config value for '/CPUM/IsaExts/%s': '%s' (expected 0/'disabled', 1/'enabled', 2/'portable', or 9/'forced')",
4051 pszValueName, uValue);
4052#undef EQ
4053 }
4054 }
4055 if (RT_FAILURE(rc))
4056 return VMSetError(pVM, rc, RT_SRC_POS, "Error reading config value '/CPUM/IsaExts/%s': %Rrc", pszValueName, rc);
4057 }
4058 return VINF_SUCCESS;
4059}
4060
4061
4062/**
4063 * Reads a value in /CPUM/IsaExts/ node, forcing it to DISABLED if wanted.
4064 *
4065 * @returns VBox status code (error message raised).
4066 * @param pVM The cross context VM structure. (For errors.)
4067 * @param pIsaExts The /CPUM/IsaExts node (can be NULL).
4068 * @param pszValueName The value / extension name.
4069 * @param penmValue Where to return the choice.
4070 * @param enmDefault The default choice.
4071 * @param fAllowed Allowed choice. Applied both to the result and to
4072 * the default value.
4073 */
4074static int cpumR3CpuIdReadIsaExtCfgEx(PVM pVM, PCFGMNODE pIsaExts, const char *pszValueName,
4075 CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault, bool fAllowed)
4076{
4077 int rc;
4078 if (fAllowed)
4079 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, enmDefault);
4080 else
4081 {
4082 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, false /*enmDefault*/);
4083 if (RT_SUCCESS(rc) && *penmValue == CPUMISAEXTCFG_ENABLED_ALWAYS)
4084 LogRel(("CPUM: Ignoring forced '%s'\n", pszValueName));
4085 *penmValue = CPUMISAEXTCFG_DISABLED;
4086 }
4087 return rc;
4088}
4089
4090
4091/**
4092 * Reads a value in /CPUM/IsaExts/ node that used to be located in /CPUM/.
4093 *
4094 * @returns VBox status code (error message raised).
4095 * @param pVM The cross context VM structure. (For errors.)
4096 * @param pIsaExts The /CPUM/IsaExts node (can be NULL).
4097 * @param pCpumCfg The /CPUM node (can be NULL).
4098 * @param pszValueName The value / extension name.
4099 * @param penmValue Where to return the choice.
4100 * @param enmDefault The default choice.
4101 */
4102static int cpumR3CpuIdReadIsaExtCfgLegacy(PVM pVM, PCFGMNODE pIsaExts, PCFGMNODE pCpumCfg, const char *pszValueName,
4103 CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault)
4104{
4105 if (CFGMR3Exists(pCpumCfg, pszValueName))
4106 {
4107 if (!CFGMR3Exists(pIsaExts, pszValueName))
4108 LogRel(("Warning: /CPUM/%s is deprecated, use /CPUM/IsaExts/%s instead.\n", pszValueName, pszValueName));
4109 else
4110 return VMSetError(pVM, VERR_DUPLICATE, RT_SRC_POS,
4111 "Duplicate config values '/CPUM/%s' and '/CPUM/IsaExts/%s' - please remove the former!",
4112 pszValueName, pszValueName);
4113
4114 bool fLegacy;
4115 int rc = CFGMR3QueryBoolDef(pCpumCfg, pszValueName, &fLegacy, enmDefault != CPUMISAEXTCFG_DISABLED);
4116 if (RT_SUCCESS(rc))
4117 {
4118 *penmValue = fLegacy;
4119 return VINF_SUCCESS;
4120 }
4121 return VMSetError(pVM, VERR_DUPLICATE, RT_SRC_POS, "Error querying '/CPUM/%s': %Rrc", pszValueName, rc);
4122 }
4123
4124 return cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, enmDefault);
4125}
4126
4127
4128static int cpumR3CpuIdReadConfig(PVM pVM, PCPUMCPUIDCONFIG pConfig, PCFGMNODE pCpumCfg, bool fNestedPagingAndFullGuestExec)
4129{
4130 int rc;
4131
4132 /** @cfgm{/CPUM/PortableCpuIdLevel, 8-bit, 0, 3, 0}
4133 * When non-zero CPUID features that could cause portability issues will be
4134 * stripped. The higher the value the more features gets stripped. Higher
4135 * values should only be used when older CPUs are involved since it may
4136 * harm performance and maybe also cause problems with specific guests. */
4137 rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pVM->cpum.s.u8PortableCpuIdLevel, 0);
4138 AssertLogRelRCReturn(rc, rc);
4139
4140 /** @cfgm{/CPUM/GuestCpuName, string}
4141 * The name of the CPU we're to emulate. The default is the host CPU.
4142 * Note! CPUs other than "host" one is currently unsupported. */
4143 rc = CFGMR3QueryStringDef(pCpumCfg, "GuestCpuName", pConfig->szCpuName, sizeof(pConfig->szCpuName), "host");
4144 AssertLogRelRCReturn(rc, rc);
4145
4146 /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
4147 * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
4148 * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
4149 * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
4150 */
4151 rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &pConfig->fNt4LeafLimit, false);
4152 AssertLogRelRCReturn(rc, rc);
4153
4154 /** @cfgm{/CPUM/InvariantTsc, boolean, true}
4155 * Pass-through the invariant TSC flag in 0x80000007 if available on the host
4156 * CPU. On AMD CPUs, users may wish to suppress it to avoid trouble from older
4157 * 64-bit linux guests which assume the presence of AMD performance counters
4158 * that we do not virtualize.
4159 */
4160 rc = CFGMR3QueryBoolDef(pCpumCfg, "InvariantTsc", &pConfig->fInvariantTsc, true);
4161 AssertLogRelRCReturn(rc, rc);
4162
4163 /** @cfgm{/CPUM/ForceVme, boolean, false}
4164 * Always expose the VME (Virtual-8086 Mode Extensions) capability if true.
4165 * By default the flag is passed thru as is from the host CPU, except
4166 * on AMD Ryzen CPUs where it's masked to avoid trouble with XP/Server 2003
4167 * guests and DOS boxes in general.
4168 */
4169 rc = CFGMR3QueryBoolDef(pCpumCfg, "ForceVme", &pConfig->fForceVme, false);
4170 AssertLogRelRCReturn(rc, rc);
4171
4172 /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
4173 * Restrict the reported CPU family+model+stepping of intel CPUs. This is
4174 * probably going to be a temporary hack, so don't depend on this.
4175 * The 1st byte of the value is the stepping, the 2nd byte value is the model
4176 * number and the 3rd byte value is the family, and the 4th value must be zero.
4177 */
4178 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &pConfig->uMaxIntelFamilyModelStep, UINT32_MAX);
4179 AssertLogRelRCReturn(rc, rc);
4180
4181 /** @cfgm{/CPUM/MaxStdLeaf, uint32_t, 0x00000016}
4182 * The last standard leaf to keep. The actual last value that is stored in EAX
4183 * is RT_MAX(CPUID[0].EAX,/CPUM/MaxStdLeaf). Leaves beyond the max leaf are
4184 * removed. (This works independently of and differently from NT4LeafLimit.)
4185 * The default is usually set to what we're able to reasonably sanitize.
4186 */
4187 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxStdLeaf", &pConfig->uMaxStdLeaf, UINT32_C(0x00000016));
4188 AssertLogRelRCReturn(rc, rc);
4189
4190 /** @cfgm{/CPUM/MaxExtLeaf, uint32_t, 0x8000001e}
4191 * The last extended leaf to keep. The actual last value that is stored in EAX
4192 * is RT_MAX(CPUID[0x80000000].EAX,/CPUM/MaxStdLeaf). Leaves beyond the max
4193 * leaf are removed. The default is set to what we're able to sanitize.
4194 */
4195 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxExtLeaf", &pConfig->uMaxExtLeaf, UINT32_C(0x8000001e));
4196 AssertLogRelRCReturn(rc, rc);
4197
4198 /** @cfgm{/CPUM/MaxCentaurLeaf, uint32_t, 0xc0000004}
4199 * The last extended leaf to keep. The actual last value that is stored in EAX
4200 * is RT_MAX(CPUID[0xc0000000].EAX,/CPUM/MaxCentaurLeaf). Leaves beyond the max
4201 * leaf are removed. The default is set to what we're able to sanitize.
4202 */
4203 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxCentaurLeaf", &pConfig->uMaxCentaurLeaf, UINT32_C(0xc0000004));
4204 AssertLogRelRCReturn(rc, rc);
4205
4206 bool fQueryNestedHwvirt = false
4207#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4208 || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
4209 || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON
4210#endif
4211#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4212 || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
4213 || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA
4214#endif
4215 ;
4216 if (fQueryNestedHwvirt)
4217 {
4218 /** @cfgm{/CPUM/NestedHWVirt, bool, false}
4219 * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest.
4220 * The default is false, and when enabled requires a 64-bit CPU with support for
4221 * nested-paging and AMD-V or unrestricted guest mode.
4222 */
4223 rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false);
4224 AssertLogRelRCReturn(rc, rc);
4225 if (pConfig->fNestedHWVirt)
4226 {
4227 /** @todo Think about enabling this later with NEM/KVM. */
4228 if (VM_IS_NEM_ENABLED(pVM))
4229 {
4230 LogRel(("CPUM: WARNING! Can't turn on nested VT-x/AMD-V when NEM is used! (later)\n"));
4231 pConfig->fNestedHWVirt = false;
4232 }
4233 else if (!fNestedPagingAndFullGuestExec)
4234 return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
4235 "Cannot enable nested VT-x/AMD-V without nested-paging and unrestricted guest execution!\n");
4236 }
4237
4238 if (pConfig->fNestedHWVirt)
4239 {
4240 /** @cfgm{/CPUM/NestedVmxPreemptTimer, bool, true}
4241 * Whether to expose the VMX-preemption timer feature to the guest (if also
4242 * supported by the host hardware). When disabled will prevent exposing the
4243 * VMX-preemption timer feature to the guest even if the host supports it.
4244 *
4245 * @todo Currently disabled, see @bugref{9180#c108}.
4246 */
4247 rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxPreemptTimer", &pVM->cpum.s.fNestedVmxPreemptTimer, false);
4248 AssertLogRelRCReturn(rc, rc);
4249
4250 /** @cfgm{/CPUM/NestedVmxEpt, bool, true}
4251 * Whether to expose the EPT feature to the guest. The default is false. When
4252 * disabled will automatically prevent exposing features that rely on
4253 */
4254 rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxEpt", &pVM->cpum.s.fNestedVmxEpt, false);
4255 AssertLogRelRCReturn(rc, rc);
4256
4257 /** @cfgm{/CPUM/NestedVmxUnrestrictedGuest, bool, true}
4258 * Whether to expose the Unrestricted Guest feature to the guest. The default is
4259 * false. When disabled will automatically prevent exposing features that rely on
4260 * it.
4261 */
4262 rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxUnrestrictedGuest", &pVM->cpum.s.fNestedVmxUnrestrictedGuest, false);
4263 AssertLogRelRCReturn(rc, rc);
4264
4265 if ( pVM->cpum.s.fNestedVmxUnrestrictedGuest
4266 && !pVM->cpum.s.fNestedVmxEpt)
4267 {
4268 LogRel(("CPUM: WARNING! Can't expose \"Unrestricted Guest\" to the guest when EPT is not exposed!\n"));
4269 pVM->cpum.s.fNestedVmxUnrestrictedGuest = false;
4270 }
4271 }
4272 }
4273
4274 /*
4275 * Instruction Set Architecture (ISA) Extensions.
4276 */
4277 PCFGMNODE pIsaExts = CFGMR3GetChild(pCpumCfg, "IsaExts");
4278 if (pIsaExts)
4279 {
4280 rc = CFGMR3ValidateConfig(pIsaExts, "/CPUM/IsaExts/",
4281 "CMPXCHG16B"
4282 "|MONITOR"
4283 "|MWaitExtensions"
4284 "|SSE4.1"
4285 "|SSE4.2"
4286 "|XSAVE"
4287 "|AVX"
4288 "|AVX2"
4289 "|AESNI"
4290 "|PCLMUL"
4291 "|POPCNT"
4292 "|MOVBE"
4293 "|RDRAND"
4294 "|RDSEED"
4295 "|CLFLUSHOPT"
4296 "|FSGSBASE"
4297 "|PCID"
4298 "|INVPCID"
4299 "|FlushCmdMsr"
4300 "|ABM"
4301 "|SSE4A"
4302 "|MISALNSSE"
4303 "|3DNOWPRF"
4304 "|AXMMX"
4305 , "" /*pszValidNodes*/, "CPUM" /*pszWho*/, 0 /*uInstance*/);
4306 if (RT_FAILURE(rc))
4307 return rc;
4308 }
4309
4310 /** @cfgm{/CPUM/IsaExts/CMPXCHG16B, boolean, true}
4311 * Expose CMPXCHG16B to the guest if available. All host CPUs which support
4312 * hardware virtualization have it.
4313 */
4314 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "CMPXCHG16B", &pConfig->enmCmpXchg16b, true);
4315 AssertLogRelRCReturn(rc, rc);
4316
4317 /** @cfgm{/CPUM/IsaExts/MONITOR, boolean, true}
4318 * Expose MONITOR/MWAIT instructions to the guest.
4319 */
4320 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "MONITOR", &pConfig->enmMonitor, true);
4321 AssertLogRelRCReturn(rc, rc);
4322
4323 /** @cfgm{/CPUM/IsaExts/MWaitExtensions, boolean, false}
4324 * Expose MWAIT extended features to the guest. For now we expose just MWAIT
4325 * break on interrupt feature (bit 1).
4326 */
4327 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "MWaitExtensions", &pConfig->enmMWaitExtensions, false);
4328 AssertLogRelRCReturn(rc, rc);
4329
4330 /** @cfgm{/CPUM/IsaExts/SSE4.1, boolean, true}
4331 * Expose SSE4.1 to the guest if available.
4332 */
4333 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "SSE4.1", &pConfig->enmSse41, true);
4334 AssertLogRelRCReturn(rc, rc);
4335
4336 /** @cfgm{/CPUM/IsaExts/SSE4.2, boolean, true}
4337 * Expose SSE4.2 to the guest if available.
4338 */
4339 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "SSE4.2", &pConfig->enmSse42, true);
4340 AssertLogRelRCReturn(rc, rc);
4341
4342 bool const fMayHaveXSave = pVM->cpum.s.HostFeatures.fXSaveRstor
4343 && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor
4344 && ( !VM_IS_NEM_ENABLED(pVM)
4345 ? fNestedPagingAndFullGuestExec
4346 : NEMHCGetFeatures(pVM) & NEM_FEAT_F_XSAVE_XRSTOR);
4347 uint64_t const fXStateHostMask = pVM->cpum.s.fXStateHostMask;
4348
4349 /** @cfgm{/CPUM/IsaExts/XSAVE, boolean, depends}
4350 * Expose XSAVE/XRSTOR to the guest if available. For the time being the
4351 * default is to only expose this to VMs with nested paging and AMD-V or
4352 * unrestricted guest execution mode. Not possible to force this one without
4353 * host support at the moment.
4354 */
4355 rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "XSAVE", &pConfig->enmXSave, fNestedPagingAndFullGuestExec,
4356 fMayHaveXSave /*fAllowed*/);
4357 AssertLogRelRCReturn(rc, rc);
4358
4359 /** @cfgm{/CPUM/IsaExts/AVX, boolean, depends}
4360 * Expose the AVX instruction set extensions to the guest if available and
4361 * XSAVE is exposed too. For the time being the default is to only expose this
4362 * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
4363 */
4364 rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX", &pConfig->enmAvx, fNestedPagingAndFullGuestExec,
4365 fMayHaveXSave && pConfig->enmXSave && (fXStateHostMask & XSAVE_C_YMM) /*fAllowed*/);
4366 AssertLogRelRCReturn(rc, rc);
4367
4368 /** @cfgm{/CPUM/IsaExts/AVX2, boolean, depends}
4369 * Expose the AVX2 instruction set extensions to the guest if available and
4370 * XSAVE is exposed too. For the time being the default is to only expose this
4371 * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
4372 */
4373 rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX2", &pConfig->enmAvx2, fNestedPagingAndFullGuestExec /* temporarily */,
4374 fMayHaveXSave && pConfig->enmXSave && (fXStateHostMask & XSAVE_C_YMM) /*fAllowed*/);
4375 AssertLogRelRCReturn(rc, rc);
4376
4377 /** @cfgm{/CPUM/IsaExts/AESNI, isaextcfg, depends}
4378 * Whether to expose the AES instructions to the guest. For the time being the
4379 * default is to only do this for VMs with nested paging and AMD-V or
4380 * unrestricted guest mode.
4381 */
4382 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "AESNI", &pConfig->enmAesNi, fNestedPagingAndFullGuestExec);
4383 AssertLogRelRCReturn(rc, rc);
4384
4385 /** @cfgm{/CPUM/IsaExts/PCLMUL, isaextcfg, depends}
4386 * Whether to expose the PCLMULQDQ instructions to the guest. For the time
4387 * being the default is to only do this for VMs with nested paging and AMD-V or
4388 * unrestricted guest mode.
4389 */
4390 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "PCLMUL", &pConfig->enmPClMul, fNestedPagingAndFullGuestExec);
4391 AssertLogRelRCReturn(rc, rc);
4392
4393 /** @cfgm{/CPUM/IsaExts/POPCNT, isaextcfg, depends}
4394 * Whether to expose the POPCNT instructions to the guest. For the time
4395 * being the default is to only do this for VMs with nested paging and AMD-V or
4396 * unrestricted guest mode.
4397 */
4398 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "POPCNT", &pConfig->enmPopCnt, fNestedPagingAndFullGuestExec);
4399 AssertLogRelRCReturn(rc, rc);
4400
4401 /** @cfgm{/CPUM/IsaExts/MOVBE, isaextcfg, depends}
4402 * Whether to expose the MOVBE instructions to the guest. For the time
4403 * being the default is to only do this for VMs with nested paging and AMD-V or
4404 * unrestricted guest mode.
4405 */
4406 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MOVBE", &pConfig->enmMovBe, fNestedPagingAndFullGuestExec);
4407 AssertLogRelRCReturn(rc, rc);
4408
4409 /** @cfgm{/CPUM/IsaExts/RDRAND, isaextcfg, depends}
4410 * Whether to expose the RDRAND instructions to the guest. For the time being
4411 * the default is to only do this for VMs with nested paging and AMD-V or
4412 * unrestricted guest mode.
4413 */
4414 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "RDRAND", &pConfig->enmRdRand, fNestedPagingAndFullGuestExec);
4415 AssertLogRelRCReturn(rc, rc);
4416
4417 /** @cfgm{/CPUM/IsaExts/RDSEED, isaextcfg, depends}
4418 * Whether to expose the RDSEED instructions to the guest. For the time being
4419 * the default is to only do this for VMs with nested paging and AMD-V or
4420 * unrestricted guest mode.
4421 */
4422 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "RDSEED", &pConfig->enmRdSeed, fNestedPagingAndFullGuestExec);
4423 AssertLogRelRCReturn(rc, rc);
4424
4425 /** @cfgm{/CPUM/IsaExts/CLFLUSHOPT, isaextcfg, depends}
4426 * Whether to expose the CLFLUSHOPT instructions to the guest. For the time
4427 * being the default is to only do this for VMs with nested paging and AMD-V or
4428 * unrestricted guest mode.
4429 */
4430 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "CLFLUSHOPT", &pConfig->enmCLFlushOpt, fNestedPagingAndFullGuestExec);
4431 AssertLogRelRCReturn(rc, rc);
4432
4433 /** @cfgm{/CPUM/IsaExts/FSGSBASE, isaextcfg, true}
4434 * Whether to expose the read/write FSGSBASE instructions to the guest.
4435 */
4436 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "FSGSBASE", &pConfig->enmFsGsBase, true);
4437 AssertLogRelRCReturn(rc, rc);
4438
4439 /** @cfgm{/CPUM/IsaExts/PCID, isaextcfg, true}
4440 * Whether to expose the PCID feature to the guest.
4441 */
4442 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "PCID", &pConfig->enmPcid, pConfig->enmFsGsBase);
4443 AssertLogRelRCReturn(rc, rc);
4444
4445 /** @cfgm{/CPUM/IsaExts/INVPCID, isaextcfg, true}
4446 * Whether to expose the INVPCID instruction to the guest.
4447 */
4448 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "INVPCID", &pConfig->enmInvpcid, pConfig->enmFsGsBase);
4449 AssertLogRelRCReturn(rc, rc);
4450
4451 /** @cfgm{/CPUM/IsaExts/FlushCmdMsr, isaextcfg, true}
4452 * Whether to expose the IA32_FLUSH_CMD MSR to the guest.
4453 */
4454 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "FlushCmdMsr", &pConfig->enmFlushCmdMsr, CPUMISAEXTCFG_ENABLED_SUPPORTED);
4455 AssertLogRelRCReturn(rc, rc);
4456
4457 /** @cfgm{/CPUM/IsaExts/MdsClear, isaextcfg, true}
4458 * Whether to advertise the VERW and MDS related IA32_FLUSH_CMD MSR bits to
4459 * the guest. Requires FlushCmdMsr to be present too.
4460 */
4461 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MdsClear", &pConfig->enmMdsClear, CPUMISAEXTCFG_ENABLED_SUPPORTED);
4462 AssertLogRelRCReturn(rc, rc);
4463
4464 /** @cfgm{/CPUM/IsaExts/ArchCapMSr, isaextcfg, true}
4465 * Whether to expose the MSR_IA32_ARCH_CAPABILITIES MSR to the guest.
4466 */
4467 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ArchCapMsr", &pConfig->enmArchCapMsr, CPUMISAEXTCFG_ENABLED_SUPPORTED);
4468 AssertLogRelRCReturn(rc, rc);
4469
4470
4471 /* AMD: */
4472
4473 /** @cfgm{/CPUM/IsaExts/ABM, isaextcfg, depends}
4474 * Whether to expose the AMD ABM instructions to the guest. For the time
4475 * being the default is to only do this for VMs with nested paging and AMD-V or
4476 * unrestricted guest mode.
4477 */
4478 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ABM", &pConfig->enmAbm, fNestedPagingAndFullGuestExec);
4479 AssertLogRelRCReturn(rc, rc);
4480
4481 /** @cfgm{/CPUM/IsaExts/SSE4A, isaextcfg, depends}
4482 * Whether to expose the AMD SSE4A instructions to the guest. For the time
4483 * being the default is to only do this for VMs with nested paging and AMD-V or
4484 * unrestricted guest mode.
4485 */
4486 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "SSE4A", &pConfig->enmSse4A, fNestedPagingAndFullGuestExec);
4487 AssertLogRelRCReturn(rc, rc);
4488
4489 /** @cfgm{/CPUM/IsaExts/MISALNSSE, isaextcfg, depends}
4490 * Whether to expose the AMD MisAlSse feature (MXCSR flag 17) to the guest. For
4491 * the time being the default is to only do this for VMs with nested paging and
4492 * AMD-V or unrestricted guest mode.
4493 */
4494 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MISALNSSE", &pConfig->enmMisAlnSse, fNestedPagingAndFullGuestExec);
4495 AssertLogRelRCReturn(rc, rc);
4496
4497 /** @cfgm{/CPUM/IsaExts/3DNOWPRF, isaextcfg, depends}
4498 * Whether to expose the AMD 3D Now! prefetch instructions to the guest.
4499 * For the time being the default is to only do this for VMs with nested paging
4500 * and AMD-V or unrestricted guest mode.
4501 */
4502 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "3DNOWPRF", &pConfig->enm3dNowPrf, fNestedPagingAndFullGuestExec);
4503 AssertLogRelRCReturn(rc, rc);
4504
4505 /** @cfgm{/CPUM/IsaExts/AXMMX, isaextcfg, depends}
4506 * Whether to expose the AMD's MMX Extensions to the guest. For the time being
4507 * the default is to only do this for VMs with nested paging and AMD-V or
4508 * unrestricted guest mode.
4509 */
4510 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "AXMMX", &pConfig->enmAmdExtMmx, fNestedPagingAndFullGuestExec);
4511 AssertLogRelRCReturn(rc, rc);
4512
4513 return VINF_SUCCESS;
4514}
4515
4516
4517/**
4518 * Initializes the emulated CPU's CPUID & MSR information.
4519 *
4520 * @returns VBox status code.
4521 * @param pVM The cross context VM structure.
4522 * @param pHostMsrs Pointer to the host MSRs.
4523 */
4524int cpumR3InitCpuIdAndMsrs(PVM pVM, PCCPUMMSRS pHostMsrs)
4525{
4526 Assert(pHostMsrs);
4527
4528 PCPUM pCpum = &pVM->cpum.s;
4529 PCFGMNODE pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
4530
4531 /*
4532 * Set the fCpuIdApicFeatureVisible flags so the APIC can assume visibility
4533 * on construction and manage everything from here on.
4534 */
4535 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
4536 {
4537 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
4538 pVCpu->cpum.s.fCpuIdApicFeatureVisible = true;
4539 }
4540
4541 /*
4542 * Read the configuration.
4543 */
4544 CPUMCPUIDCONFIG Config;
4545 RT_ZERO(Config);
4546
4547 bool const fNestedPagingAndFullGuestExec = VM_IS_NEM_ENABLED(pVM) || HMAreNestedPagingAndFullGuestExecEnabled(pVM);
4548 int rc = cpumR3CpuIdReadConfig(pVM, &Config, pCpumCfg, fNestedPagingAndFullGuestExec);
4549 AssertRCReturn(rc, rc);
4550
4551 /*
4552 * Get the guest CPU data from the database and/or the host.
4553 *
4554 * The CPUID and MSRs are currently living on the regular heap to avoid
4555 * fragmenting the hyper heap (and because there isn't/wasn't any realloc
4556 * API for the hyper heap). This means special cleanup considerations.
4557 */
4558 /** @todo The hyper heap will be removed ASAP, so the final destination is
4559 * now a fixed sized arrays in the VM structure. Maybe we can simplify
4560 * this allocation fun a little now? Or maybe it's too convenient for
4561 * the CPU reporter code... No time to figure that out now. */
4562 rc = cpumR3DbGetCpuInfo(Config.szCpuName, &pCpum->GuestInfo);
4563 if (RT_FAILURE(rc))
4564 return rc == VERR_CPUM_DB_CPU_NOT_FOUND
4565 ? VMSetError(pVM, rc, RT_SRC_POS,
4566 "Info on guest CPU '%s' could not be found. Please, select a different CPU.", Config.szCpuName)
4567 : rc;
4568
4569 if (pCpum->GuestInfo.fMxCsrMask & ~pVM->cpum.s.fHostMxCsrMask)
4570 {
4571 LogRel(("Stripping unsupported MXCSR bits from guest mask: %#x -> %#x (host: %#x)\n", pCpum->GuestInfo.fMxCsrMask,
4572 pCpum->GuestInfo.fMxCsrMask & pVM->cpum.s.fHostMxCsrMask, pVM->cpum.s.fHostMxCsrMask));
4573 pCpum->GuestInfo.fMxCsrMask &= pVM->cpum.s.fHostMxCsrMask;
4574 }
4575 LogRel(("CPUM: MXCSR_MASK=%#x (host: %#x)\n", pCpum->GuestInfo.fMxCsrMask, pVM->cpum.s.fHostMxCsrMask));
4576
4577 /** @cfgm{/CPUM/MSRs/[Name]/[First|Last|Type|Value|...],}
4578 * Overrides the guest MSRs.
4579 */
4580 rc = cpumR3LoadMsrOverrides(pVM, CFGMR3GetChild(pCpumCfg, "MSRs"));
4581
4582 /** @cfgm{/CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
4583 * Overrides the CPUID leaf values (from the host CPU usually) used for
4584 * calculating the guest CPUID leaves. This can be used to preserve the CPUID
4585 * values when moving a VM to a different machine. Another use is restricting
4586 * (or extending) the feature set exposed to the guest. */
4587 if (RT_SUCCESS(rc))
4588 rc = cpumR3LoadCpuIdOverrides(pVM, CFGMR3GetChild(pCpumCfg, "HostCPUID"), "HostCPUID");
4589
4590 if (RT_SUCCESS(rc) && CFGMR3GetChild(pCpumCfg, "CPUID")) /* 2nd override, now discontinued. */
4591 rc = VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS,
4592 "Found unsupported configuration node '/CPUM/CPUID/'. "
4593 "Please use IMachine::setCPUIDLeaf() instead.");
4594
4595 CPUMMSRS GuestMsrs;
4596 RT_ZERO(GuestMsrs);
4597
4598 /*
4599 * Pre-explode the CPUID info.
4600 */
4601 if (RT_SUCCESS(rc))
4602 rc = cpumR3CpuIdExplodeFeatures(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, &GuestMsrs,
4603 &pCpum->GuestFeatures);
4604
4605 /*
4606 * Sanitize the cpuid information passed on to the guest.
4607 */
4608 if (RT_SUCCESS(rc))
4609 {
4610 rc = cpumR3CpuIdSanitize(pVM, pCpum, &Config);
4611 if (RT_SUCCESS(rc))
4612 {
4613 cpumR3CpuIdLimitLeaves(pCpum, &Config);
4614 cpumR3CpuIdLimitIntelFamModStep(pCpum, &Config);
4615 }
4616 }
4617
4618 /*
4619 * Setup MSRs introduced in microcode updates or that are otherwise not in
4620 * the CPU profile, but are advertised in the CPUID info we just sanitized.
4621 */
4622 if (RT_SUCCESS(rc))
4623 rc = cpumR3MsrReconcileWithCpuId(pVM);
4624 /*
4625 * MSR fudging.
4626 */
4627 if (RT_SUCCESS(rc))
4628 {
4629 /** @cfgm{/CPUM/FudgeMSRs, boolean, true}
4630 * Fudges some common MSRs if not present in the selected CPU database entry.
4631 * This is for trying to keep VMs running when moved between different hosts
4632 * and different CPU vendors. */
4633 bool fEnable;
4634 rc = CFGMR3QueryBoolDef(pCpumCfg, "FudgeMSRs", &fEnable, true); AssertRC(rc);
4635 if (RT_SUCCESS(rc) && fEnable)
4636 {
4637 rc = cpumR3MsrApplyFudge(pVM);
4638 AssertLogRelRC(rc);
4639 }
4640 }
4641 if (RT_SUCCESS(rc))
4642 {
4643 /*
4644 * Move the MSR and CPUID arrays over to the static VM structure allocations
4645 * and explode guest CPU features again.
4646 */
4647 void *pvFree = pCpum->GuestInfo.paCpuIdLeavesR3;
4648 rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCpum, pCpum->GuestInfo.paCpuIdLeavesR3,
4649 pCpum->GuestInfo.cCpuIdLeaves, &GuestMsrs);
4650 RTMemFree(pvFree);
4651
4652 AssertFatalMsg(pCpum->GuestInfo.cMsrRanges <= RT_ELEMENTS(pCpum->GuestInfo.aMsrRanges),
4653 ("%u\n", pCpum->GuestInfo.cMsrRanges));
4654 memcpy(pCpum->GuestInfo.aMsrRanges, pCpum->GuestInfo.paMsrRangesR3,
4655 sizeof(pCpum->GuestInfo.paMsrRangesR3[0]) * pCpum->GuestInfo.cMsrRanges);
4656 RTMemFree(pCpum->GuestInfo.paMsrRangesR3);
4657 pCpum->GuestInfo.paMsrRangesR3 = pCpum->GuestInfo.aMsrRanges;
4658
4659 AssertLogRelRCReturn(rc, rc);
4660
4661 /*
4662 * Finally, initialize guest VMX MSRs.
4663 *
4664 * This needs to be done -after- exploding guest features and sanitizing CPUID leaves
4665 * as constructing VMX capabilities MSRs rely on CPU feature bits like long mode,
4666 * unrestricted-guest execution, CR4 feature bits and possibly more in the future.
4667 */
4668 if (pVM->cpum.s.GuestFeatures.fVmx)
4669 {
4670 Assert(Config.fNestedHWVirt);
4671 cpumR3InitVmxGuestFeaturesAndMsrs(pVM, &pHostMsrs->hwvirt.vmx, &GuestMsrs.hwvirt.vmx);
4672
4673 /* Copy MSRs to all VCPUs */
4674 PCVMXMSRS pVmxMsrs = &GuestMsrs.hwvirt.vmx;
4675 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
4676 {
4677 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
4678 memcpy(&pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs, pVmxMsrs, sizeof(*pVmxMsrs));
4679 }
4680 }
4681
4682 /*
4683 * Some more configuration that we're applying at the end of everything
4684 * via the CPUMR3SetGuestCpuIdFeature API.
4685 */
4686
4687 /* Check if PAE was explicitely enabled by the user. */
4688 bool fEnable;
4689 rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false);
4690 AssertRCReturn(rc, rc);
4691 if (fEnable)
4692 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
4693
4694 /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */
4695 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false);
4696 AssertRCReturn(rc, rc);
4697 if (fEnable)
4698 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
4699
4700 /* Check if speculation control is enabled. */
4701 rc = CFGMR3QueryBoolDef(pCpumCfg, "SpecCtrl", &fEnable, false);
4702 AssertRCReturn(rc, rc);
4703 if (fEnable)
4704 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SPEC_CTRL);
4705 else
4706 {
4707 /*
4708 * Set the "SSBD-not-needed" flag to work around a bug in some Linux kernels when the VIRT_SPEC_CTL
4709 * feature is not exposed on AMD CPUs and there is only 1 vCPU configured.
4710 * This was observed with kernel "4.15.0-29-generic #31~16.04.1-Ubuntu" but more versions are likely affected.
4711 *
4712 * The kernel doesn't initialize a lock and causes a NULL pointer exception later on when configuring SSBD:
4713 * EIP: _raw_spin_lock+0x14/0x30
4714 * EFLAGS: 00010046 CPU: 0
4715 * EAX: 00000000 EBX: 00000001 ECX: 00000004 EDX: 00000000
4716 * ESI: 00000000 EDI: 00000000 EBP: ee023f1c ESP: ee023f18
4717 * DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
4718 * CR0: 80050033 CR2: 00000004 CR3: 3671c180 CR4: 000006f0
4719 * Call Trace:
4720 * speculative_store_bypass_update+0x8e/0x180
4721 * ssb_prctl_set+0xc0/0xe0
4722 * arch_seccomp_spec_mitigate+0x1d/0x20
4723 * do_seccomp+0x3cb/0x610
4724 * SyS_seccomp+0x16/0x20
4725 * do_fast_syscall_32+0x7f/0x1d0
4726 * entry_SYSENTER_32+0x4e/0x7c
4727 *
4728 * The lock would've been initialized in process.c:speculative_store_bypass_ht_init() called from two places in smpboot.c.
4729 * First when a secondary CPU is started and second in native_smp_prepare_cpus() which is not called in a single vCPU environment.
4730 *
4731 * As spectre control features are completely disabled anyway when we arrived here there is no harm done in informing the
4732 * guest to not even try.
4733 */
4734 if ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
4735 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
4736 {
4737 PCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x80000008), 0);
4738 if (pLeaf)
4739 {
4740 pLeaf->uEbx |= X86_CPUID_AMD_EFEID_EBX_NO_SSBD_REQUIRED;
4741 LogRel(("CPUM: Set SSBD not required flag for AMD to work around some buggy Linux kernels!\n"));
4742 }
4743 }
4744 }
4745
4746 return VINF_SUCCESS;
4747 }
4748
4749 /*
4750 * Failed before switching to hyper heap.
4751 */
4752 RTMemFree(pCpum->GuestInfo.paCpuIdLeavesR3);
4753 pCpum->GuestInfo.paCpuIdLeavesR3 = NULL;
4754 RTMemFree(pCpum->GuestInfo.paMsrRangesR3);
4755 pCpum->GuestInfo.paMsrRangesR3 = NULL;
4756 return rc;
4757}
4758
4759
4760/**
4761 * Sets a CPUID feature bit during VM initialization.
4762 *
4763 * Since the CPUID feature bits are generally related to CPU features, other
4764 * CPUM configuration like MSRs can also be modified by calls to this API.
4765 *
4766 * @param pVM The cross context VM structure.
4767 * @param enmFeature The feature to set.
4768 */
4769VMMR3_INT_DECL(void) CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
4770{
4771 PCPUMCPUIDLEAF pLeaf;
4772 PCPUMMSRRANGE pMsrRange;
4773
4774 switch (enmFeature)
4775 {
4776 /*
4777 * Set the APIC bit in both feature masks.
4778 */
4779 case CPUMCPUIDFEATURE_APIC:
4780 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
4781 if (pLeaf && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
4782 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
4783
4784 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4785 if (pLeaf && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
4786 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
4787
4788 pVM->cpum.s.GuestFeatures.fApic = 1;
4789
4790 /* Make sure we've got the APICBASE MSR present. */
4791 pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_APICBASE);
4792 if (!pMsrRange)
4793 {
4794 static CPUMMSRRANGE const s_ApicBase =
4795 {
4796 /*.uFirst =*/ MSR_IA32_APICBASE, /*.uLast =*/ MSR_IA32_APICBASE,
4797 /*.enmRdFn =*/ kCpumMsrRdFn_Ia32ApicBase, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32ApicBase,
4798 /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
4799 /*.szName = */ "IA32_APIC_BASE"
4800 };
4801 int rc = CPUMR3MsrRangesInsert(pVM, &s_ApicBase);
4802 AssertLogRelRC(rc);
4803 }
4804
4805 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled xAPIC\n"));
4806 break;
4807
4808 /*
4809 * Set the x2APIC bit in the standard feature mask.
4810 * Note! ASSUMES CPUMCPUIDFEATURE_APIC is called first.
4811 */
4812 case CPUMCPUIDFEATURE_X2APIC:
4813 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
4814 if (pLeaf)
4815 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
4816 pVM->cpum.s.GuestFeatures.fX2Apic = 1;
4817
4818 /* Make sure the MSR doesn't GP or ignore the EXTD bit. */
4819 pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_APICBASE);
4820 if (pMsrRange)
4821 {
4822 pMsrRange->fWrGpMask &= ~MSR_IA32_APICBASE_EXTD;
4823 pMsrRange->fWrIgnMask &= ~MSR_IA32_APICBASE_EXTD;
4824 }
4825
4826 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
4827 break;
4828
4829 /*
4830 * Set the sysenter/sysexit bit in the standard feature mask.
4831 * Assumes the caller knows what it's doing! (host must support these)
4832 */
4833 case CPUMCPUIDFEATURE_SEP:
4834 if (!pVM->cpum.s.HostFeatures.fSysEnter)
4835 {
4836 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
4837 return;
4838 }
4839
4840 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
4841 if (pLeaf)
4842 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
4843 pVM->cpum.s.GuestFeatures.fSysEnter = 1;
4844 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
4845 break;
4846
4847 /*
4848 * Set the syscall/sysret bit in the extended feature mask.
4849 * Assumes the caller knows what it's doing! (host must support these)
4850 */
4851 case CPUMCPUIDFEATURE_SYSCALL:
4852 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4853 if ( !pLeaf
4854 || !pVM->cpum.s.HostFeatures.fSysCall)
4855 {
4856 LogRel(("CPUM: WARNING! Can't turn on SYSCALL/SYSRET when the host doesn't support it!\n"));
4857 return;
4858 }
4859
4860 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
4861 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
4862 pVM->cpum.s.GuestFeatures.fSysCall = 1;
4863 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
4864 break;
4865
4866 /*
4867 * Set the PAE bit in both feature masks.
4868 * Assumes the caller knows what it's doing! (host must support these)
4869 */
4870 case CPUMCPUIDFEATURE_PAE:
4871 if (!pVM->cpum.s.HostFeatures.fPae)
4872 {
4873 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
4874 return;
4875 }
4876
4877 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
4878 if (pLeaf)
4879 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
4880
4881 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4882 if ( pLeaf
4883 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
4884 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
4885 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
4886
4887 pVM->cpum.s.GuestFeatures.fPae = 1;
4888 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
4889 break;
4890
4891 /*
4892 * Set the LONG MODE bit in the extended feature mask.
4893 * Assumes the caller knows what it's doing! (host must support these)
4894 */
4895 case CPUMCPUIDFEATURE_LONG_MODE:
4896 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4897 if ( !pLeaf
4898 || !pVM->cpum.s.HostFeatures.fLongMode)
4899 {
4900 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
4901 return;
4902 }
4903
4904 /* Valid for both Intel and AMD. */
4905 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
4906 pVM->cpum.s.GuestFeatures.fLongMode = 1;
4907 pVM->cpum.s.GuestFeatures.cVmxMaxPhysAddrWidth = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
4908 if (pVM->cpum.s.GuestFeatures.fVmx)
4909 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
4910 {
4911 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
4912 pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Basic &= ~VMX_BASIC_PHYSADDR_WIDTH_32BIT;
4913 }
4914 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
4915 break;
4916
4917 /*
4918 * Set the NX/XD bit in the extended feature mask.
4919 * Assumes the caller knows what it's doing! (host must support these)
4920 */
4921 case CPUMCPUIDFEATURE_NX:
4922 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4923 if ( !pLeaf
4924 || !pVM->cpum.s.HostFeatures.fNoExecute)
4925 {
4926 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
4927 return;
4928 }
4929
4930 /* Valid for both Intel and AMD. */
4931 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
4932 pVM->cpum.s.GuestFeatures.fNoExecute = 1;
4933 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
4934 break;
4935
4936
4937 /*
4938 * Set the LAHF/SAHF support in 64-bit mode.
4939 * Assumes the caller knows what it's doing! (host must support this)
4940 */
4941 case CPUMCPUIDFEATURE_LAHF:
4942 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4943 if ( !pLeaf
4944 || !pVM->cpum.s.HostFeatures.fLahfSahf)
4945 {
4946 LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
4947 return;
4948 }
4949
4950 /* Valid for both Intel and AMD. */
4951 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
4952 pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
4953 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
4954 break;
4955
4956 /*
4957 * Set the RDTSCP support bit.
4958 * Assumes the caller knows what it's doing! (host must support this)
4959 */
4960 case CPUMCPUIDFEATURE_RDTSCP:
4961 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4962 if ( !pLeaf
4963 || !pVM->cpum.s.HostFeatures.fRdTscP
4964 || pVM->cpum.s.u8PortableCpuIdLevel > 0)
4965 {
4966 if (!pVM->cpum.s.u8PortableCpuIdLevel)
4967 LogRel(("CPUM: WARNING! Can't turn on RDTSCP when the host doesn't support it!\n"));
4968 return;
4969 }
4970
4971 /* Valid for both Intel and AMD. */
4972 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
4973 pVM->cpum.s.HostFeatures.fRdTscP = 1;
4974 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
4975 break;
4976
4977 /*
4978 * Set the Hypervisor Present bit in the standard feature mask.
4979 */
4980 case CPUMCPUIDFEATURE_HVP:
4981 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
4982 if (pLeaf)
4983 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
4984 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
4985 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
4986 break;
4987
4988 /*
4989 * Set up the speculation control CPUID bits and MSRs. This is quite complicated
4990 * on Intel CPUs, and different on AMDs.
4991 */
4992 case CPUMCPUIDFEATURE_SPEC_CTRL:
4993 if (pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
4994 {
4995 pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x00000007), 0);
4996 if ( !pLeaf
4997 || !(pVM->cpum.s.HostFeatures.fIbpb || pVM->cpum.s.HostFeatures.fIbrs))
4998 {
4999 LogRel(("CPUM: WARNING! Can't turn on Speculation Control when the host doesn't support it!\n"));
5000 return;
5001 }
5002
5003 /* The feature can be enabled. Let's see what we can actually do. */
5004 pVM->cpum.s.GuestFeatures.fSpeculationControl = 1;
5005
5006 /* We will only expose STIBP if IBRS is present to keep things simpler (simple is not an option). */
5007 if (pVM->cpum.s.HostFeatures.fIbrs)
5008 {
5009 pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB;
5010 pVM->cpum.s.GuestFeatures.fIbrs = 1;
5011 if (pVM->cpum.s.HostFeatures.fStibp)
5012 {
5013 pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_STIBP;
5014 pVM->cpum.s.GuestFeatures.fStibp = 1;
5015 }
5016
5017 /* Make sure we have the speculation control MSR... */
5018 pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_SPEC_CTRL);
5019 if (!pMsrRange)
5020 {
5021 static CPUMMSRRANGE const s_SpecCtrl =
5022 {
5023 /*.uFirst =*/ MSR_IA32_SPEC_CTRL, /*.uLast =*/ MSR_IA32_SPEC_CTRL,
5024 /*.enmRdFn =*/ kCpumMsrRdFn_Ia32SpecCtrl, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32SpecCtrl,
5025 /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
5026 /*.szName = */ "IA32_SPEC_CTRL"
5027 };
5028 int rc = CPUMR3MsrRangesInsert(pVM, &s_SpecCtrl);
5029 AssertLogRelRC(rc);
5030 }
5031
5032 /* ... and the predictor command MSR. */
5033 pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_PRED_CMD);
5034 if (!pMsrRange)
5035 {
5036 /** @todo incorrect fWrGpMask. */
5037 static CPUMMSRRANGE const s_SpecCtrl =
5038 {
5039 /*.uFirst =*/ MSR_IA32_PRED_CMD, /*.uLast =*/ MSR_IA32_PRED_CMD,
5040 /*.enmRdFn =*/ kCpumMsrRdFn_WriteOnly, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32PredCmd,
5041 /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
5042 /*.szName = */ "IA32_PRED_CMD"
5043 };
5044 int rc = CPUMR3MsrRangesInsert(pVM, &s_SpecCtrl);
5045 AssertLogRelRC(rc);
5046 }
5047
5048 }
5049
5050 if (pVM->cpum.s.HostFeatures.fArchCap)
5051 {
5052 /* Install the architectural capabilities MSR. */
5053 pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_ARCH_CAPABILITIES);
5054 if (!pMsrRange)
5055 {
5056 static CPUMMSRRANGE const s_ArchCaps =
5057 {
5058 /*.uFirst =*/ MSR_IA32_ARCH_CAPABILITIES, /*.uLast =*/ MSR_IA32_ARCH_CAPABILITIES,
5059 /*.enmRdFn =*/ kCpumMsrRdFn_Ia32ArchCapabilities, /*.enmWrFn =*/ kCpumMsrWrFn_ReadOnly,
5060 /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ UINT64_MAX,
5061 /*.szName = */ "IA32_ARCH_CAPABILITIES"
5062 };
5063 int rc = CPUMR3MsrRangesInsert(pVM, &s_ArchCaps);
5064 AssertLogRelRC(rc);
5065 }
5066
5067 /* Advertise IBRS_ALL if present at this point... */
5068 if (pVM->cpum.s.HostFeatures.fArchCap & MSR_IA32_ARCH_CAP_F_IBRS_ALL)
5069 VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps |= MSR_IA32_ARCH_CAP_F_IBRS_ALL);
5070 }
5071
5072 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Speculation Control.\n"));
5073 }
5074 else if ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
5075 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
5076 {
5077 /* The precise details of AMD's implementation are not yet clear. */
5078 }
5079 break;
5080
5081 default:
5082 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
5083 break;
5084 }
5085
5086 /** @todo can probably kill this as this API is now init time only... */
5087 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
5088 {
5089 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
5090 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
5091 }
5092}
5093
5094
5095/**
5096 * Queries a CPUID feature bit.
5097 *
5098 * @returns boolean for feature presence
5099 * @param pVM The cross context VM structure.
5100 * @param enmFeature The feature to query.
5101 * @deprecated Use the cpum.ro.GuestFeatures directly instead.
5102 */
5103VMMR3_INT_DECL(bool) CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
5104{
5105 switch (enmFeature)
5106 {
5107 case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic;
5108 case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic;
5109 case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall;
5110 case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter;
5111 case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae;
5112 case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute;
5113 case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf;
5114 case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode;
5115 case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP;
5116 case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
5117 case CPUMCPUIDFEATURE_SPEC_CTRL: return pVM->cpum.s.GuestFeatures.fSpeculationControl;
5118 case CPUMCPUIDFEATURE_INVALID:
5119 case CPUMCPUIDFEATURE_32BIT_HACK:
5120 break;
5121 }
5122 AssertFailed();
5123 return false;
5124}
5125
5126
5127/**
5128 * Clears a CPUID feature bit.
5129 *
5130 * @param pVM The cross context VM structure.
5131 * @param enmFeature The feature to clear.
5132 *
5133 * @deprecated Probably better to default the feature to disabled and only allow
5134 * setting (enabling) it during construction.
5135 */
5136VMMR3_INT_DECL(void) CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
5137{
5138 PCPUMCPUIDLEAF pLeaf;
5139 switch (enmFeature)
5140 {
5141 case CPUMCPUIDFEATURE_APIC:
5142 Assert(!pVM->cpum.s.GuestFeatures.fApic); /* We only expect this call during init. No MSR adjusting needed. */
5143 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
5144 if (pLeaf)
5145 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
5146
5147 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
5148 if (pLeaf && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
5149 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
5150
5151 pVM->cpum.s.GuestFeatures.fApic = 0;
5152 Log(("CPUM: ClearGuestCpuIdFeature: Disabled xAPIC\n"));
5153 break;
5154
5155 case CPUMCPUIDFEATURE_X2APIC:
5156 Assert(!pVM->cpum.s.GuestFeatures.fX2Apic); /* We only expect this call during init. No MSR adjusting needed. */
5157 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
5158 if (pLeaf)
5159 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
5160 pVM->cpum.s.GuestFeatures.fX2Apic = 0;
5161 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
5162 break;
5163
5164 case CPUMCPUIDFEATURE_PAE:
5165 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
5166 if (pLeaf)
5167 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
5168
5169 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
5170 if ( pLeaf
5171 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
5172 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
5173 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
5174
5175 pVM->cpum.s.GuestFeatures.fPae = 0;
5176 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
5177 break;
5178
5179 case CPUMCPUIDFEATURE_LONG_MODE:
5180 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
5181 if (pLeaf)
5182 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
5183 pVM->cpum.s.GuestFeatures.fLongMode = 0;
5184 pVM->cpum.s.GuestFeatures.cVmxMaxPhysAddrWidth = 32;
5185 if (pVM->cpum.s.GuestFeatures.fVmx)
5186 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
5187 {
5188 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
5189 pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Basic |= VMX_BASIC_PHYSADDR_WIDTH_32BIT;
5190 }
5191 break;
5192
5193 case CPUMCPUIDFEATURE_LAHF:
5194 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
5195 if (pLeaf)
5196 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
5197 pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
5198 break;
5199
5200 case CPUMCPUIDFEATURE_RDTSCP:
5201 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
5202 if (pLeaf)
5203 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
5204 pVM->cpum.s.GuestFeatures.fRdTscP = 0;
5205 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
5206 break;
5207
5208 case CPUMCPUIDFEATURE_HVP:
5209 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
5210 if (pLeaf)
5211 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
5212 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
5213 break;
5214
5215 case CPUMCPUIDFEATURE_SPEC_CTRL:
5216 pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x00000007), 0);
5217 if (pLeaf)
5218 pLeaf->uEdx &= ~(X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB | X86_CPUID_STEXT_FEATURE_EDX_STIBP);
5219 VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps &= ~MSR_IA32_ARCH_CAP_F_IBRS_ALL);
5220 Log(("CPUM: ClearGuestCpuIdFeature: Disabled speculation control!\n"));
5221 break;
5222
5223 default:
5224 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
5225 break;
5226 }
5227
5228 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
5229 {
5230 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
5231 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
5232 }
5233}
5234
5235
5236
5237/*
5238 *
5239 *
5240 * Saved state related code.
5241 * Saved state related code.
5242 * Saved state related code.
5243 *
5244 *
5245 */
5246
5247/**
5248 * Called both in pass 0 and the final pass.
5249 *
5250 * @param pVM The cross context VM structure.
5251 * @param pSSM The saved state handle.
5252 */
5253void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM)
5254{
5255 /*
5256 * Save all the CPU ID leaves.
5257 */
5258 SSMR3PutU32(pSSM, sizeof(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3[0]));
5259 SSMR3PutU32(pSSM, pVM->cpum.s.GuestInfo.cCpuIdLeaves);
5260 SSMR3PutMem(pSSM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3,
5261 sizeof(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3[0]) * pVM->cpum.s.GuestInfo.cCpuIdLeaves);
5262
5263 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestInfo.DefCpuId, sizeof(pVM->cpum.s.GuestInfo.DefCpuId));
5264
5265 /*
5266 * Save a good portion of the raw CPU IDs as well as they may come in
5267 * handy when validating features for raw mode.
5268 */
5269 CPUMCPUID aRawStd[16];
5270 for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++)
5271 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx);
5272 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd));
5273 SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd));
5274
5275 CPUMCPUID aRawExt[32];
5276 for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++)
5277 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx);
5278 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
5279 SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
5280}
5281
5282
5283static int cpumR3LoadOneOldGuestCpuIdArray(PSSMHANDLE pSSM, uint32_t uBase, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
5284{
5285 uint32_t cCpuIds;
5286 int rc = SSMR3GetU32(pSSM, &cCpuIds);
5287 if (RT_SUCCESS(rc))
5288 {
5289 if (cCpuIds < 64)
5290 {
5291 for (uint32_t i = 0; i < cCpuIds; i++)
5292 {
5293 CPUMCPUID CpuId;
5294 rc = SSMR3GetMem(pSSM, &CpuId, sizeof(CpuId));
5295 if (RT_FAILURE(rc))
5296 break;
5297
5298 CPUMCPUIDLEAF NewLeaf;
5299 NewLeaf.uLeaf = uBase + i;
5300 NewLeaf.uSubLeaf = 0;
5301 NewLeaf.fSubLeafMask = 0;
5302 NewLeaf.uEax = CpuId.uEax;
5303 NewLeaf.uEbx = CpuId.uEbx;
5304 NewLeaf.uEcx = CpuId.uEcx;
5305 NewLeaf.uEdx = CpuId.uEdx;
5306 NewLeaf.fFlags = 0;
5307 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &NewLeaf);
5308 }
5309 }
5310 else
5311 rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
5312 }
5313 if (RT_FAILURE(rc))
5314 {
5315 RTMemFree(*ppaLeaves);
5316 *ppaLeaves = NULL;
5317 *pcLeaves = 0;
5318 }
5319 return rc;
5320}
5321
5322
5323static int cpumR3LoadGuestCpuIdArray(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
5324{
5325 *ppaLeaves = NULL;
5326 *pcLeaves = 0;
5327
5328 int rc;
5329 if (uVersion > CPUM_SAVED_STATE_VERSION_PUT_STRUCT)
5330 {
5331 /*
5332 * The new format. Starts by declaring the leave size and count.
5333 */
5334 uint32_t cbLeaf;
5335 SSMR3GetU32(pSSM, &cbLeaf);
5336 uint32_t cLeaves;
5337 rc = SSMR3GetU32(pSSM, &cLeaves);
5338 if (RT_SUCCESS(rc))
5339 {
5340 if (cbLeaf == sizeof(**ppaLeaves))
5341 {
5342 if (cLeaves <= CPUM_CPUID_MAX_LEAVES)
5343 {
5344 /*
5345 * Load the leaves one by one.
5346 *
5347 * The uPrev stuff is a kludge for working around a week worth of bad saved
5348 * states during the CPUID revamp in March 2015. We saved too many leaves
5349 * due to a bug in cpumR3CpuIdInstallAndExplodeLeaves, thus ending up with
5350 * garbage entires at the end of the array when restoring. We also had
5351 * a subleaf insertion bug that triggered with the leaf 4 stuff below,
5352 * this kludge doesn't deal correctly with that, but who cares...
5353 */
5354 uint32_t uPrev = 0;
5355 for (uint32_t i = 0; i < cLeaves && RT_SUCCESS(rc); i++)
5356 {
5357 CPUMCPUIDLEAF Leaf;
5358 rc = SSMR3GetMem(pSSM, &Leaf, sizeof(Leaf));
5359 if (RT_SUCCESS(rc))
5360 {
5361 if ( uVersion != CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT
5362 || Leaf.uLeaf >= uPrev)
5363 {
5364 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
5365 uPrev = Leaf.uLeaf;
5366 }
5367 else
5368 uPrev = UINT32_MAX;
5369 }
5370 }
5371 }
5372 else
5373 rc = SSMR3SetLoadError(pSSM, VERR_TOO_MANY_CPUID_LEAVES, RT_SRC_POS,
5374 "Too many CPUID leaves: %#x, max %#x", cLeaves, CPUM_CPUID_MAX_LEAVES);
5375 }
5376 else
5377 rc = SSMR3SetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
5378 "CPUMCPUIDLEAF size differs: saved=%#x, our=%#x", cbLeaf, sizeof(**ppaLeaves));
5379 }
5380 }
5381 else
5382 {
5383 /*
5384 * The old format with its three inflexible arrays.
5385 */
5386 rc = cpumR3LoadOneOldGuestCpuIdArray(pSSM, UINT32_C(0x00000000), ppaLeaves, pcLeaves);
5387 if (RT_SUCCESS(rc))
5388 rc = cpumR3LoadOneOldGuestCpuIdArray(pSSM, UINT32_C(0x80000000), ppaLeaves, pcLeaves);
5389 if (RT_SUCCESS(rc))
5390 rc = cpumR3LoadOneOldGuestCpuIdArray(pSSM, UINT32_C(0xc0000000), ppaLeaves, pcLeaves);
5391 if (RT_SUCCESS(rc))
5392 {
5393 /*
5394 * Fake up leaf 4 on intel like we used to do in CPUMGetGuestCpuId earlier.
5395 */
5396 PCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(*ppaLeaves, *pcLeaves, 0, 0);
5397 if ( pLeaf
5398 && ASMIsIntelCpuEx(pLeaf->uEbx, pLeaf->uEcx, pLeaf->uEdx))
5399 {
5400 CPUMCPUIDLEAF Leaf;
5401 Leaf.uLeaf = 4;
5402 Leaf.fSubLeafMask = UINT32_MAX;
5403 Leaf.uSubLeaf = 0;
5404 Leaf.uEdx = UINT32_C(0); /* 3 flags, 0 is fine. */
5405 Leaf.uEcx = UINT32_C(63); /* sets - 1 */
5406 Leaf.uEbx = (UINT32_C(7) << 22) /* associativity -1 */
5407 | (UINT32_C(0) << 12) /* phys line partitions - 1 */
5408 | UINT32_C(63); /* system coherency line size - 1 */
5409 Leaf.uEax = (RT_MIN(pVM->cCpus - 1, UINT32_C(0x3f)) << 26) /* cores per package - 1 */
5410 | (UINT32_C(0) << 14) /* threads per cache - 1 */
5411 | (UINT32_C(1) << 5) /* cache level */
5412 | UINT32_C(1); /* cache type (data) */
5413 Leaf.fFlags = 0;
5414 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
5415 if (RT_SUCCESS(rc))
5416 {
5417 Leaf.uSubLeaf = 1; /* Should've been cache type 2 (code), but buggy code made it data. */
5418 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
5419 }
5420 if (RT_SUCCESS(rc))
5421 {
5422 Leaf.uSubLeaf = 2; /* Should've been cache type 3 (unified), but buggy code made it data. */
5423 Leaf.uEcx = 4095; /* sets - 1 */
5424 Leaf.uEbx &= UINT32_C(0x003fffff); /* associativity - 1 */
5425 Leaf.uEbx |= UINT32_C(23) << 22;
5426 Leaf.uEax &= UINT32_C(0xfc003fff); /* threads per cache - 1 */
5427 Leaf.uEax |= RT_MIN(pVM->cCpus - 1, UINT32_C(0xfff)) << 14;
5428 Leaf.uEax &= UINT32_C(0xffffff1f); /* level */
5429 Leaf.uEax |= UINT32_C(2) << 5;
5430 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
5431 }
5432 }
5433 }
5434 }
5435 return rc;
5436}
5437
5438
5439/**
5440 * Loads the CPU ID leaves saved by pass 0, inner worker.
5441 *
5442 * @returns VBox status code.
5443 * @param pVM The cross context VM structure.
5444 * @param pSSM The saved state handle.
5445 * @param uVersion The format version.
5446 * @param paLeaves Guest CPUID leaves loaded from the state.
5447 * @param cLeaves The number of leaves in @a paLeaves.
5448 * @param pMsrs The guest MSRs.
5449 */
5450int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs)
5451{
5452 AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
5453
5454 /*
5455 * Continue loading the state into stack buffers.
5456 */
5457 CPUMCPUID GuestDefCpuId;
5458 int rc = SSMR3GetMem(pSSM, &GuestDefCpuId, sizeof(GuestDefCpuId));
5459 AssertRCReturn(rc, rc);
5460
5461 CPUMCPUID aRawStd[16];
5462 uint32_t cRawStd;
5463 rc = SSMR3GetU32(pSSM, &cRawStd); AssertRCReturn(rc, rc);
5464 if (cRawStd > RT_ELEMENTS(aRawStd))
5465 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
5466 rc = SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
5467 AssertRCReturn(rc, rc);
5468 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
5469 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx);
5470
5471 CPUMCPUID aRawExt[32];
5472 uint32_t cRawExt;
5473 rc = SSMR3GetU32(pSSM, &cRawExt); AssertRCReturn(rc, rc);
5474 if (cRawExt > RT_ELEMENTS(aRawExt))
5475 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
5476 rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
5477 AssertRCReturn(rc, rc);
5478 for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
5479 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx);
5480
5481 /*
5482 * Get the raw CPU IDs for the current host.
5483 */
5484 CPUMCPUID aHostRawStd[16];
5485 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++)
5486 ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i].uEax, &aHostRawStd[i].uEbx, &aHostRawStd[i].uEcx, &aHostRawStd[i].uEdx);
5487
5488 CPUMCPUID aHostRawExt[32];
5489 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++)
5490 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0,
5491 &aHostRawExt[i].uEax, &aHostRawExt[i].uEbx, &aHostRawExt[i].uEcx, &aHostRawExt[i].uEdx);
5492
5493 /*
5494 * Get the host and guest overrides so we don't reject the state because
5495 * some feature was enabled thru these interfaces.
5496 * Note! We currently only need the feature leaves, so skip rest.
5497 */
5498 PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
5499 CPUMCPUID aHostOverrideStd[2];
5500 memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd));
5501 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aHostOverrideStd[0], RT_ELEMENTS(aHostOverrideStd), pOverrideCfg);
5502
5503 CPUMCPUID aHostOverrideExt[2];
5504 memcpy(&aHostOverrideExt[0], &aHostRawExt[0], sizeof(aHostOverrideExt));
5505 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aHostOverrideExt[0], RT_ELEMENTS(aHostOverrideExt), pOverrideCfg);
5506
5507 /*
5508 * This can be skipped.
5509 */
5510 bool fStrictCpuIdChecks;
5511 CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"), "StrictCpuIdChecks", &fStrictCpuIdChecks, true);
5512
5513 /*
5514 * Define a bunch of macros for simplifying the santizing/checking code below.
5515 */
5516 /* Generic expression + failure message. */
5517#define CPUID_CHECK_RET(expr, fmt) \
5518 do { \
5519 if (!(expr)) \
5520 { \
5521 char *pszMsg = RTStrAPrintf2 fmt; /* lack of variadic macros sucks */ \
5522 if (fStrictCpuIdChecks) \
5523 { \
5524 int rcCpuid = SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, "%s", pszMsg); \
5525 RTStrFree(pszMsg); \
5526 return rcCpuid; \
5527 } \
5528 LogRel(("CPUM: %s\n", pszMsg)); \
5529 RTStrFree(pszMsg); \
5530 } \
5531 } while (0)
5532#define CPUID_CHECK_WRN(expr, fmt) \
5533 do { \
5534 if (!(expr)) \
5535 LogRel(fmt); \
5536 } while (0)
5537
5538 /* For comparing two values and bitch if they differs. */
5539#define CPUID_CHECK2_RET(what, host, saved) \
5540 do { \
5541 if ((host) != (saved)) \
5542 { \
5543 if (fStrictCpuIdChecks) \
5544 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
5545 N_(#what " mismatch: host=%#x saved=%#x"), (host), (saved)); \
5546 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
5547 } \
5548 } while (0)
5549#define CPUID_CHECK2_WRN(what, host, saved) \
5550 do { \
5551 if ((host) != (saved)) \
5552 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
5553 } while (0)
5554
5555 /* For checking raw cpu features (raw mode). */
5556#define CPUID_RAW_FEATURE_RET(set, reg, bit) \
5557 do { \
5558 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
5559 { \
5560 if (fStrictCpuIdChecks) \
5561 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
5562 N_(#bit " mismatch: host=%d saved=%d"), \
5563 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) ); \
5564 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
5565 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
5566 } \
5567 } while (0)
5568#define CPUID_RAW_FEATURE_WRN(set, reg, bit) \
5569 do { \
5570 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
5571 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
5572 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
5573 } while (0)
5574#define CPUID_RAW_FEATURE_IGN(set, reg, bit) do { } while (0)
5575
5576 /* For checking guest features. */
5577#define CPUID_GST_FEATURE_RET(set, reg, bit) \
5578 do { \
5579 if ( (aGuestCpuId##set [1].reg & bit) \
5580 && !(aHostRaw##set [1].reg & bit) \
5581 && !(aHostOverride##set [1].reg & bit) \
5582 ) \
5583 { \
5584 if (fStrictCpuIdChecks) \
5585 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
5586 N_(#bit " is not supported by the host but has already exposed to the guest")); \
5587 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
5588 } \
5589 } while (0)
5590#define CPUID_GST_FEATURE_WRN(set, reg, bit) \
5591 do { \
5592 if ( (aGuestCpuId##set [1].reg & bit) \
5593 && !(aHostRaw##set [1].reg & bit) \
5594 && !(aHostOverride##set [1].reg & bit) \
5595 ) \
5596 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
5597 } while (0)
5598#define CPUID_GST_FEATURE_EMU(set, reg, bit) \
5599 do { \
5600 if ( (aGuestCpuId##set [1].reg & bit) \
5601 && !(aHostRaw##set [1].reg & bit) \
5602 && !(aHostOverride##set [1].reg & bit) \
5603 ) \
5604 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
5605 } while (0)
5606#define CPUID_GST_FEATURE_IGN(set, reg, bit) do { } while (0)
5607
5608 /* For checking guest features if AMD guest CPU. */
5609#define CPUID_GST_AMD_FEATURE_RET(set, reg, bit) \
5610 do { \
5611 if ( (aGuestCpuId##set [1].reg & bit) \
5612 && fGuestAmd \
5613 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
5614 && !(aHostOverride##set [1].reg & bit) \
5615 ) \
5616 { \
5617 if (fStrictCpuIdChecks) \
5618 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
5619 N_(#bit " is not supported by the host but has already exposed to the guest")); \
5620 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
5621 } \
5622 } while (0)
5623#define CPUID_GST_AMD_FEATURE_WRN(set, reg, bit) \
5624 do { \
5625 if ( (aGuestCpuId##set [1].reg & bit) \
5626 && fGuestAmd \
5627 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
5628 && !(aHostOverride##set [1].reg & bit) \
5629 ) \
5630 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
5631 } while (0)
5632#define CPUID_GST_AMD_FEATURE_EMU(set, reg, bit) \
5633 do { \
5634 if ( (aGuestCpuId##set [1].reg & bit) \
5635 && fGuestAmd \
5636 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
5637 && !(aHostOverride##set [1].reg & bit) \
5638 ) \
5639 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
5640 } while (0)
5641#define CPUID_GST_AMD_FEATURE_IGN(set, reg, bit) do { } while (0)
5642
5643 /* For checking AMD features which have a corresponding bit in the standard
5644 range. (Intel defines very few bits in the extended feature sets.) */
5645#define CPUID_GST_FEATURE2_RET(reg, ExtBit, StdBit) \
5646 do { \
5647 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
5648 && !(fHostAmd \
5649 ? aHostRawExt[1].reg & (ExtBit) \
5650 : aHostRawStd[1].reg & (StdBit)) \
5651 && !(aHostOverrideExt[1].reg & (ExtBit)) \
5652 ) \
5653 { \
5654 if (fStrictCpuIdChecks) \
5655 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
5656 N_(#ExtBit " is not supported by the host but has already exposed to the guest")); \
5657 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \
5658 } \
5659 } while (0)
5660#define CPUID_GST_FEATURE2_WRN(reg, ExtBit, StdBit) \
5661 do { \
5662 if ( (aGuestCpuId[1].reg & (ExtBit)) \
5663 && !(fHostAmd \
5664 ? aHostRawExt[1].reg & (ExtBit) \
5665 : aHostRawStd[1].reg & (StdBit)) \
5666 && !(aHostOverrideExt[1].reg & (ExtBit)) \
5667 ) \
5668 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \
5669 } while (0)
5670#define CPUID_GST_FEATURE2_EMU(reg, ExtBit, StdBit) \
5671 do { \
5672 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
5673 && !(fHostAmd \
5674 ? aHostRawExt[1].reg & (ExtBit) \
5675 : aHostRawStd[1].reg & (StdBit)) \
5676 && !(aHostOverrideExt[1].reg & (ExtBit)) \
5677 ) \
5678 LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
5679 } while (0)
5680#define CPUID_GST_FEATURE2_IGN(reg, ExtBit, StdBit) do { } while (0)
5681
5682
5683 /*
5684 * Verify that we can support the features already exposed to the guest on
5685 * this host.
5686 *
5687 * Most of the features we're emulating requires intercepting instruction
5688 * and doing it the slow way, so there is no need to warn when they aren't
5689 * present in the host CPU. Thus we use IGN instead of EMU on these.
5690 *
5691 * Trailing comments:
5692 * "EMU" - Possible to emulate, could be lots of work and very slow.
5693 * "EMU?" - Can this be emulated?
5694 */
5695 CPUMCPUID aGuestCpuIdStd[2];
5696 RT_ZERO(aGuestCpuIdStd);
5697 cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, 1, 0, &aGuestCpuIdStd[1]);
5698
5699 /* CPUID(1).ecx */
5700 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE3); // -> EMU
5701 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCLMUL); // -> EMU?
5702 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DTES64); // -> EMU?
5703 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_MONITOR);
5704 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CPLDS); // -> EMU?
5705 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_VMX); // -> EMU
5706 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SMX); // -> EMU
5707 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_EST); // -> EMU
5708 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TM2); // -> EMU?
5709 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSSE3); // -> EMU
5710 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CNTXID); // -> EMU
5711 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_SDBG);
5712 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_FMA); // -> EMU? what's this?
5713 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CX16); // -> EMU?
5714 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU
5715 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PDCM); // -> EMU
5716 CPUID_GST_FEATURE_RET(Std, uEcx, RT_BIT_32(16) /*reserved*/);
5717 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCID);
5718 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DCA); // -> EMU?
5719 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_1); // -> EMU
5720 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_2); // -> EMU
5721 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_X2APIC);
5722 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_MOVBE); // -> EMU
5723 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_POPCNT); // -> EMU
5724 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TSCDEADL);
5725 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AES); // -> EMU
5726 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_XSAVE); // -> EMU
5727 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE);
5728 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AVX); // -> EMU?
5729 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_F16C);
5730 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_RDRAND);
5731 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_HVP); // Normally not set by host
5732
5733 /* CPUID(1).edx */
5734 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FPU);
5735 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_VME);
5736 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DE); // -> EMU?
5737 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE);
5738 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
5739 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
5740 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PAE);
5741 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCE);
5742 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
5743 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_APIC);
5744 CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(10) /*reserved*/);
5745 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_SEP);
5746 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MTRR);
5747 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PGE);
5748 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCA);
5749 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
5750 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PAT);
5751 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE36);
5752 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSN);
5753 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CLFSH); // -> EMU
5754 CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(20) /*reserved*/);
5755 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DS); // -> EMU?
5756 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_ACPI); // -> EMU?
5757 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
5758 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
5759 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE); // -> EMU
5760 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE2); // -> EMU
5761 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SS); // -> EMU?
5762 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_HTT); // -> EMU?
5763 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TM); // -> EMU?
5764 CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(30) /*JMPE/IA64*/); // -> EMU
5765 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PBE); // -> EMU?
5766
5767 /* CPUID(0x80000000). */
5768 CPUMCPUID aGuestCpuIdExt[2];
5769 RT_ZERO(aGuestCpuIdExt);
5770 if (cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, UINT32_C(0x80000001), 0, &aGuestCpuIdExt[1]))
5771 {
5772 /** @todo deal with no 0x80000001 on the host. */
5773 bool const fHostAmd = ASMIsAmdCpuEx(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx)
5774 || ASMIsHygonCpuEx(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx);
5775 bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx)
5776 || ASMIsHygonCpuEx(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx);
5777
5778 /* CPUID(0x80000001).ecx */
5779 CPUID_GST_FEATURE_WRN(Ext, uEcx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); // -> EMU
5780 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CMPL); // -> EMU
5781 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SVM); // -> EMU
5782 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ???
5783 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CR8L); // -> EMU
5784 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_ABM); // -> EMU
5785 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE4A); // -> EMU
5786 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU
5787 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU
5788 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_OSVW); // -> EMU?
5789 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_IBS); // -> EMU
5790 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_XOP); // -> EMU
5791 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SKINIT); // -> EMU
5792 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_WDT); // -> EMU
5793 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(14));
5794 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(15));
5795 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(16));
5796 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(17));
5797 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(18));
5798 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(19));
5799 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(20));
5800 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(21));
5801 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(22));
5802 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(23));
5803 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(24));
5804 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(25));
5805 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(26));
5806 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(27));
5807 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(28));
5808 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(29));
5809 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(30));
5810 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(31));
5811
5812 /* CPUID(0x80000001).edx */
5813 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_FPU, X86_CPUID_FEATURE_EDX_FPU); // -> EMU
5814 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_VME, X86_CPUID_FEATURE_EDX_VME); // -> EMU
5815 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_DE, X86_CPUID_FEATURE_EDX_DE); // -> EMU
5816 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE, X86_CPUID_FEATURE_EDX_PSE);
5817 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_TSC, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
5818 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_MSR, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
5819 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_PAE, X86_CPUID_FEATURE_EDX_PAE);
5820 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MCE, X86_CPUID_FEATURE_EDX_MCE);
5821 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_CX8, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
5822 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_APIC, X86_CPUID_FEATURE_EDX_APIC);
5823 CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(10) /*reserved*/);
5824 CPUID_GST_FEATURE_IGN( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL); // On Intel: long mode only.
5825 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MTRR, X86_CPUID_FEATURE_EDX_MTRR);
5826 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PGE, X86_CPUID_FEATURE_EDX_PGE);
5827 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MCA, X86_CPUID_FEATURE_EDX_MCA);
5828 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_CMOV, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
5829 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PAT, X86_CPUID_FEATURE_EDX_PAT);
5830 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36);
5831 CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(18) /*reserved*/);
5832 CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(19) /*reserved*/);
5833 CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_NX);
5834 CPUID_GST_FEATURE_WRN( Ext, uEdx, RT_BIT_32(21) /*reserved*/);
5835 CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
5836 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_MMX, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
5837 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_FXSR, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
5838 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
5839 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
5840 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
5841 CPUID_GST_FEATURE_IGN( Ext, uEdx, RT_BIT_32(28) /*reserved*/);
5842 CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
5843 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
5844 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
5845 }
5846
5847 /** @todo check leaf 7 */
5848
5849 /* CPUID(d) - XCR0 stuff - takes ECX as input.
5850 * ECX=0: EAX - Valid bits in XCR0[31:0].
5851 * EBX - Maximum state size as per current XCR0 value.
5852 * ECX - Maximum state size for all supported features.
5853 * EDX - Valid bits in XCR0[63:32].
5854 * ECX=1: EAX - Various X-features.
5855 * EBX - Maximum state size as per current XCR0|IA32_XSS value.
5856 * ECX - Valid bits in IA32_XSS[31:0].
5857 * EDX - Valid bits in IA32_XSS[63:32].
5858 * ECX=N, where N in 2..63 and indicates a bit in XCR0 and/or IA32_XSS,
5859 * if the bit invalid all four registers are set to zero.
5860 * EAX - The state size for this feature.
5861 * EBX - The state byte offset of this feature.
5862 * ECX - Bit 0 indicates whether this sub-leaf maps to a valid IA32_XSS bit (=1) or a valid XCR0 bit (=0).
5863 * EDX - Reserved, but is set to zero if invalid sub-leaf index.
5864 */
5865 uint64_t fGuestXcr0Mask = 0;
5866 PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x0000000d), 0);
5867 if ( pCurLeaf
5868 && (aGuestCpuIdStd[1].uEcx & X86_CPUID_FEATURE_ECX_XSAVE)
5869 && ( pCurLeaf->uEax
5870 || pCurLeaf->uEbx
5871 || pCurLeaf->uEcx
5872 || pCurLeaf->uEdx) )
5873 {
5874 fGuestXcr0Mask = RT_MAKE_U64(pCurLeaf->uEax, pCurLeaf->uEdx);
5875 if (fGuestXcr0Mask & ~pVM->cpum.s.fXStateHostMask)
5876 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
5877 N_("CPUID(0xd/0).EDX:EAX mismatch: %#llx saved, %#llx supported by the current host (XCR0 bits)"),
5878 fGuestXcr0Mask, pVM->cpum.s.fXStateHostMask);
5879 if ((fGuestXcr0Mask & (XSAVE_C_X87 | XSAVE_C_SSE)) != (XSAVE_C_X87 | XSAVE_C_SSE))
5880 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
5881 N_("CPUID(0xd/0).EDX:EAX missing mandatory X87 or SSE bits: %#RX64"), fGuestXcr0Mask);
5882
5883 /* We don't support any additional features yet. */
5884 pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x0000000d), 1);
5885 if (pCurLeaf && pCurLeaf->uEax)
5886 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
5887 N_("CPUID(0xd/1).EAX=%#x, expected zero"), pCurLeaf->uEax);
5888 if (pCurLeaf && (pCurLeaf->uEcx || pCurLeaf->uEdx))
5889 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
5890 N_("CPUID(0xd/1).EDX:ECX=%#llx, expected zero"),
5891 RT_MAKE_U64(pCurLeaf->uEdx, pCurLeaf->uEcx));
5892
5893
5894 for (uint32_t uSubLeaf = 2; uSubLeaf < 64; uSubLeaf++)
5895 {
5896 pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x0000000d), uSubLeaf);
5897 if (pCurLeaf)
5898 {
5899 /* If advertised, the state component offset and size must match the one used by host. */
5900 if (pCurLeaf->uEax || pCurLeaf->uEbx || pCurLeaf->uEcx || pCurLeaf->uEdx)
5901 {
5902 CPUMCPUID RawHost;
5903 ASMCpuIdExSlow(UINT32_C(0x0000000d), 0, uSubLeaf, 0,
5904 &RawHost.uEax, &RawHost.uEbx, &RawHost.uEcx, &RawHost.uEdx);
5905 if ( RawHost.uEbx != pCurLeaf->uEbx
5906 || RawHost.uEax != pCurLeaf->uEax)
5907 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
5908 N_("CPUID(0xd/%#x).EBX/EAX=%#x/%#x, current host uses %#x/%#x (offset/size)"),
5909 uSubLeaf, pCurLeaf->uEbx, pCurLeaf->uEax, RawHost.uEbx, RawHost.uEax);
5910 }
5911 }
5912 }
5913 }
5914 /* Clear leaf 0xd just in case we're loading an old state... */
5915 else if (pCurLeaf)
5916 {
5917 for (uint32_t uSubLeaf = 0; uSubLeaf < 64; uSubLeaf++)
5918 {
5919 pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x0000000d), uSubLeaf);
5920 if (pCurLeaf)
5921 {
5922 AssertLogRelMsg( uVersion <= CPUM_SAVED_STATE_VERSION_PUT_STRUCT
5923 || ( pCurLeaf->uEax == 0
5924 && pCurLeaf->uEbx == 0
5925 && pCurLeaf->uEcx == 0
5926 && pCurLeaf->uEdx == 0),
5927 ("uVersion=%#x; %#x %#x %#x %#x\n",
5928 uVersion, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx));
5929 pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
5930 }
5931 }
5932 }
5933
5934 /* Update the fXStateGuestMask value for the VM. */
5935 if (pVM->cpum.s.fXStateGuestMask != fGuestXcr0Mask)
5936 {
5937 LogRel(("CPUM: fXStateGuestMask=%#llx -> %#llx\n", pVM->cpum.s.fXStateGuestMask, fGuestXcr0Mask));
5938 pVM->cpum.s.fXStateGuestMask = fGuestXcr0Mask;
5939 if (!fGuestXcr0Mask && (aGuestCpuIdStd[1].uEcx & X86_CPUID_FEATURE_ECX_XSAVE))
5940 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
5941 N_("Internal Processing Error: XSAVE feature bit enabled, but leaf 0xd is empty."));
5942 }
5943
5944#undef CPUID_CHECK_RET
5945#undef CPUID_CHECK_WRN
5946#undef CPUID_CHECK2_RET
5947#undef CPUID_CHECK2_WRN
5948#undef CPUID_RAW_FEATURE_RET
5949#undef CPUID_RAW_FEATURE_WRN
5950#undef CPUID_RAW_FEATURE_IGN
5951#undef CPUID_GST_FEATURE_RET
5952#undef CPUID_GST_FEATURE_WRN
5953#undef CPUID_GST_FEATURE_EMU
5954#undef CPUID_GST_FEATURE_IGN
5955#undef CPUID_GST_FEATURE2_RET
5956#undef CPUID_GST_FEATURE2_WRN
5957#undef CPUID_GST_FEATURE2_EMU
5958#undef CPUID_GST_FEATURE2_IGN
5959#undef CPUID_GST_AMD_FEATURE_RET
5960#undef CPUID_GST_AMD_FEATURE_WRN
5961#undef CPUID_GST_AMD_FEATURE_EMU
5962#undef CPUID_GST_AMD_FEATURE_IGN
5963
5964 /*
5965 * We're good, commit the CPU ID leaves.
5966 */
5967 pVM->cpum.s.GuestInfo.DefCpuId = GuestDefCpuId;
5968 rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves, pMsrs);
5969 AssertLogRelRCReturn(rc, rc);
5970
5971 return VINF_SUCCESS;
5972}
5973
5974
5975/**
5976 * Loads the CPU ID leaves saved by pass 0.
5977 *
5978 * @returns VBox status code.
5979 * @param pVM The cross context VM structure.
5980 * @param pSSM The saved state handle.
5981 * @param uVersion The format version.
5982 * @param pMsrs The guest MSRs.
5983 */
5984int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pMsrs)
5985{
5986 AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
5987
5988 /*
5989 * Load the CPUID leaves array first and call worker to do the rest, just so
5990 * we can free the memory when we need to without ending up in column 1000.
5991 */
5992 PCPUMCPUIDLEAF paLeaves;
5993 uint32_t cLeaves;
5994 int rc = cpumR3LoadGuestCpuIdArray(pVM, pSSM, uVersion, &paLeaves, &cLeaves);
5995 AssertRC(rc);
5996 if (RT_SUCCESS(rc))
5997 {
5998 rc = cpumR3LoadCpuIdInner(pVM, pSSM, uVersion, paLeaves, cLeaves, pMsrs);
5999 RTMemFree(paLeaves);
6000 }
6001 return rc;
6002}
6003
6004
6005
6006/**
6007 * Loads the CPU ID leaves saved by pass 0 in an pre 3.2 saved state.
6008 *
6009 * @returns VBox status code.
6010 * @param pVM The cross context VM structure.
6011 * @param pSSM The saved state handle.
6012 * @param uVersion The format version.
6013 */
6014int cpumR3LoadCpuIdPre32(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
6015{
6016 AssertMsgReturn(uVersion < CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
6017
6018 /*
6019 * Restore the CPUID leaves.
6020 *
6021 * Note that we support restoring less than the current amount of standard
6022 * leaves because we've been allowed more is newer version of VBox.
6023 */
6024 uint32_t cElements;
6025 int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
6026 if (cElements > RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmStd))
6027 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
6028 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmStd[0], cElements*sizeof(pVM->cpum.s.aGuestCpuIdPatmStd[0]));
6029
6030 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
6031 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmExt))
6032 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
6033 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmExt[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmExt));
6034
6035 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
6036 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmCentaur))
6037 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
6038 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmCentaur));
6039
6040 SSMR3GetMem(pSSM, &pVM->cpum.s.GuestInfo.DefCpuId, sizeof(pVM->cpum.s.GuestInfo.DefCpuId));
6041
6042 /*
6043 * Check that the basic cpuid id information is unchanged.
6044 */
6045 /** @todo we should check the 64 bits capabilities too! */
6046 uint32_t au32CpuId[8] = {0,0,0,0, 0,0,0,0};
6047 ASMCpuIdExSlow(0, 0, 0, 0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
6048 ASMCpuIdExSlow(1, 0, 0, 0, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
6049 uint32_t au32CpuIdSaved[8];
6050 rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
6051 if (RT_SUCCESS(rc))
6052 {
6053 /* Ignore CPU stepping. */
6054 au32CpuId[4] &= 0xfffffff0;
6055 au32CpuIdSaved[4] &= 0xfffffff0;
6056
6057 /* Ignore APIC ID (AMD specs). */
6058 au32CpuId[5] &= ~0xff000000;
6059 au32CpuIdSaved[5] &= ~0xff000000;
6060
6061 /* Ignore the number of Logical CPUs (AMD specs). */
6062 au32CpuId[5] &= ~0x00ff0000;
6063 au32CpuIdSaved[5] &= ~0x00ff0000;
6064
6065 /* Ignore some advanced capability bits, that we don't expose to the guest. */
6066 au32CpuId[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
6067 | X86_CPUID_FEATURE_ECX_VMX
6068 | X86_CPUID_FEATURE_ECX_SMX
6069 | X86_CPUID_FEATURE_ECX_EST
6070 | X86_CPUID_FEATURE_ECX_TM2
6071 | X86_CPUID_FEATURE_ECX_CNTXID
6072 | X86_CPUID_FEATURE_ECX_TPRUPDATE
6073 | X86_CPUID_FEATURE_ECX_PDCM
6074 | X86_CPUID_FEATURE_ECX_DCA
6075 | X86_CPUID_FEATURE_ECX_X2APIC
6076 );
6077 au32CpuIdSaved[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
6078 | X86_CPUID_FEATURE_ECX_VMX
6079 | X86_CPUID_FEATURE_ECX_SMX
6080 | X86_CPUID_FEATURE_ECX_EST
6081 | X86_CPUID_FEATURE_ECX_TM2
6082 | X86_CPUID_FEATURE_ECX_CNTXID
6083 | X86_CPUID_FEATURE_ECX_TPRUPDATE
6084 | X86_CPUID_FEATURE_ECX_PDCM
6085 | X86_CPUID_FEATURE_ECX_DCA
6086 | X86_CPUID_FEATURE_ECX_X2APIC
6087 );
6088
6089 /* Make sure we don't forget to update the masks when enabling
6090 * features in the future.
6091 */
6092 AssertRelease(!(pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx &
6093 ( X86_CPUID_FEATURE_ECX_DTES64
6094 | X86_CPUID_FEATURE_ECX_VMX
6095 | X86_CPUID_FEATURE_ECX_SMX
6096 | X86_CPUID_FEATURE_ECX_EST
6097 | X86_CPUID_FEATURE_ECX_TM2
6098 | X86_CPUID_FEATURE_ECX_CNTXID
6099 | X86_CPUID_FEATURE_ECX_TPRUPDATE
6100 | X86_CPUID_FEATURE_ECX_PDCM
6101 | X86_CPUID_FEATURE_ECX_DCA
6102 | X86_CPUID_FEATURE_ECX_X2APIC
6103 )));
6104 /* do the compare */
6105 if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
6106 {
6107 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
6108 LogRel(("cpumR3LoadExec: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
6109 "Saved=%.*Rhxs\n"
6110 "Real =%.*Rhxs\n",
6111 sizeof(au32CpuIdSaved), au32CpuIdSaved,
6112 sizeof(au32CpuId), au32CpuId));
6113 else
6114 {
6115 LogRel(("cpumR3LoadExec: CpuId mismatch!\n"
6116 "Saved=%.*Rhxs\n"
6117 "Real =%.*Rhxs\n",
6118 sizeof(au32CpuIdSaved), au32CpuIdSaved,
6119 sizeof(au32CpuId), au32CpuId));
6120 rc = VERR_SSM_LOAD_CPUID_MISMATCH;
6121 }
6122 }
6123 }
6124
6125 return rc;
6126}
6127
6128
6129
6130/*
6131 *
6132 *
6133 * CPUID Info Handler.
6134 * CPUID Info Handler.
6135 * CPUID Info Handler.
6136 *
6137 *
6138 */
6139
6140
6141
6142/**
6143 * Get L1 cache / TLS associativity.
6144 */
6145static const char *getCacheAss(unsigned u, char *pszBuf)
6146{
6147 if (u == 0)
6148 return "res0 ";
6149 if (u == 1)
6150 return "direct";
6151 if (u == 255)
6152 return "fully";
6153 if (u >= 256)
6154 return "???";
6155
6156 RTStrPrintf(pszBuf, 16, "%d way", u);
6157 return pszBuf;
6158}
6159
6160
6161/**
6162 * Get L2 cache associativity.
6163 */
6164const char *getL2CacheAss(unsigned u)
6165{
6166 switch (u)
6167 {
6168 case 0: return "off ";
6169 case 1: return "direct";
6170 case 2: return "2 way ";
6171 case 3: return "res3 ";
6172 case 4: return "4 way ";
6173 case 5: return "res5 ";
6174 case 6: return "8 way ";
6175 case 7: return "res7 ";
6176 case 8: return "16 way";
6177 case 9: return "res9 ";
6178 case 10: return "res10 ";
6179 case 11: return "res11 ";
6180 case 12: return "res12 ";
6181 case 13: return "res13 ";
6182 case 14: return "res14 ";
6183 case 15: return "fully ";
6184 default: return "????";
6185 }
6186}
6187
6188
6189/** CPUID(1).EDX field descriptions. */
6190static DBGFREGSUBFIELD const g_aLeaf1EdxSubFields[] =
6191{
6192 DBGFREGSUBFIELD_RO("FPU\0" "x87 FPU on Chip", 0, 1, 0),
6193 DBGFREGSUBFIELD_RO("VME\0" "Virtual 8086 Mode Enhancements", 1, 1, 0),
6194 DBGFREGSUBFIELD_RO("DE\0" "Debugging extensions", 2, 1, 0),
6195 DBGFREGSUBFIELD_RO("PSE\0" "Page Size Extension", 3, 1, 0),
6196 DBGFREGSUBFIELD_RO("TSC\0" "Time Stamp Counter", 4, 1, 0),
6197 DBGFREGSUBFIELD_RO("MSR\0" "Model Specific Registers", 5, 1, 0),
6198 DBGFREGSUBFIELD_RO("PAE\0" "Physical Address Extension", 6, 1, 0),
6199 DBGFREGSUBFIELD_RO("MCE\0" "Machine Check Exception", 7, 1, 0),
6200 DBGFREGSUBFIELD_RO("CX8\0" "CMPXCHG8B instruction", 8, 1, 0),
6201 DBGFREGSUBFIELD_RO("APIC\0" "APIC On-Chip", 9, 1, 0),
6202 DBGFREGSUBFIELD_RO("SEP\0" "SYSENTER and SYSEXIT Present", 11, 1, 0),
6203 DBGFREGSUBFIELD_RO("MTRR\0" "Memory Type Range Registers", 12, 1, 0),
6204 DBGFREGSUBFIELD_RO("PGE\0" "PTE Global Bit", 13, 1, 0),
6205 DBGFREGSUBFIELD_RO("MCA\0" "Machine Check Architecture", 14, 1, 0),
6206 DBGFREGSUBFIELD_RO("CMOV\0" "Conditional Move instructions", 15, 1, 0),
6207 DBGFREGSUBFIELD_RO("PAT\0" "Page Attribute Table", 16, 1, 0),
6208 DBGFREGSUBFIELD_RO("PSE-36\0" "36-bit Page Size Extension", 17, 1, 0),
6209 DBGFREGSUBFIELD_RO("PSN\0" "Processor Serial Number", 18, 1, 0),
6210 DBGFREGSUBFIELD_RO("CLFSH\0" "CLFLUSH instruction", 19, 1, 0),
6211 DBGFREGSUBFIELD_RO("DS\0" "Debug Store", 21, 1, 0),
6212 DBGFREGSUBFIELD_RO("ACPI\0" "Thermal Mon. & Soft. Clock Ctrl.", 22, 1, 0),
6213 DBGFREGSUBFIELD_RO("MMX\0" "Intel MMX Technology", 23, 1, 0),
6214 DBGFREGSUBFIELD_RO("FXSR\0" "FXSAVE and FXRSTOR instructions", 24, 1, 0),
6215 DBGFREGSUBFIELD_RO("SSE\0" "SSE support", 25, 1, 0),
6216 DBGFREGSUBFIELD_RO("SSE2\0" "SSE2 support", 26, 1, 0),
6217 DBGFREGSUBFIELD_RO("SS\0" "Self Snoop", 27, 1, 0),
6218 DBGFREGSUBFIELD_RO("HTT\0" "Hyper-Threading Technology", 28, 1, 0),
6219 DBGFREGSUBFIELD_RO("TM\0" "Therm. Monitor", 29, 1, 0),
6220 DBGFREGSUBFIELD_RO("PBE\0" "Pending Break Enabled", 31, 1, 0),
6221 DBGFREGSUBFIELD_TERMINATOR()
6222};
6223
6224/** CPUID(1).ECX field descriptions. */
6225static DBGFREGSUBFIELD const g_aLeaf1EcxSubFields[] =
6226{
6227 DBGFREGSUBFIELD_RO("SSE3\0" "SSE3 support", 0, 1, 0),
6228 DBGFREGSUBFIELD_RO("PCLMUL\0" "PCLMULQDQ support (for AES-GCM)", 1, 1, 0),
6229 DBGFREGSUBFIELD_RO("DTES64\0" "DS Area 64-bit Layout", 2, 1, 0),
6230 DBGFREGSUBFIELD_RO("MONITOR\0" "MONITOR/MWAIT instructions", 3, 1, 0),
6231 DBGFREGSUBFIELD_RO("CPL-DS\0" "CPL Qualified Debug Store", 4, 1, 0),
6232 DBGFREGSUBFIELD_RO("VMX\0" "Virtual Machine Extensions", 5, 1, 0),
6233 DBGFREGSUBFIELD_RO("SMX\0" "Safer Mode Extensions", 6, 1, 0),
6234 DBGFREGSUBFIELD_RO("EST\0" "Enhanced SpeedStep Technology", 7, 1, 0),
6235 DBGFREGSUBFIELD_RO("TM2\0" "Terminal Monitor 2", 8, 1, 0),
6236 DBGFREGSUBFIELD_RO("SSSE3\0" "Supplemental Streaming SIMD Extensions 3", 9, 1, 0),
6237 DBGFREGSUBFIELD_RO("CNTX-ID\0" "L1 Context ID", 10, 1, 0),
6238 DBGFREGSUBFIELD_RO("SDBG\0" "Silicon Debug interface", 11, 1, 0),
6239 DBGFREGSUBFIELD_RO("FMA\0" "Fused Multiply Add extensions", 12, 1, 0),
6240 DBGFREGSUBFIELD_RO("CX16\0" "CMPXCHG16B instruction", 13, 1, 0),
6241 DBGFREGSUBFIELD_RO("TPRUPDATE\0" "xTPR Update Control", 14, 1, 0),
6242 DBGFREGSUBFIELD_RO("PDCM\0" "Perf/Debug Capability MSR", 15, 1, 0),
6243 DBGFREGSUBFIELD_RO("PCID\0" "Process Context Identifiers", 17, 1, 0),
6244 DBGFREGSUBFIELD_RO("DCA\0" "Direct Cache Access", 18, 1, 0),
6245 DBGFREGSUBFIELD_RO("SSE4_1\0" "SSE4_1 support", 19, 1, 0),
6246 DBGFREGSUBFIELD_RO("SSE4_2\0" "SSE4_2 support", 20, 1, 0),
6247 DBGFREGSUBFIELD_RO("X2APIC\0" "x2APIC support", 21, 1, 0),
6248 DBGFREGSUBFIELD_RO("MOVBE\0" "MOVBE instruction", 22, 1, 0),
6249 DBGFREGSUBFIELD_RO("POPCNT\0" "POPCNT instruction", 23, 1, 0),
6250 DBGFREGSUBFIELD_RO("TSCDEADL\0" "Time Stamp Counter Deadline", 24, 1, 0),
6251 DBGFREGSUBFIELD_RO("AES\0" "AES instructions", 25, 1, 0),
6252 DBGFREGSUBFIELD_RO("XSAVE\0" "XSAVE instruction", 26, 1, 0),
6253 DBGFREGSUBFIELD_RO("OSXSAVE\0" "OSXSAVE instruction", 27, 1, 0),
6254 DBGFREGSUBFIELD_RO("AVX\0" "AVX support", 28, 1, 0),
6255 DBGFREGSUBFIELD_RO("F16C\0" "16-bit floating point conversion instructions", 29, 1, 0),
6256 DBGFREGSUBFIELD_RO("RDRAND\0" "RDRAND instruction", 30, 1, 0),
6257 DBGFREGSUBFIELD_RO("HVP\0" "Hypervisor Present (we're a guest)", 31, 1, 0),
6258 DBGFREGSUBFIELD_TERMINATOR()
6259};
6260
6261/** CPUID(7,0).EBX field descriptions. */
6262static DBGFREGSUBFIELD const g_aLeaf7Sub0EbxSubFields[] =
6263{
6264 DBGFREGSUBFIELD_RO("FSGSBASE\0" "RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE instr.", 0, 1, 0),
6265 DBGFREGSUBFIELD_RO("TSCADJUST\0" "Supports MSR_IA32_TSC_ADJUST", 1, 1, 0),
6266 DBGFREGSUBFIELD_RO("SGX\0" "Supports Software Guard Extensions", 2, 1, 0),
6267 DBGFREGSUBFIELD_RO("BMI1\0" "Advanced Bit Manipulation extension 1", 3, 1, 0),
6268 DBGFREGSUBFIELD_RO("HLE\0" "Hardware Lock Elision", 4, 1, 0),
6269 DBGFREGSUBFIELD_RO("AVX2\0" "Advanced Vector Extensions 2", 5, 1, 0),
6270 DBGFREGSUBFIELD_RO("FDP_EXCPTN_ONLY\0" "FPU DP only updated on exceptions", 6, 1, 0),
6271 DBGFREGSUBFIELD_RO("SMEP\0" "Supervisor Mode Execution Prevention", 7, 1, 0),
6272 DBGFREGSUBFIELD_RO("BMI2\0" "Advanced Bit Manipulation extension 2", 8, 1, 0),
6273 DBGFREGSUBFIELD_RO("ERMS\0" "Enhanced REP MOVSB/STOSB instructions", 9, 1, 0),
6274 DBGFREGSUBFIELD_RO("INVPCID\0" "INVPCID instruction", 10, 1, 0),
6275 DBGFREGSUBFIELD_RO("RTM\0" "Restricted Transactional Memory", 11, 1, 0),
6276 DBGFREGSUBFIELD_RO("PQM\0" "Platform Quality of Service Monitoring", 12, 1, 0),
6277 DBGFREGSUBFIELD_RO("DEPFPU_CS_DS\0" "Deprecates FPU CS, FPU DS values if set", 13, 1, 0),
6278 DBGFREGSUBFIELD_RO("MPE\0" "Intel Memory Protection Extensions", 14, 1, 0),
6279 DBGFREGSUBFIELD_RO("PQE\0" "Platform Quality of Service Enforcement", 15, 1, 0),
6280 DBGFREGSUBFIELD_RO("AVX512F\0" "AVX512 Foundation instructions", 16, 1, 0),
6281 DBGFREGSUBFIELD_RO("RDSEED\0" "RDSEED instruction", 18, 1, 0),
6282 DBGFREGSUBFIELD_RO("ADX\0" "ADCX/ADOX instructions", 19, 1, 0),
6283 DBGFREGSUBFIELD_RO("SMAP\0" "Supervisor Mode Access Prevention", 20, 1, 0),
6284 DBGFREGSUBFIELD_RO("CLFLUSHOPT\0" "CLFLUSHOPT (Cache Line Flush) instruction", 23, 1, 0),
6285 DBGFREGSUBFIELD_RO("INTEL_PT\0" "Intel Processor Trace", 25, 1, 0),
6286 DBGFREGSUBFIELD_RO("AVX512PF\0" "AVX512 Prefetch instructions", 26, 1, 0),
6287 DBGFREGSUBFIELD_RO("AVX512ER\0" "AVX512 Exponential & Reciprocal instructions", 27, 1, 0),
6288 DBGFREGSUBFIELD_RO("AVX512CD\0" "AVX512 Conflict Detection instructions", 28, 1, 0),
6289 DBGFREGSUBFIELD_RO("SHA\0" "Secure Hash Algorithm extensions", 29, 1, 0),
6290 DBGFREGSUBFIELD_TERMINATOR()
6291};
6292
6293/** CPUID(7,0).ECX field descriptions. */
6294static DBGFREGSUBFIELD const g_aLeaf7Sub0EcxSubFields[] =
6295{
6296 DBGFREGSUBFIELD_RO("PREFETCHWT1\0" "PREFETCHWT1 instruction", 0, 1, 0),
6297 DBGFREGSUBFIELD_RO("UMIP\0" "User mode insturction prevention", 2, 1, 0),
6298 DBGFREGSUBFIELD_RO("PKU\0" "Protection Key for Usermode pages", 3, 1, 0),
6299 DBGFREGSUBFIELD_RO("OSPKE\0" "CR4.PKU mirror", 4, 1, 0),
6300 DBGFREGSUBFIELD_RO("MAWAU\0" "Value used by BNDLDX & BNDSTX", 17, 5, 0),
6301 DBGFREGSUBFIELD_RO("RDPID\0" "Read processor ID support", 22, 1, 0),
6302 DBGFREGSUBFIELD_RO("SGX_LC\0" "Supports SGX Launch Configuration", 30, 1, 0),
6303 DBGFREGSUBFIELD_TERMINATOR()
6304};
6305
6306/** CPUID(7,0).EDX field descriptions. */
6307static DBGFREGSUBFIELD const g_aLeaf7Sub0EdxSubFields[] =
6308{
6309 DBGFREGSUBFIELD_RO("MD_CLEAR\0" "Supports MDS related buffer clearing", 10, 1, 0),
6310 DBGFREGSUBFIELD_RO("IBRS_IBPB\0" "IA32_SPEC_CTRL.IBRS and IA32_PRED_CMD.IBPB", 26, 1, 0),
6311 DBGFREGSUBFIELD_RO("STIBP\0" "Supports IA32_SPEC_CTRL.STIBP", 27, 1, 0),
6312 DBGFREGSUBFIELD_RO("FLUSH_CMD\0" "Supports IA32_FLUSH_CMD", 28, 1, 0),
6313 DBGFREGSUBFIELD_RO("ARCHCAP\0" "Supports IA32_ARCH_CAP", 29, 1, 0),
6314 DBGFREGSUBFIELD_RO("CORECAP\0" "Supports IA32_CORE_CAP", 30, 1, 0),
6315 DBGFREGSUBFIELD_RO("SSBD\0" "Supports IA32_SPEC_CTRL.SSBD", 31, 1, 0),
6316 DBGFREGSUBFIELD_TERMINATOR()
6317};
6318
6319
6320/** CPUID(13,0).EAX+EDX, XCR0, ++ bit descriptions. */
6321static DBGFREGSUBFIELD const g_aXSaveStateBits[] =
6322{
6323 DBGFREGSUBFIELD_RO("x87\0" "Legacy FPU state", 0, 1, 0),
6324 DBGFREGSUBFIELD_RO("SSE\0" "128-bit SSE state", 1, 1, 0),
6325 DBGFREGSUBFIELD_RO("YMM_Hi128\0" "Upper 128 bits of YMM0-15 (AVX)", 2, 1, 0),
6326 DBGFREGSUBFIELD_RO("BNDREGS\0" "MPX bound register state", 3, 1, 0),
6327 DBGFREGSUBFIELD_RO("BNDCSR\0" "MPX bound config and status state", 4, 1, 0),
6328 DBGFREGSUBFIELD_RO("Opmask\0" "opmask state", 5, 1, 0),
6329 DBGFREGSUBFIELD_RO("ZMM_Hi256\0" "Upper 256 bits of ZMM0-15 (AVX-512)", 6, 1, 0),
6330 DBGFREGSUBFIELD_RO("Hi16_ZMM\0" "512-bits ZMM16-31 state (AVX-512)", 7, 1, 0),
6331 DBGFREGSUBFIELD_RO("LWP\0" "Lightweight Profiling (AMD)", 62, 1, 0),
6332 DBGFREGSUBFIELD_TERMINATOR()
6333};
6334
6335/** CPUID(13,1).EAX field descriptions. */
6336static DBGFREGSUBFIELD const g_aLeaf13Sub1EaxSubFields[] =
6337{
6338 DBGFREGSUBFIELD_RO("XSAVEOPT\0" "XSAVEOPT is available", 0, 1, 0),
6339 DBGFREGSUBFIELD_RO("XSAVEC\0" "XSAVEC and compacted XRSTOR supported", 1, 1, 0),
6340 DBGFREGSUBFIELD_RO("XGETBC1\0" "XGETBV with ECX=1 supported", 2, 1, 0),
6341 DBGFREGSUBFIELD_RO("XSAVES\0" "XSAVES/XRSTORS and IA32_XSS supported", 3, 1, 0),
6342 DBGFREGSUBFIELD_TERMINATOR()
6343};
6344
6345
6346/** CPUID(0x80000001,0).EDX field descriptions. */
6347static DBGFREGSUBFIELD const g_aExtLeaf1EdxSubFields[] =
6348{
6349 DBGFREGSUBFIELD_RO("FPU\0" "x87 FPU on Chip", 0, 1, 0),
6350 DBGFREGSUBFIELD_RO("VME\0" "Virtual 8086 Mode Enhancements", 1, 1, 0),
6351 DBGFREGSUBFIELD_RO("DE\0" "Debugging extensions", 2, 1, 0),
6352 DBGFREGSUBFIELD_RO("PSE\0" "Page Size Extension", 3, 1, 0),
6353 DBGFREGSUBFIELD_RO("TSC\0" "Time Stamp Counter", 4, 1, 0),
6354 DBGFREGSUBFIELD_RO("MSR\0" "K86 Model Specific Registers", 5, 1, 0),
6355 DBGFREGSUBFIELD_RO("PAE\0" "Physical Address Extension", 6, 1, 0),
6356 DBGFREGSUBFIELD_RO("MCE\0" "Machine Check Exception", 7, 1, 0),
6357 DBGFREGSUBFIELD_RO("CX8\0" "CMPXCHG8B instruction", 8, 1, 0),
6358 DBGFREGSUBFIELD_RO("APIC\0" "APIC On-Chip", 9, 1, 0),
6359 DBGFREGSUBFIELD_RO("SEP\0" "SYSCALL/SYSRET", 11, 1, 0),
6360 DBGFREGSUBFIELD_RO("MTRR\0" "Memory Type Range Registers", 12, 1, 0),
6361 DBGFREGSUBFIELD_RO("PGE\0" "PTE Global Bit", 13, 1, 0),
6362 DBGFREGSUBFIELD_RO("MCA\0" "Machine Check Architecture", 14, 1, 0),
6363 DBGFREGSUBFIELD_RO("CMOV\0" "Conditional Move instructions", 15, 1, 0),
6364 DBGFREGSUBFIELD_RO("PAT\0" "Page Attribute Table", 16, 1, 0),
6365 DBGFREGSUBFIELD_RO("PSE-36\0" "36-bit Page Size Extension", 17, 1, 0),
6366 DBGFREGSUBFIELD_RO("NX\0" "No-Execute/Execute-Disable", 20, 1, 0),
6367 DBGFREGSUBFIELD_RO("AXMMX\0" "AMD Extensions to MMX instructions", 22, 1, 0),
6368 DBGFREGSUBFIELD_RO("MMX\0" "Intel MMX Technology", 23, 1, 0),
6369 DBGFREGSUBFIELD_RO("FXSR\0" "FXSAVE and FXRSTOR Instructions", 24, 1, 0),
6370 DBGFREGSUBFIELD_RO("FFXSR\0" "AMD fast FXSAVE and FXRSTOR instructions", 25, 1, 0),
6371 DBGFREGSUBFIELD_RO("Page1GB\0" "1 GB large page", 26, 1, 0),
6372 DBGFREGSUBFIELD_RO("RDTSCP\0" "RDTSCP instruction", 27, 1, 0),
6373 DBGFREGSUBFIELD_RO("LM\0" "AMD64 Long Mode", 29, 1, 0),
6374 DBGFREGSUBFIELD_RO("3DNOWEXT\0" "AMD Extensions to 3DNow", 30, 1, 0),
6375 DBGFREGSUBFIELD_RO("3DNOW\0" "AMD 3DNow", 31, 1, 0),
6376 DBGFREGSUBFIELD_TERMINATOR()
6377};
6378
6379/** CPUID(0x80000001,0).ECX field descriptions. */
6380static DBGFREGSUBFIELD const g_aExtLeaf1EcxSubFields[] =
6381{
6382 DBGFREGSUBFIELD_RO("LahfSahf\0" "LAHF/SAHF support in 64-bit mode", 0, 1, 0),
6383 DBGFREGSUBFIELD_RO("CmpLegacy\0" "Core multi-processing legacy mode", 1, 1, 0),
6384 DBGFREGSUBFIELD_RO("SVM\0" "AMD Secure Virtual Machine extensions", 2, 1, 0),
6385 DBGFREGSUBFIELD_RO("EXTAPIC\0" "AMD Extended APIC registers", 3, 1, 0),
6386 DBGFREGSUBFIELD_RO("CR8L\0" "AMD LOCK MOV CR0 means MOV CR8", 4, 1, 0),
6387 DBGFREGSUBFIELD_RO("ABM\0" "AMD Advanced Bit Manipulation", 5, 1, 0),
6388 DBGFREGSUBFIELD_RO("SSE4A\0" "SSE4A instructions", 6, 1, 0),
6389 DBGFREGSUBFIELD_RO("MISALIGNSSE\0" "AMD Misaligned SSE mode", 7, 1, 0),
6390 DBGFREGSUBFIELD_RO("3DNOWPRF\0" "AMD PREFETCH and PREFETCHW instructions", 8, 1, 0),
6391 DBGFREGSUBFIELD_RO("OSVW\0" "AMD OS Visible Workaround", 9, 1, 0),
6392 DBGFREGSUBFIELD_RO("IBS\0" "Instruct Based Sampling", 10, 1, 0),
6393 DBGFREGSUBFIELD_RO("XOP\0" "Extended Operation support", 11, 1, 0),
6394 DBGFREGSUBFIELD_RO("SKINIT\0" "SKINIT, STGI, and DEV support", 12, 1, 0),
6395 DBGFREGSUBFIELD_RO("WDT\0" "AMD Watchdog Timer support", 13, 1, 0),
6396 DBGFREGSUBFIELD_RO("LWP\0" "Lightweight Profiling support", 15, 1, 0),
6397 DBGFREGSUBFIELD_RO("FMA4\0" "Four operand FMA instruction support", 16, 1, 0),
6398 DBGFREGSUBFIELD_RO("NodeId\0" "NodeId in MSR C001_100C", 19, 1, 0),
6399 DBGFREGSUBFIELD_RO("TBM\0" "Trailing Bit Manipulation instructions", 21, 1, 0),
6400 DBGFREGSUBFIELD_RO("TOPOEXT\0" "Topology Extensions", 22, 1, 0),
6401 DBGFREGSUBFIELD_RO("PRFEXTCORE\0" "Performance Counter Extensions support", 23, 1, 0),
6402 DBGFREGSUBFIELD_RO("PRFEXTNB\0" "NB Performance Counter Extensions support", 24, 1, 0),
6403 DBGFREGSUBFIELD_RO("DATABPEXT\0" "Data-access Breakpoint Extension", 26, 1, 0),
6404 DBGFREGSUBFIELD_RO("PERFTSC\0" "Performance Time Stamp Counter", 27, 1, 0),
6405 DBGFREGSUBFIELD_RO("PCX_L2I\0" "L2I/L3 Performance Counter Extensions", 28, 1, 0),
6406 DBGFREGSUBFIELD_RO("MWAITX\0" "MWAITX and MONITORX instructions", 29, 1, 0),
6407 DBGFREGSUBFIELD_TERMINATOR()
6408};
6409
6410/** CPUID(0x8000000a,0).EDX field descriptions. */
6411static DBGFREGSUBFIELD const g_aExtLeafAEdxSubFields[] =
6412{
6413 DBGFREGSUBFIELD_RO("NP\0" "Nested Paging", 0, 1, 0),
6414 DBGFREGSUBFIELD_RO("LbrVirt\0" "Last Branch Record Virtualization", 1, 1, 0),
6415 DBGFREGSUBFIELD_RO("SVML\0" "SVM Lock", 2, 1, 0),
6416 DBGFREGSUBFIELD_RO("NRIPS\0" "NextRIP Save", 3, 1, 0),
6417 DBGFREGSUBFIELD_RO("TscRateMsr\0" "MSR based TSC rate control", 4, 1, 0),
6418 DBGFREGSUBFIELD_RO("VmcbClean\0" "VMCB clean bits", 5, 1, 0),
6419 DBGFREGSUBFIELD_RO("FlushByASID\0" "Flush by ASID", 6, 1, 0),
6420 DBGFREGSUBFIELD_RO("DecodeAssists\0" "Decode Assists", 7, 1, 0),
6421 DBGFREGSUBFIELD_RO("PauseFilter\0" "Pause intercept filter", 10, 1, 0),
6422 DBGFREGSUBFIELD_RO("PauseFilterThreshold\0" "Pause filter threshold", 12, 1, 0),
6423 DBGFREGSUBFIELD_RO("AVIC\0" "Advanced Virtual Interrupt Controller", 13, 1, 0),
6424 DBGFREGSUBFIELD_RO("VMSAVEVirt\0" "VMSAVE and VMLOAD Virtualization", 15, 1, 0),
6425 DBGFREGSUBFIELD_RO("VGIF\0" "Virtual Global-Interrupt Flag", 16, 1, 0),
6426 DBGFREGSUBFIELD_RO("GMET\0" "Guest Mode Execute Trap Extension", 17, 1, 0),
6427 DBGFREGSUBFIELD_TERMINATOR()
6428};
6429
6430
6431/** CPUID(0x80000007,0).EDX field descriptions. */
6432static DBGFREGSUBFIELD const g_aExtLeaf7EdxSubFields[] =
6433{
6434 DBGFREGSUBFIELD_RO("TS\0" "Temperature Sensor", 0, 1, 0),
6435 DBGFREGSUBFIELD_RO("FID\0" "Frequency ID control", 1, 1, 0),
6436 DBGFREGSUBFIELD_RO("VID\0" "Voltage ID control", 2, 1, 0),
6437 DBGFREGSUBFIELD_RO("VID\0" "Voltage ID control", 2, 1, 0),
6438 DBGFREGSUBFIELD_RO("TTP\0" "Thermal Trip", 3, 1, 0),
6439 DBGFREGSUBFIELD_RO("TM\0" "Hardware Thermal Control (HTC)", 4, 1, 0),
6440 DBGFREGSUBFIELD_RO("100MHzSteps\0" "100 MHz Multiplier control", 6, 1, 0),
6441 DBGFREGSUBFIELD_RO("HwPstate\0" "Hardware P-state control", 7, 1, 0),
6442 DBGFREGSUBFIELD_RO("TscInvariant\0" "Invariant Time Stamp Counter", 8, 1, 0),
6443 DBGFREGSUBFIELD_RO("CBP\0" "Core Performance Boost", 9, 1, 0),
6444 DBGFREGSUBFIELD_RO("EffFreqRO\0" "Read-only Effective Frequency Interface", 10, 1, 0),
6445 DBGFREGSUBFIELD_RO("ProcFdbkIf\0" "Processor Feedback Interface", 11, 1, 0),
6446 DBGFREGSUBFIELD_RO("ProcPwrRep\0" "Core power reporting interface support", 12, 1, 0),
6447 DBGFREGSUBFIELD_TERMINATOR()
6448};
6449
6450/** CPUID(0x80000008,0).EBX field descriptions. */
6451static DBGFREGSUBFIELD const g_aExtLeaf8EbxSubFields[] =
6452{
6453 DBGFREGSUBFIELD_RO("CLZERO\0" "Clear zero instruction (cacheline)", 0, 1, 0),
6454 DBGFREGSUBFIELD_RO("IRPerf\0" "Instructions retired count support", 1, 1, 0),
6455 DBGFREGSUBFIELD_RO("XSaveErPtr\0" "Save/restore error pointers (FXSAVE/RSTOR*)", 2, 1, 0),
6456 DBGFREGSUBFIELD_RO("RDPRU\0" "RDPRU instruction", 4, 1, 0),
6457 DBGFREGSUBFIELD_RO("MCOMMIT\0" "MCOMMIT instruction", 8, 1, 0),
6458 DBGFREGSUBFIELD_RO("IBPB\0" "Supports the IBPB command in IA32_PRED_CMD", 12, 1, 0),
6459 DBGFREGSUBFIELD_TERMINATOR()
6460};
6461
6462
6463static void cpumR3CpuIdInfoMnemonicListU32(PCDBGFINFOHLP pHlp, uint32_t uVal, PCDBGFREGSUBFIELD pDesc,
6464 const char *pszLeadIn, uint32_t cchWidth)
6465{
6466 if (pszLeadIn)
6467 pHlp->pfnPrintf(pHlp, "%*s", cchWidth, pszLeadIn);
6468
6469 for (uint32_t iBit = 0; iBit < 32; iBit++)
6470 if (RT_BIT_32(iBit) & uVal)
6471 {
6472 while ( pDesc->pszName != NULL
6473 && iBit >= (uint32_t)pDesc->iFirstBit + pDesc->cBits)
6474 pDesc++;
6475 if ( pDesc->pszName != NULL
6476 && iBit - (uint32_t)pDesc->iFirstBit < (uint32_t)pDesc->cBits)
6477 {
6478 if (pDesc->cBits == 1)
6479 pHlp->pfnPrintf(pHlp, " %s", pDesc->pszName);
6480 else
6481 {
6482 uint32_t uFieldValue = uVal >> pDesc->iFirstBit;
6483 if (pDesc->cBits < 32)
6484 uFieldValue &= RT_BIT_32(pDesc->cBits) - UINT32_C(1);
6485 pHlp->pfnPrintf(pHlp, pDesc->cBits < 4 ? " %s=%u" : " %s=%#x", pDesc->pszName, uFieldValue);
6486 iBit = pDesc->iFirstBit + pDesc->cBits - 1;
6487 }
6488 }
6489 else
6490 pHlp->pfnPrintf(pHlp, " %u", iBit);
6491 }
6492 if (pszLeadIn)
6493 pHlp->pfnPrintf(pHlp, "\n");
6494}
6495
6496
6497static void cpumR3CpuIdInfoMnemonicListU64(PCDBGFINFOHLP pHlp, uint64_t uVal, PCDBGFREGSUBFIELD pDesc,
6498 const char *pszLeadIn, uint32_t cchWidth)
6499{
6500 if (pszLeadIn)
6501 pHlp->pfnPrintf(pHlp, "%*s", cchWidth, pszLeadIn);
6502
6503 for (uint32_t iBit = 0; iBit < 64; iBit++)
6504 if (RT_BIT_64(iBit) & uVal)
6505 {
6506 while ( pDesc->pszName != NULL
6507 && iBit >= (uint32_t)pDesc->iFirstBit + pDesc->cBits)
6508 pDesc++;
6509 if ( pDesc->pszName != NULL
6510 && iBit - (uint32_t)pDesc->iFirstBit < (uint32_t)pDesc->cBits)
6511 {
6512 if (pDesc->cBits == 1)
6513 pHlp->pfnPrintf(pHlp, " %s", pDesc->pszName);
6514 else
6515 {
6516 uint64_t uFieldValue = uVal >> pDesc->iFirstBit;
6517 if (pDesc->cBits < 64)
6518 uFieldValue &= RT_BIT_64(pDesc->cBits) - UINT64_C(1);
6519 pHlp->pfnPrintf(pHlp, pDesc->cBits < 4 ? " %s=%llu" : " %s=%#llx", pDesc->pszName, uFieldValue);
6520 iBit = pDesc->iFirstBit + pDesc->cBits - 1;
6521 }
6522 }
6523 else
6524 pHlp->pfnPrintf(pHlp, " %u", iBit);
6525 }
6526 if (pszLeadIn)
6527 pHlp->pfnPrintf(pHlp, "\n");
6528}
6529
6530
6531static void cpumR3CpuIdInfoValueWithMnemonicListU64(PCDBGFINFOHLP pHlp, uint64_t uVal, PCDBGFREGSUBFIELD pDesc,
6532 const char *pszLeadIn, uint32_t cchWidth)
6533{
6534 if (!uVal)
6535 pHlp->pfnPrintf(pHlp, "%*s %#010x`%08x\n", cchWidth, pszLeadIn, RT_HI_U32(uVal), RT_LO_U32(uVal));
6536 else
6537 {
6538 pHlp->pfnPrintf(pHlp, "%*s %#010x`%08x (", cchWidth, pszLeadIn, RT_HI_U32(uVal), RT_LO_U32(uVal));
6539 cpumR3CpuIdInfoMnemonicListU64(pHlp, uVal, pDesc, NULL, 0);
6540 pHlp->pfnPrintf(pHlp, " )\n");
6541 }
6542}
6543
6544
6545static void cpumR3CpuIdInfoVerboseCompareListU32(PCDBGFINFOHLP pHlp, uint32_t uVal1, uint32_t uVal2, PCDBGFREGSUBFIELD pDesc,
6546 uint32_t cchWidth)
6547{
6548 uint32_t uCombined = uVal1 | uVal2;
6549 for (uint32_t iBit = 0; iBit < 32; iBit++)
6550 if ( (RT_BIT_32(iBit) & uCombined)
6551 || (iBit == pDesc->iFirstBit && pDesc->pszName) )
6552 {
6553 while ( pDesc->pszName != NULL
6554 && iBit >= (uint32_t)pDesc->iFirstBit + pDesc->cBits)
6555 pDesc++;
6556
6557 if ( pDesc->pszName != NULL
6558 && iBit - (uint32_t)pDesc->iFirstBit < (uint32_t)pDesc->cBits)
6559 {
6560 size_t cchMnemonic = strlen(pDesc->pszName);
6561 const char *pszDesc = pDesc->pszName + cchMnemonic + 1;
6562 size_t cchDesc = strlen(pszDesc);
6563 uint32_t uFieldValue1 = uVal1 >> pDesc->iFirstBit;
6564 uint32_t uFieldValue2 = uVal2 >> pDesc->iFirstBit;
6565 if (pDesc->cBits < 32)
6566 {
6567 uFieldValue1 &= RT_BIT_32(pDesc->cBits) - UINT32_C(1);
6568 uFieldValue2 &= RT_BIT_32(pDesc->cBits) - UINT32_C(1);
6569 }
6570
6571 pHlp->pfnPrintf(pHlp, pDesc->cBits < 4 ? " %s - %s%*s= %u (%u)\n" : " %s - %s%*s= %#x (%#x)\n",
6572 pDesc->pszName, pszDesc,
6573 cchMnemonic + 3 + cchDesc < cchWidth ? cchWidth - (cchMnemonic + 3 + cchDesc) : 1, "",
6574 uFieldValue1, uFieldValue2);
6575
6576 iBit = pDesc->iFirstBit + pDesc->cBits - 1U;
6577 pDesc++;
6578 }
6579 else
6580 pHlp->pfnPrintf(pHlp, " %2u - Reserved%*s= %u (%u)\n", iBit, 13 < cchWidth ? cchWidth - 13 : 1, "",
6581 RT_BOOL(uVal1 & RT_BIT_32(iBit)), RT_BOOL(uVal2 & RT_BIT_32(iBit)));
6582 }
6583}
6584
6585
6586/**
6587 * Produces a detailed summary of standard leaf 0x00000001.
6588 *
6589 * @param pHlp The info helper functions.
6590 * @param pCurLeaf The 0x00000001 leaf.
6591 * @param fVerbose Whether to be very verbose or not.
6592 * @param fIntel Set if intel CPU.
6593 */
6594static void cpumR3CpuIdInfoStdLeaf1Details(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF pCurLeaf, bool fVerbose, bool fIntel)
6595{
6596 Assert(pCurLeaf); Assert(pCurLeaf->uLeaf == 1);
6597 static const char * const s_apszTypes[4] = { "primary", "overdrive", "MP", "reserved" };
6598 uint32_t uEAX = pCurLeaf->uEax;
6599 uint32_t uEBX = pCurLeaf->uEbx;
6600
6601 pHlp->pfnPrintf(pHlp,
6602 "%36s %2d \tExtended: %d \tEffective: %d\n"
6603 "%36s %2d \tExtended: %d \tEffective: %d\n"
6604 "%36s %d\n"
6605 "%36s %d (%s)\n"
6606 "%36s %#04x\n"
6607 "%36s %d\n"
6608 "%36s %d\n"
6609 "%36s %#04x\n"
6610 ,
6611 "Family:", (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
6612 "Model:", (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
6613 "Stepping:", ASMGetCpuStepping(uEAX),
6614 "Type:", (uEAX >> 12) & 3, s_apszTypes[(uEAX >> 12) & 3],
6615 "APIC ID:", (uEBX >> 24) & 0xff,
6616 "Logical CPUs:",(uEBX >> 16) & 0xff,
6617 "CLFLUSH Size:",(uEBX >> 8) & 0xff,
6618 "Brand ID:", (uEBX >> 0) & 0xff);
6619 if (fVerbose)
6620 {
6621 CPUMCPUID Host;
6622 ASMCpuIdExSlow(1, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6623 pHlp->pfnPrintf(pHlp, "Features\n");
6624 pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
6625 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf1EdxSubFields, 56);
6626 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf1EcxSubFields, 56);
6627 }
6628 else
6629 {
6630 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aLeaf1EdxSubFields, "Features EDX:", 36);
6631 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aLeaf1EcxSubFields, "Features ECX:", 36);
6632 }
6633}
6634
6635
6636/**
6637 * Produces a detailed summary of standard leaf 0x00000007.
6638 *
6639 * @param pHlp The info helper functions.
6640 * @param paLeaves The CPUID leaves array.
6641 * @param cLeaves The number of leaves in the array.
6642 * @param pCurLeaf The first 0x00000007 leaf.
6643 * @param fVerbose Whether to be very verbose or not.
6644 */
6645static void cpumR3CpuIdInfoStdLeaf7Details(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
6646 PCCPUMCPUIDLEAF pCurLeaf, bool fVerbose)
6647{
6648 Assert(pCurLeaf); Assert(pCurLeaf->uLeaf == 7);
6649 pHlp->pfnPrintf(pHlp, "Structured Extended Feature Flags Enumeration (leaf 7):\n");
6650 for (;;)
6651 {
6652 CPUMCPUID Host;
6653 ASMCpuIdExSlow(pCurLeaf->uLeaf, 0, pCurLeaf->uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6654
6655 switch (pCurLeaf->uSubLeaf)
6656 {
6657 case 0:
6658 if (fVerbose)
6659 {
6660 pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
6661 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aLeaf7Sub0EbxSubFields, 56);
6662 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub0EcxSubFields, 56);
6663 if (pCurLeaf->uEdx || Host.uEdx)
6664 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub0EdxSubFields, 56);
6665 }
6666 else
6667 {
6668 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aLeaf7Sub0EbxSubFields, "Ext Features EBX:", 36);
6669 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aLeaf7Sub0EcxSubFields, "Ext Features ECX:", 36);
6670 if (pCurLeaf->uEdx)
6671 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aLeaf7Sub0EdxSubFields, "Ext Features EDX:", 36);
6672 }
6673 break;
6674
6675 default:
6676 if (pCurLeaf->uEdx || pCurLeaf->uEcx || pCurLeaf->uEbx)
6677 pHlp->pfnPrintf(pHlp, "Unknown extended feature sub-leaf #%u: EAX=%#x EBX=%#x ECX=%#x EDX=%#x\n",
6678 pCurLeaf->uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx);
6679 break;
6680
6681 }
6682
6683 /* advance. */
6684 pCurLeaf++;
6685 if ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
6686 || pCurLeaf->uLeaf != 0x7)
6687 break;
6688 }
6689}
6690
6691
6692/**
6693 * Produces a detailed summary of standard leaf 0x0000000d.
6694 *
6695 * @param pHlp The info helper functions.
6696 * @param paLeaves The CPUID leaves array.
6697 * @param cLeaves The number of leaves in the array.
6698 * @param pCurLeaf The first 0x00000007 leaf.
6699 * @param fVerbose Whether to be very verbose or not.
6700 */
6701static void cpumR3CpuIdInfoStdLeaf13Details(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
6702 PCCPUMCPUIDLEAF pCurLeaf, bool fVerbose)
6703{
6704 RT_NOREF_PV(fVerbose);
6705 Assert(pCurLeaf); Assert(pCurLeaf->uLeaf == 13);
6706 pHlp->pfnPrintf(pHlp, "Processor Extended State Enumeration (leaf 0xd):\n");
6707 for (uint32_t uSubLeaf = 0; uSubLeaf < 64; uSubLeaf++)
6708 {
6709 CPUMCPUID Host;
6710 ASMCpuIdExSlow(UINT32_C(0x0000000d), 0, uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6711
6712 switch (uSubLeaf)
6713 {
6714 case 0:
6715 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
6716 pHlp->pfnPrintf(pHlp, "%42s %#x/%#x\n", "XSAVE area cur/max size by XCR0, guest:",
6717 pCurLeaf->uEbx, pCurLeaf->uEcx);
6718 pHlp->pfnPrintf(pHlp, "%42s %#x/%#x\n", "XSAVE area cur/max size by XCR0, host:", Host.uEbx, Host.uEcx);
6719
6720 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
6721 cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(pCurLeaf->uEax, pCurLeaf->uEdx), g_aXSaveStateBits,
6722 "Valid XCR0 bits, guest:", 42);
6723 cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(Host.uEax, Host.uEdx), g_aXSaveStateBits,
6724 "Valid XCR0 bits, host:", 42);
6725 break;
6726
6727 case 1:
6728 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
6729 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEax, g_aLeaf13Sub1EaxSubFields, "XSAVE features, guest:", 42);
6730 cpumR3CpuIdInfoMnemonicListU32(pHlp, Host.uEax, g_aLeaf13Sub1EaxSubFields, "XSAVE features, host:", 42);
6731
6732 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
6733 pHlp->pfnPrintf(pHlp, "%42s %#x\n", "XSAVE area cur size XCR0|XSS, guest:", pCurLeaf->uEbx);
6734 pHlp->pfnPrintf(pHlp, "%42s %#x\n", "XSAVE area cur size XCR0|XSS, host:", Host.uEbx);
6735
6736 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
6737 cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(pCurLeaf->uEcx, pCurLeaf->uEdx), g_aXSaveStateBits,
6738 " Valid IA32_XSS bits, guest:", 42);
6739 cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(Host.uEdx, Host.uEcx), g_aXSaveStateBits,
6740 " Valid IA32_XSS bits, host:", 42);
6741 break;
6742
6743 default:
6744 if ( pCurLeaf
6745 && pCurLeaf->uSubLeaf == uSubLeaf
6746 && (pCurLeaf->uEax || pCurLeaf->uEbx || pCurLeaf->uEcx || pCurLeaf->uEdx) )
6747 {
6748 pHlp->pfnPrintf(pHlp, " State #%u, guest: off=%#06x, cb=%#06x %s", uSubLeaf, pCurLeaf->uEbx,
6749 pCurLeaf->uEax, pCurLeaf->uEcx & RT_BIT_32(0) ? "XCR0-bit" : "IA32_XSS-bit");
6750 if (pCurLeaf->uEcx & ~RT_BIT_32(0))
6751 pHlp->pfnPrintf(pHlp, " ECX[reserved]=%#x\n", pCurLeaf->uEcx & ~RT_BIT_32(0));
6752 if (pCurLeaf->uEdx)
6753 pHlp->pfnPrintf(pHlp, " EDX[reserved]=%#x\n", pCurLeaf->uEdx);
6754 pHlp->pfnPrintf(pHlp, " --");
6755 cpumR3CpuIdInfoMnemonicListU64(pHlp, RT_BIT_64(uSubLeaf), g_aXSaveStateBits, NULL, 0);
6756 pHlp->pfnPrintf(pHlp, "\n");
6757 }
6758 if (Host.uEax || Host.uEbx || Host.uEcx || Host.uEdx)
6759 {
6760 pHlp->pfnPrintf(pHlp, " State #%u, host: off=%#06x, cb=%#06x %s", uSubLeaf, Host.uEbx,
6761 Host.uEax, Host.uEcx & RT_BIT_32(0) ? "XCR0-bit" : "IA32_XSS-bit");
6762 if (Host.uEcx & ~RT_BIT_32(0))
6763 pHlp->pfnPrintf(pHlp, " ECX[reserved]=%#x\n", Host.uEcx & ~RT_BIT_32(0));
6764 if (Host.uEdx)
6765 pHlp->pfnPrintf(pHlp, " EDX[reserved]=%#x\n", Host.uEdx);
6766 pHlp->pfnPrintf(pHlp, " --");
6767 cpumR3CpuIdInfoMnemonicListU64(pHlp, RT_BIT_64(uSubLeaf), g_aXSaveStateBits, NULL, 0);
6768 pHlp->pfnPrintf(pHlp, "\n");
6769 }
6770 break;
6771
6772 }
6773
6774 /* advance. */
6775 if (pCurLeaf)
6776 {
6777 while ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
6778 && pCurLeaf->uSubLeaf <= uSubLeaf
6779 && pCurLeaf->uLeaf == UINT32_C(0x0000000d))
6780 pCurLeaf++;
6781 if ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
6782 || pCurLeaf->uLeaf != UINT32_C(0x0000000d))
6783 pCurLeaf = NULL;
6784 }
6785 }
6786}
6787
6788
6789static PCCPUMCPUIDLEAF cpumR3CpuIdInfoRawRange(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
6790 PCCPUMCPUIDLEAF pCurLeaf, uint32_t uUpToLeaf, const char *pszTitle)
6791{
6792 if ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
6793 && pCurLeaf->uLeaf <= uUpToLeaf)
6794 {
6795 pHlp->pfnPrintf(pHlp,
6796 " %s\n"
6797 " Leaf/sub-leaf eax ebx ecx edx\n", pszTitle);
6798 while ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
6799 && pCurLeaf->uLeaf <= uUpToLeaf)
6800 {
6801 CPUMCPUID Host;
6802 ASMCpuIdExSlow(pCurLeaf->uLeaf, 0, pCurLeaf->uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6803 pHlp->pfnPrintf(pHlp,
6804 "Gst: %08x/%04x %08x %08x %08x %08x\n"
6805 "Hst: %08x %08x %08x %08x\n",
6806 pCurLeaf->uLeaf, pCurLeaf->uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx,
6807 Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
6808 pCurLeaf++;
6809 }
6810 }
6811
6812 return pCurLeaf;
6813}
6814
6815
6816/**
6817 * Display the guest CpuId leaves.
6818 *
6819 * @param pVM The cross context VM structure.
6820 * @param pHlp The info helper functions.
6821 * @param pszArgs "terse", "default" or "verbose".
6822 */
6823DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
6824{
6825 /*
6826 * Parse the argument.
6827 */
6828 unsigned iVerbosity = 1;
6829 if (pszArgs)
6830 {
6831 pszArgs = RTStrStripL(pszArgs);
6832 if (!strcmp(pszArgs, "terse"))
6833 iVerbosity--;
6834 else if (!strcmp(pszArgs, "verbose"))
6835 iVerbosity++;
6836 }
6837
6838 uint32_t uLeaf;
6839 CPUMCPUID Host;
6840 uint32_t cLeaves = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
6841 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
6842 PCCPUMCPUIDLEAF pCurLeaf;
6843 PCCPUMCPUIDLEAF pNextLeaf;
6844 bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdPatmStd[0].uEbx,
6845 pVM->cpum.s.aGuestCpuIdPatmStd[0].uEcx,
6846 pVM->cpum.s.aGuestCpuIdPatmStd[0].uEdx);
6847
6848 /*
6849 * Standard leaves. Custom raw dump here due to ECX sub-leaves host handling.
6850 */
6851 uint32_t cHstMax = ASMCpuId_EAX(0);
6852 uint32_t cGstMax = paLeaves[0].uLeaf == 0 ? paLeaves[0].uEax : 0;
6853 uint32_t cMax = RT_MAX(cGstMax, cHstMax);
6854 pHlp->pfnPrintf(pHlp,
6855 " Raw Standard CPUID Leaves\n"
6856 " Leaf/sub-leaf eax ebx ecx edx\n");
6857 for (uLeaf = 0, pCurLeaf = paLeaves; uLeaf <= cMax; uLeaf++)
6858 {
6859 uint32_t cMaxSubLeaves = 1;
6860 if (uLeaf == 4 || uLeaf == 7 || uLeaf == 0xb)
6861 cMaxSubLeaves = 16;
6862 else if (uLeaf == 0xd)
6863 cMaxSubLeaves = 128;
6864
6865 for (uint32_t uSubLeaf = 0; uSubLeaf < cMaxSubLeaves; uSubLeaf++)
6866 {
6867 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6868 if ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
6869 && pCurLeaf->uLeaf == uLeaf
6870 && pCurLeaf->uSubLeaf == uSubLeaf)
6871 {
6872 pHlp->pfnPrintf(pHlp,
6873 "Gst: %08x/%04x %08x %08x %08x %08x\n"
6874 "Hst: %08x %08x %08x %08x\n",
6875 uLeaf, uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx,
6876 Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
6877 pCurLeaf++;
6878 }
6879 else if ( uLeaf != 0xd
6880 || uSubLeaf <= 1
6881 || Host.uEbx != 0 )
6882 pHlp->pfnPrintf(pHlp,
6883 "Hst: %08x/%04x %08x %08x %08x %08x\n",
6884 uLeaf, uSubLeaf, Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
6885
6886 /* Done? */
6887 if ( ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
6888 || pCurLeaf->uLeaf != uLeaf)
6889 && ( (uLeaf == 0x4 && ((Host.uEax & 0x000f) == 0 || (Host.uEax & 0x000f) >= 8))
6890 || (uLeaf == 0x7 && Host.uEax == 0)
6891 || (uLeaf == 0xb && ((Host.uEcx & 0xff00) == 0 || (Host.uEcx & 0xff00) >= 8))
6892 || (uLeaf == 0xb && (Host.uEcx & 0xff) != uSubLeaf)
6893 || (uLeaf == 0xd && uSubLeaf >= 128)
6894 )
6895 )
6896 break;
6897 }
6898 }
6899 pNextLeaf = pCurLeaf;
6900
6901 /*
6902 * If verbose, decode it.
6903 */
6904 if (iVerbosity && paLeaves[0].uLeaf == 0)
6905 pHlp->pfnPrintf(pHlp,
6906 "%36s %.04s%.04s%.04s\n"
6907 "%36s 0x00000000-%#010x\n"
6908 ,
6909 "Name:", &paLeaves[0].uEbx, &paLeaves[0].uEdx, &paLeaves[0].uEcx,
6910 "Supports:", paLeaves[0].uEax);
6911
6912 if (iVerbosity && (pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x00000001), 0)) != NULL)
6913 cpumR3CpuIdInfoStdLeaf1Details(pHlp, pCurLeaf, iVerbosity > 1, fIntel);
6914
6915 if (iVerbosity && (pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x00000007), 0)) != NULL)
6916 cpumR3CpuIdInfoStdLeaf7Details(pHlp, paLeaves, cLeaves, pCurLeaf, iVerbosity > 1);
6917
6918 if (iVerbosity && (pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x0000000d), 0)) != NULL)
6919 cpumR3CpuIdInfoStdLeaf13Details(pHlp, paLeaves, cLeaves, pCurLeaf, iVerbosity > 1);
6920
6921 pCurLeaf = pNextLeaf;
6922
6923 /*
6924 * Hypervisor leaves.
6925 *
6926 * Unlike most of the other leaves reported, the guest hypervisor leaves
6927 * aren't a subset of the host CPUID bits.
6928 */
6929 pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0x3fffffff), "Unknown CPUID Leaves");
6930
6931 ASMCpuIdExSlow(UINT32_C(0x40000000), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6932 cHstMax = Host.uEax >= UINT32_C(0x40000001) && Host.uEax <= UINT32_C(0x40000fff) ? Host.uEax : 0;
6933 cGstMax = (uintptr_t)(pCurLeaf - paLeaves) < cLeaves && pCurLeaf->uLeaf == UINT32_C(0x40000000)
6934 ? RT_MIN(pCurLeaf->uEax, UINT32_C(0x40000fff)) : 0;
6935 cMax = RT_MAX(cHstMax, cGstMax);
6936 if (cMax >= UINT32_C(0x40000000))
6937 {
6938 pNextLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, cMax, "Raw Hypervisor CPUID Leaves");
6939
6940 /** @todo dump these in more detail. */
6941
6942 pCurLeaf = pNextLeaf;
6943 }
6944
6945
6946 /*
6947 * Extended. Custom raw dump here due to ECX sub-leaves host handling.
6948 * Implemented after AMD specs.
6949 */
6950 pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0x7fffffff), "Unknown CPUID Leaves");
6951
6952 ASMCpuIdExSlow(UINT32_C(0x80000000), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6953 cHstMax = ASMIsValidExtRange(Host.uEax) ? RT_MIN(Host.uEax, UINT32_C(0x80000fff)) : 0;
6954 cGstMax = (uintptr_t)(pCurLeaf - paLeaves) < cLeaves && pCurLeaf->uLeaf == UINT32_C(0x80000000)
6955 ? RT_MIN(pCurLeaf->uEax, UINT32_C(0x80000fff)) : 0;
6956 cMax = RT_MAX(cHstMax, cGstMax);
6957 if (cMax >= UINT32_C(0x80000000))
6958 {
6959
6960 pHlp->pfnPrintf(pHlp,
6961 " Raw Extended CPUID Leaves\n"
6962 " Leaf/sub-leaf eax ebx ecx edx\n");
6963 PCCPUMCPUIDLEAF pExtLeaf = pCurLeaf;
6964 for (uLeaf = UINT32_C(0x80000000); uLeaf <= cMax; uLeaf++)
6965 {
6966 uint32_t cMaxSubLeaves = 1;
6967 if (uLeaf == UINT32_C(0x8000001d))
6968 cMaxSubLeaves = 16;
6969
6970 for (uint32_t uSubLeaf = 0; uSubLeaf < cMaxSubLeaves; uSubLeaf++)
6971 {
6972 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6973 if ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
6974 && pCurLeaf->uLeaf == uLeaf
6975 && pCurLeaf->uSubLeaf == uSubLeaf)
6976 {
6977 pHlp->pfnPrintf(pHlp,
6978 "Gst: %08x/%04x %08x %08x %08x %08x\n"
6979 "Hst: %08x %08x %08x %08x\n",
6980 uLeaf, uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx,
6981 Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
6982 pCurLeaf++;
6983 }
6984 else if ( uLeaf != 0xd
6985 || uSubLeaf <= 1
6986 || Host.uEbx != 0 )
6987 pHlp->pfnPrintf(pHlp,
6988 "Hst: %08x/%04x %08x %08x %08x %08x\n",
6989 uLeaf, uSubLeaf, Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
6990
6991 /* Done? */
6992 if ( ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
6993 || pCurLeaf->uLeaf != uLeaf)
6994 && (uLeaf == UINT32_C(0x8000001d) && ((Host.uEax & 0x000f) == 0 || (Host.uEax & 0x000f) >= 8)) )
6995 break;
6996 }
6997 }
6998 pNextLeaf = pCurLeaf;
6999
7000 /*
7001 * Understandable output
7002 */
7003 if (iVerbosity)
7004 pHlp->pfnPrintf(pHlp,
7005 "Ext Name: %.4s%.4s%.4s\n"
7006 "Ext Supports: 0x80000000-%#010x\n",
7007 &pExtLeaf->uEbx, &pExtLeaf->uEdx, &pExtLeaf->uEcx, pExtLeaf->uEax);
7008
7009 pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000001), 0);
7010 if (iVerbosity && pCurLeaf)
7011 {
7012 uint32_t uEAX = pCurLeaf->uEax;
7013 pHlp->pfnPrintf(pHlp,
7014 "Family: %d \tExtended: %d \tEffective: %d\n"
7015 "Model: %d \tExtended: %d \tEffective: %d\n"
7016 "Stepping: %d\n"
7017 "Brand ID: %#05x\n",
7018 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
7019 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
7020 ASMGetCpuStepping(uEAX),
7021 pCurLeaf->uEbx & 0xfff);
7022
7023 if (iVerbosity == 1)
7024 {
7025 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aExtLeaf1EdxSubFields, "Ext Features EDX:", 34);
7026 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aExtLeaf1EdxSubFields, "Ext Features ECX:", 34);
7027 }
7028 else
7029 {
7030 ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
7031 pHlp->pfnPrintf(pHlp, "Ext Features\n");
7032 pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
7033 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf1EdxSubFields, 56);
7034 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aExtLeaf1EcxSubFields, 56);
7035 if (Host.uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
7036 {
7037 pHlp->pfnPrintf(pHlp, "SVM Feature Identification (leaf A):\n");
7038 ASMCpuIdExSlow(0x8000000a, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
7039 pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x8000000a), 0);
7040 uint32_t const uGstEdx = pCurLeaf ? pCurLeaf->uEdx : 0;
7041 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, uGstEdx, Host.uEdx, g_aExtLeafAEdxSubFields, 56);
7042 }
7043 }
7044 }
7045
7046 if (iVerbosity && (pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000002), 0)) != NULL)
7047 {
7048 char szString[4*4*3+1] = {0};
7049 uint32_t *pu32 = (uint32_t *)szString;
7050 *pu32++ = pCurLeaf->uEax;
7051 *pu32++ = pCurLeaf->uEbx;
7052 *pu32++ = pCurLeaf->uEcx;
7053 *pu32++ = pCurLeaf->uEdx;
7054 pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000003), 0);
7055 if (pCurLeaf)
7056 {
7057 *pu32++ = pCurLeaf->uEax;
7058 *pu32++ = pCurLeaf->uEbx;
7059 *pu32++ = pCurLeaf->uEcx;
7060 *pu32++ = pCurLeaf->uEdx;
7061 }
7062 pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000004), 0);
7063 if (pCurLeaf)
7064 {
7065 *pu32++ = pCurLeaf->uEax;
7066 *pu32++ = pCurLeaf->uEbx;
7067 *pu32++ = pCurLeaf->uEcx;
7068 *pu32++ = pCurLeaf->uEdx;
7069 }
7070 pHlp->pfnPrintf(pHlp, "Full Name: \"%s\"\n", szString);
7071 }
7072
7073 if (iVerbosity && (pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000005), 0)) != NULL)
7074 {
7075 uint32_t uEAX = pCurLeaf->uEax;
7076 uint32_t uEBX = pCurLeaf->uEbx;
7077 uint32_t uECX = pCurLeaf->uEcx;
7078 uint32_t uEDX = pCurLeaf->uEdx;
7079 char sz1[32];
7080 char sz2[32];
7081
7082 pHlp->pfnPrintf(pHlp,
7083 "TLB 2/4M Instr/Uni: %s %3d entries\n"
7084 "TLB 2/4M Data: %s %3d entries\n",
7085 getCacheAss((uEAX >> 8) & 0xff, sz1), (uEAX >> 0) & 0xff,
7086 getCacheAss((uEAX >> 24) & 0xff, sz2), (uEAX >> 16) & 0xff);
7087 pHlp->pfnPrintf(pHlp,
7088 "TLB 4K Instr/Uni: %s %3d entries\n"
7089 "TLB 4K Data: %s %3d entries\n",
7090 getCacheAss((uEBX >> 8) & 0xff, sz1), (uEBX >> 0) & 0xff,
7091 getCacheAss((uEBX >> 24) & 0xff, sz2), (uEBX >> 16) & 0xff);
7092 pHlp->pfnPrintf(pHlp, "L1 Instr Cache Line Size: %d bytes\n"
7093 "L1 Instr Cache Lines Per Tag: %d\n"
7094 "L1 Instr Cache Associativity: %s\n"
7095 "L1 Instr Cache Size: %d KB\n",
7096 (uEDX >> 0) & 0xff,
7097 (uEDX >> 8) & 0xff,
7098 getCacheAss((uEDX >> 16) & 0xff, sz1),
7099 (uEDX >> 24) & 0xff);
7100 pHlp->pfnPrintf(pHlp,
7101 "L1 Data Cache Line Size: %d bytes\n"
7102 "L1 Data Cache Lines Per Tag: %d\n"
7103 "L1 Data Cache Associativity: %s\n"
7104 "L1 Data Cache Size: %d KB\n",
7105 (uECX >> 0) & 0xff,
7106 (uECX >> 8) & 0xff,
7107 getCacheAss((uECX >> 16) & 0xff, sz1),
7108 (uECX >> 24) & 0xff);
7109 }
7110
7111 if (iVerbosity && (pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000006), 0)) != NULL)
7112 {
7113 uint32_t uEAX = pCurLeaf->uEax;
7114 uint32_t uEBX = pCurLeaf->uEbx;
7115 uint32_t uEDX = pCurLeaf->uEdx;
7116
7117 pHlp->pfnPrintf(pHlp,
7118 "L2 TLB 2/4M Instr/Uni: %s %4d entries\n"
7119 "L2 TLB 2/4M Data: %s %4d entries\n",
7120 getL2CacheAss((uEAX >> 12) & 0xf), (uEAX >> 0) & 0xfff,
7121 getL2CacheAss((uEAX >> 28) & 0xf), (uEAX >> 16) & 0xfff);
7122 pHlp->pfnPrintf(pHlp,
7123 "L2 TLB 4K Instr/Uni: %s %4d entries\n"
7124 "L2 TLB 4K Data: %s %4d entries\n",
7125 getL2CacheAss((uEBX >> 12) & 0xf), (uEBX >> 0) & 0xfff,
7126 getL2CacheAss((uEBX >> 28) & 0xf), (uEBX >> 16) & 0xfff);
7127 pHlp->pfnPrintf(pHlp,
7128 "L2 Cache Line Size: %d bytes\n"
7129 "L2 Cache Lines Per Tag: %d\n"
7130 "L2 Cache Associativity: %s\n"
7131 "L2 Cache Size: %d KB\n",
7132 (uEDX >> 0) & 0xff,
7133 (uEDX >> 8) & 0xf,
7134 getL2CacheAss((uEDX >> 12) & 0xf),
7135 (uEDX >> 16) & 0xffff);
7136 }
7137
7138 if (iVerbosity && (pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000007), 0)) != NULL)
7139 {
7140 ASMCpuIdExSlow(UINT32_C(0x80000007), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
7141 if (pCurLeaf->uEdx || (Host.uEdx && iVerbosity))
7142 {
7143 if (iVerbosity < 1)
7144 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aExtLeaf7EdxSubFields, "APM Features EDX:", 34);
7145 else
7146 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf7EdxSubFields, 56);
7147 }
7148 }
7149
7150 pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000008), 0);
7151 if (pCurLeaf != NULL)
7152 {
7153 ASMCpuIdExSlow(UINT32_C(0x80000008), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
7154 if (pCurLeaf->uEbx || (Host.uEbx && iVerbosity))
7155 {
7156 if (iVerbosity < 1)
7157 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aExtLeaf8EbxSubFields, "Ext Features ext IDs EBX:", 34);
7158 else
7159 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aExtLeaf8EbxSubFields, 56);
7160 }
7161
7162 if (iVerbosity)
7163 {
7164 uint32_t uEAX = pCurLeaf->uEax;
7165 uint32_t uECX = pCurLeaf->uEcx;
7166
7167 /** @todo 0x80000008:EAX[23:16] is only defined for AMD. We'll get 0 on Intel. On
7168 * AMD if we get 0, the guest physical address width should be taken from
7169 * 0x80000008:EAX[7:0] instead. Guest Physical address width is relevant
7170 * for guests using nested paging. */
7171 pHlp->pfnPrintf(pHlp,
7172 "Physical Address Width: %d bits\n"
7173 "Virtual Address Width: %d bits\n"
7174 "Guest Physical Address Width: %d bits\n",
7175 (uEAX >> 0) & 0xff,
7176 (uEAX >> 8) & 0xff,
7177 (uEAX >> 16) & 0xff);
7178
7179 /** @todo 0x80000008:ECX is reserved on Intel (we'll get incorrect physical core
7180 * count here). */
7181 pHlp->pfnPrintf(pHlp,
7182 "Physical Core Count: %d\n",
7183 ((uECX >> 0) & 0xff) + 1);
7184 }
7185 }
7186
7187 pCurLeaf = pNextLeaf;
7188 }
7189
7190
7191
7192 /*
7193 * Centaur.
7194 */
7195 pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0xbfffffff), "Unknown CPUID Leaves");
7196
7197 ASMCpuIdExSlow(UINT32_C(0xc0000000), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
7198 cHstMax = Host.uEax >= UINT32_C(0xc0000001) && Host.uEax <= UINT32_C(0xc0000fff)
7199 ? RT_MIN(Host.uEax, UINT32_C(0xc0000fff)) : 0;
7200 cGstMax = (uintptr_t)(pCurLeaf - paLeaves) < cLeaves && pCurLeaf->uLeaf == UINT32_C(0xc0000000)
7201 ? RT_MIN(pCurLeaf->uEax, UINT32_C(0xc0000fff)) : 0;
7202 cMax = RT_MAX(cHstMax, cGstMax);
7203 if (cMax >= UINT32_C(0xc0000000))
7204 {
7205 pNextLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, cMax, "Raw Centaur CPUID Leaves");
7206
7207 /*
7208 * Understandable output
7209 */
7210 if (iVerbosity && (pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0xc0000000), 0)) != NULL)
7211 pHlp->pfnPrintf(pHlp,
7212 "Centaur Supports: 0xc0000000-%#010x\n",
7213 pCurLeaf->uEax);
7214
7215 if (iVerbosity && (pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0xc0000001), 0)) != NULL)
7216 {
7217 ASMCpuIdExSlow(0xc0000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
7218 uint32_t uEdxGst = pCurLeaf->uEdx;
7219 uint32_t uEdxHst = Host.uEdx;
7220
7221 if (iVerbosity == 1)
7222 {
7223 pHlp->pfnPrintf(pHlp, "Centaur Features EDX: ");
7224 if (uEdxGst & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " AIS");
7225 if (uEdxGst & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " AIS-E");
7226 if (uEdxGst & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " RNG");
7227 if (uEdxGst & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " RNG-E");
7228 if (uEdxGst & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " LH");
7229 if (uEdxGst & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " FEMMS");
7230 if (uEdxGst & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " ACE");
7231 if (uEdxGst & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " ACE-E");
7232 /* possibly indicating MM/HE and MM/HE-E on older chips... */
7233 if (uEdxGst & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " ACE2");
7234 if (uEdxGst & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " ACE2-E");
7235 if (uEdxGst & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " PHE");
7236 if (uEdxGst & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " PHE-E");
7237 if (uEdxGst & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " PMM");
7238 if (uEdxGst & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PMM-E");
7239 for (unsigned iBit = 14; iBit < 32; iBit++)
7240 if (uEdxGst & RT_BIT(iBit))
7241 pHlp->pfnPrintf(pHlp, " %d", iBit);
7242 pHlp->pfnPrintf(pHlp, "\n");
7243 }
7244 else
7245 {
7246 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
7247 pHlp->pfnPrintf(pHlp, "AIS - Alternate Instruction Set = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
7248 pHlp->pfnPrintf(pHlp, "AIS-E - AIS enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
7249 pHlp->pfnPrintf(pHlp, "RNG - Random Number Generator = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
7250 pHlp->pfnPrintf(pHlp, "RNG-E - RNG enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
7251 pHlp->pfnPrintf(pHlp, "LH - LongHaul MSR 0000_110Ah = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
7252 pHlp->pfnPrintf(pHlp, "FEMMS - FEMMS = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
7253 pHlp->pfnPrintf(pHlp, "ACE - Advanced Cryptography Engine = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
7254 pHlp->pfnPrintf(pHlp, "ACE-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
7255 /* possibly indicating MM/HE and MM/HE-E on older chips... */
7256 pHlp->pfnPrintf(pHlp, "ACE2 - Advanced Cryptography Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
7257 pHlp->pfnPrintf(pHlp, "ACE2-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
7258 pHlp->pfnPrintf(pHlp, "PHE - Padlock Hash Engine = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
7259 pHlp->pfnPrintf(pHlp, "PHE-E - PHE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
7260 pHlp->pfnPrintf(pHlp, "PMM - Montgomery Multiplier = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
7261 pHlp->pfnPrintf(pHlp, "PMM-E - PMM enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
7262 pHlp->pfnPrintf(pHlp, "14 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
7263 pHlp->pfnPrintf(pHlp, "15 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
7264 pHlp->pfnPrintf(pHlp, "Parallax = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
7265 pHlp->pfnPrintf(pHlp, "Parallax enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(17)), !!(uEdxHst & RT_BIT(17)));
7266 pHlp->pfnPrintf(pHlp, "Overstress = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
7267 pHlp->pfnPrintf(pHlp, "Overstress enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
7268 pHlp->pfnPrintf(pHlp, "TM3 - Temperature Monitoring 3 = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
7269 pHlp->pfnPrintf(pHlp, "TM3-E - TM3 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
7270 pHlp->pfnPrintf(pHlp, "RNG2 - Random Number Generator 2 = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
7271 pHlp->pfnPrintf(pHlp, "RNG2-E - RNG2 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
7272 pHlp->pfnPrintf(pHlp, "24 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
7273 pHlp->pfnPrintf(pHlp, "PHE2 - Padlock Hash Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
7274 pHlp->pfnPrintf(pHlp, "PHE2-E - PHE2 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
7275 for (unsigned iBit = 27; iBit < 32; iBit++)
7276 if ((uEdxGst | uEdxHst) & RT_BIT(iBit))
7277 pHlp->pfnPrintf(pHlp, "Bit %d = %d (%d)\n", iBit, !!(uEdxGst & RT_BIT(iBit)), !!(uEdxHst & RT_BIT(iBit)));
7278 pHlp->pfnPrintf(pHlp, "\n");
7279 }
7280 }
7281
7282 pCurLeaf = pNextLeaf;
7283 }
7284
7285 /*
7286 * The remainder.
7287 */
7288 pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0xffffffff), "Unknown CPUID Leaves");
7289}
7290
7291#endif /* !IN_VBOX_CPU_REPORT */
7292
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette