VirtualBox

source: vbox/trunk/src/VBox/VMM/CPUM.cpp@ 24442

Last change on this file since 24442 was 24416, checked in by vboxsync, 15 years ago

Fixed bugs in VBoxManage list hostcpuids and the cpuid part of the showvminfo output. Use the s_auCpuIdRanges thing more places.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 132.4 KB
Line 
1/* $Id: CPUM.cpp 24416 2009-11-05 21:17:56Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/** @page pg_cpum CPUM - CPU Monitor / Manager
23 *
24 * The CPU Monitor / Manager keeps track of all the CPU registers. It is
25 * also responsible for lazy FPU handling and some of the context loading
26 * in raw mode.
27 *
28 * There are three CPU contexts, the most important one is the guest one (GC).
29 * When running in raw-mode (RC) there is a special hyper context for the VMM
30 * part that floats around inside the guest address space. When running in
31 * raw-mode, CPUM also maintains a host context for saving and restoring
32 * registers across world switches. This latter is done in cooperation with the
33 * world switcher (@see pg_vmm).
34 *
35 * @see grp_cpum
36 */
37
38/*******************************************************************************
39* Header Files *
40*******************************************************************************/
41#define LOG_GROUP LOG_GROUP_CPUM
42#include <VBox/cpum.h>
43#include <VBox/cpumdis.h>
44#include <VBox/pgm.h>
45#include <VBox/pdm.h>
46#include <VBox/mm.h>
47#include <VBox/selm.h>
48#include <VBox/dbgf.h>
49#include <VBox/patm.h>
50#include <VBox/hwaccm.h>
51#include <VBox/ssm.h>
52#include "CPUMInternal.h"
53#include <VBox/vm.h>
54
55#include <VBox/param.h>
56#include <VBox/dis.h>
57#include <VBox/err.h>
58#include <VBox/log.h>
59#include <iprt/assert.h>
60#include <iprt/asm.h>
61#include <iprt/string.h>
62#include <iprt/mp.h>
63#include <iprt/cpuset.h>
64
65
66/*******************************************************************************
67* Defined Constants And Macros *
68*******************************************************************************/
69/** The current saved state version. */
70#ifdef VBOX_WITH_LIVE_MIGRATION
71#define CPUM_SAVED_STATE_VERSION 11
72#else
73#define CPUM_SAVED_STATE_VERSION 10
74#endif
75/** The saved state version of 3.0 and 3.1 trunk before the teleportation
76 * changes. */
77#define CPUM_SAVED_STATE_VERSION_VER3_0 10
78/** The saved state version for the 2.1 trunk before the MSR changes. */
79#define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR 9
80/** The saved state version of 2.0, used for backwards compatibility. */
81#define CPUM_SAVED_STATE_VERSION_VER2_0 8
82/** The saved state version of 1.6, used for backwards compatibility. */
83#define CPUM_SAVED_STATE_VERSION_VER1_6 6
84
85
86/*******************************************************************************
87* Structures and Typedefs *
88*******************************************************************************/
89
90/**
91 * What kind of cpu info dump to perform.
92 */
93typedef enum CPUMDUMPTYPE
94{
95 CPUMDUMPTYPE_TERSE,
96 CPUMDUMPTYPE_DEFAULT,
97 CPUMDUMPTYPE_VERBOSE
98} CPUMDUMPTYPE;
99/** Pointer to a cpu info dump type. */
100typedef CPUMDUMPTYPE *PCPUMDUMPTYPE;
101
102
103/*******************************************************************************
104* Internal Functions *
105*******************************************************************************/
106static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
107static int cpumR3CpuIdInit(PVM pVM);
108#ifdef VBOX_WITH_LIVE_MIGRATION
109static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
110#endif
111static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
112static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
113static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
114static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
115static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
116static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
117static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
118static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
119
120
/**
 * Initializes the CPUM.
 *
 * Verifies structure alignment invariants at compile time, wires up the
 * CPUM/CPUMCPU cross-offsets and hyper-context pointers for every VCPU,
 * checks that the host CPU has the minimum required features, registers
 * the saved-state unit and debugger info handlers, and finally builds the
 * guest CPUID tables and resets the CPU state.
 *
 * @returns VBox status code (VERR_UNSUPPORTED_CPU if the host lacks
 *          CPUID, MMX or TSC support).
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) CPUMR3Init(PVM pVM)
{
    LogFlow(("CPUMR3Init\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, cpum.s, 32);
    AssertCompile(sizeof(pVM->cpum.s) <= sizeof(pVM->cpum.padding));
    AssertCompileSizeAlignment(CPUMCTX, 64);
    AssertCompileSizeAlignment(CPUMCTXMSR, 64);
    AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
    AssertCompileMemberAlignment(VM, cpum, 64);
    AssertCompileMemberAlignment(VM, aCpus, 64);
    AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
    AssertCompileMemberSizeAlignment(VM, aCpus[0].cpum.s, 64);

    /* Calculate the offset from CPUM to CPUMCPU for the first CPU. */
    pVM->cpum.s.ulOffCPUMCPU = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum);
    Assert((uintptr_t)&pVM->cpum + pVM->cpum.s.ulOffCPUMCPU == (uintptr_t)&pVM->aCpus[0].cpum);

    /* Calculate the offset from CPUMCPU to CPUM. */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        /*
         * Setup any fixed pointers and offsets.
         */
        pVCpu->cpum.s.pHyperCoreR3 = CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
        pVCpu->cpum.s.pHyperCoreR0 = VM_R0_ADDR(pVM, CPUMCTX2CORE(&pVCpu->cpum.s.Hyper));

        pVCpu->cpum.s.ulOffCPUM = RT_OFFSETOF(VM, aCpus[i].cpum) - RT_OFFSETOF(VM, cpum);
        Assert((uintptr_t)&pVCpu->cpum - pVCpu->cpum.s.ulOffCPUM == (uintptr_t)&pVM->cpum);
    }

    /*
     * Check that the CPU supports the minimum features we require.
     */
    if (!ASMHasCpuId())
    {
        Log(("The CPU doesn't support CPUID!\n"));
        return VERR_UNSUPPORTED_CPU;
    }
    ASMCpuId_ECX_EDX(1, &pVM->cpum.s.CPUFeatures.ecx, &pVM->cpum.s.CPUFeatures.edx);
    ASMCpuId_ECX_EDX(0x80000001, &pVM->cpum.s.CPUFeaturesExt.ecx, &pVM->cpum.s.CPUFeaturesExt.edx);

    /* Setup the CR4 AND and OR masks used in the switcher */
    /* Depends on the presence of FXSAVE(SSE) support on the host CPU */
    if (!pVM->cpum.s.CPUFeatures.edx.u1FXSR)
    {
        Log(("The CPU doesn't support FXSAVE/FXRSTOR!\n"));
        /* No FXSAVE implies no SSE */
        pVM->cpum.s.CR4.AndMask = X86_CR4_PVI | X86_CR4_VME;
        pVM->cpum.s.CR4.OrMask  = 0;
    }
    else
    {
        pVM->cpum.s.CR4.AndMask = X86_CR4_OSXMMEEXCPT | X86_CR4_PVI | X86_CR4_VME;
        pVM->cpum.s.CR4.OrMask  = X86_CR4_OSFSXR;
    }

    if (!pVM->cpum.s.CPUFeatures.edx.u1MMX)
    {
        Log(("The CPU doesn't support MMX!\n"));
        return VERR_UNSUPPORTED_CPU;
    }
    if (!pVM->cpum.s.CPUFeatures.edx.u1TSC)
    {
        Log(("The CPU doesn't support TSC!\n"));
        return VERR_UNSUPPORTED_CPU;
    }
    /* Bogus on AMD? */
    if (!pVM->cpum.s.CPUFeatures.edx.u1SEP)
        Log(("The CPU doesn't support SYSENTER/SYSEXIT!\n"));

    /*
     * Detect the host CPU vendor.
     * (The guest CPU vendor is re-detected later on.)
     */
    uint32_t uEAX, uEBX, uECX, uEDX;
    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    pVM->cpum.s.enmHostCpuVendor = cpumR3DetectVendor(uEAX, uEBX, uECX, uEDX);
    /* Start out assuming guest == host; cpumR3CpuIdInit may revise this. */
    pVM->cpum.s.enmGuestCpuVendor = pVM->cpum.s.enmHostCpuVendor;

    /*
     * Setup hypervisor startup values.
     */

    /*
     * Register saved state data item.
     */
#ifdef VBOX_WITH_LIVE_MIGRATION
    int rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
                                   NULL, cpumR3LiveExec, NULL,
                                   NULL, cpumR3SaveExec, NULL,
                                   NULL, cpumR3LoadExec, NULL);
#else
    int rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
                                   NULL, NULL, NULL,
                                   NULL, cpumR3SaveExec, NULL,
                                   NULL, cpumR3LoadExec, NULL);
#endif
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Register info handlers.
     */
    DBGFR3InfoRegisterInternal(pVM, "cpum",             "Displays the all the cpu states.",         &cpumR3InfoAll);
    DBGFR3InfoRegisterInternal(pVM, "cpumguest",        "Displays the guest cpu state.",            &cpumR3InfoGuest);
    DBGFR3InfoRegisterInternal(pVM, "cpumhyper",        "Displays the hypervisor cpu state.",       &cpumR3InfoHyper);
    DBGFR3InfoRegisterInternal(pVM, "cpumhost",         "Displays the host cpu state.",             &cpumR3InfoHost);
    DBGFR3InfoRegisterInternal(pVM, "cpuid",            "Displays the guest cpuid leaves.",         &cpumR3CpuIdInfo);
    DBGFR3InfoRegisterInternal(pVM, "cpumguestinstr",   "Displays the current guest instruction.",  &cpumR3InfoGuestInstr);

    /*
     * Initialize the Guest CPUID state.
     */
    rc = cpumR3CpuIdInit(pVM);
    if (RT_FAILURE(rc))
        return rc;
    CPUMR3Reset(pVM);
    return VINF_SUCCESS;
}
252
253
/**
 * Initializes the per-VCPU CPUM.
 *
 * Currently a logging no-op; the per-VCPU pointer/offset setup happens in
 * the VCPU loop inside CPUMR3Init.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) CPUMR3InitCPU(PVM pVM)
{
    LogFlow(("CPUMR3InitCPU\n"));
    return VINF_SUCCESS;
}
265
266
267/**
268 * Detect the CPU vendor give n the
269 *
270 * @returns The vendor.
271 * @param uEAX EAX from CPUID(0).
272 * @param uEBX EBX from CPUID(0).
273 * @param uECX ECX from CPUID(0).
274 * @param uEDX EDX from CPUID(0).
275 */
276static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
277{
278 if ( uEAX >= 1
279 && uEBX == X86_CPUID_VENDOR_AMD_EBX
280 && uECX == X86_CPUID_VENDOR_AMD_ECX
281 && uEDX == X86_CPUID_VENDOR_AMD_EDX)
282 return CPUMCPUVENDOR_AMD;
283
284 if ( uEAX >= 1
285 && uEBX == X86_CPUID_VENDOR_INTEL_EBX
286 && uECX == X86_CPUID_VENDOR_INTEL_ECX
287 && uEDX == X86_CPUID_VENDOR_INTEL_EDX)
288 return CPUMCPUVENDOR_INTEL;
289
290 /** @todo detect the other buggers... */
291 return CPUMCPUVENDOR_UNKNOWN;
292}
293
294
295/**
296 * Fetches overrides for a CPUID leaf.
297 *
298 * @returns VBox status code.
299 * @param pLeaf The leaf to load the overrides into.
300 * @param pCfgNode The CFGM node containing the overrides
301 * (/CPUM/HostCPUID/ or /CPUM/CPUID/).
302 * @param iLeaf The CPUID leaf number.
303 */
304static int cpumR3CpuIdFetchLeafOverride(PCPUMCPUID pLeaf, PCFGMNODE pCfgNode, uint32_t iLeaf)
305{
306 PCFGMNODE pLeafNode = CFGMR3GetChildF(pCfgNode, "%RX32", iLeaf);
307 if (pLeafNode)
308 {
309 uint32_t u32;
310 int rc = CFGMR3QueryU32(pLeafNode, "eax", &u32);
311 if (RT_SUCCESS(rc))
312 pLeaf->eax = u32;
313 else
314 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
315
316 rc = CFGMR3QueryU32(pLeafNode, "ebx", &u32);
317 if (RT_SUCCESS(rc))
318 pLeaf->ebx = u32;
319 else
320 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
321
322 rc = CFGMR3QueryU32(pLeafNode, "ecx", &u32);
323 if (RT_SUCCESS(rc))
324 pLeaf->ecx = u32;
325 else
326 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
327
328 rc = CFGMR3QueryU32(pLeafNode, "edx", &u32);
329 if (RT_SUCCESS(rc))
330 pLeaf->edx = u32;
331 else
332 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
333
334 }
335 return VINF_SUCCESS;
336}
337
338
339/**
340 * Load the overrides for a set of CPUID leafs.
341 *
342 * @returns VBox status code.
343 * @param paLeafs The leaf array.
344 * @param cLeafs The number of leafs.
345 * @param uStart The start leaf number.
346 * @param pCfgNode The CFGM node containing the overrides
347 * (/CPUM/HostCPUID/ or /CPUM/CPUID/).
348 */
349static int cpumR3CpuIdInitLoadOverrideSet(uint32_t uStart, PCPUMCPUID paLeafs, uint32_t cLeafs, PCFGMNODE pCfgNode)
350{
351 for (uint32_t i = 0; i < cLeafs; i++)
352 {
353 int rc = cpumR3CpuIdFetchLeafOverride(&paLeafs[i], pCfgNode, uStart + i);
354 if (RT_FAILURE(rc))
355 return rc;
356 }
357
358 return VINF_SUCCESS;
359}
360
361/**
362 * Init a set of host CPUID leafs.
363 *
364 * @returns VBox status code.
365 * @param paLeafs The leaf array.
366 * @param cLeafs The number of leafs.
367 * @param uStart The start leaf number.
368 * @param pCfgNode The /CPUM/HostCPUID/ node.
369 */
370static int cpumR3CpuIdInitHostSet(uint32_t uStart, PCPUMCPUID paLeafs, uint32_t cLeafs, PCFGMNODE pCfgNode)
371{
372 /* Using the ECX variant for all of them can't hurt... */
373 for (uint32_t i = 0; i < cLeafs; i++)
374 ASMCpuId_Idx_ECX(uStart + i, 0, &paLeafs[i].eax, &paLeafs[i].ebx, &paLeafs[i].ecx, &paLeafs[i].edx);
375
376 /* Load CPUID leaf override; we currently don't care if the caller
377 specifies features the host CPU doesn't support. */
378 return cpumR3CpuIdInitLoadOverrideSet(uStart, paLeafs, cLeafs, pCfgNode);
379}
380
381
/**
 * Initializes the emulated CPU's cpuid information.
 *
 * Builds the guest CPUID tables from the host values (with optional CFGM
 * overrides), masks the feature bits down to what the VMM can virtualize,
 * applies the synthetic-CPU / NT4 / multi-core tweaks, and logs the result.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
static int cpumR3CpuIdInit(PVM pVM)
{
    PCPUM       pCPUM    = &pVM->cpum.s;
    PCFGMNODE   pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
    uint32_t    i;
    int         rc;

    /*
     * Get the host CPUIDs and redetect the guest CPU vendor (could've been overridden).
     */
    /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
     * Overrides the host CPUID leaf values used for calculating the guest CPUID
     * leafs. This can be used to preserve the CPUID values when moving a VM to
     * a different machine. Another use is restricting (or extending) the
     * feature set exposed to the guest. */
    PCFGMNODE pHostOverrideCfg = CFGMR3GetChild(pCpumCfg, "HostCPUID");
    rc = cpumR3CpuIdInitHostSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdStd),     pHostOverrideCfg);
    AssertRCReturn(rc, rc);
    rc = cpumR3CpuIdInitHostSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdExt),     pHostOverrideCfg);
    AssertRCReturn(rc, rc);
    rc = cpumR3CpuIdInitHostSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pHostOverrideCfg);
    AssertRCReturn(rc, rc);

    /* Re-detect from leaf 0 since the HostCPUID overrides may have changed the vendor string. */
    pCPUM->enmGuestCpuVendor = cpumR3DetectVendor(pCPUM->aGuestCpuIdStd[0].eax, pCPUM->aGuestCpuIdStd[0].ebx,
                                                  pCPUM->aGuestCpuIdStd[0].ecx, pCPUM->aGuestCpuIdStd[0].edx);

    /*
     * Only report features we can support.
     */
    pCPUM->aGuestCpuIdStd[1].edx      &= X86_CPUID_FEATURE_EDX_FPU
                                       | X86_CPUID_FEATURE_EDX_VME
                                       | X86_CPUID_FEATURE_EDX_DE
                                       | X86_CPUID_FEATURE_EDX_PSE
                                       | X86_CPUID_FEATURE_EDX_TSC
                                       | X86_CPUID_FEATURE_EDX_MSR
                                       //| X86_CPUID_FEATURE_EDX_PAE   - not implemented yet.
                                       | X86_CPUID_FEATURE_EDX_MCE
                                       | X86_CPUID_FEATURE_EDX_CX8
                                       //| X86_CPUID_FEATURE_EDX_APIC  - set by the APIC device if present.
                                       /* Note! we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see #1757) */
                                       //| X86_CPUID_FEATURE_EDX_SEP
                                       | X86_CPUID_FEATURE_EDX_MTRR
                                       | X86_CPUID_FEATURE_EDX_PGE
                                       | X86_CPUID_FEATURE_EDX_MCA
                                       | X86_CPUID_FEATURE_EDX_CMOV
                                       | X86_CPUID_FEATURE_EDX_PAT
                                       | X86_CPUID_FEATURE_EDX_PSE36
                                       //| X86_CPUID_FEATURE_EDX_PSN   - no serial number.
                                       | X86_CPUID_FEATURE_EDX_CLFSH
                                       //| X86_CPUID_FEATURE_EDX_DS    - no debug store.
                                       //| X86_CPUID_FEATURE_EDX_ACPI  - not virtualized yet.
                                       | X86_CPUID_FEATURE_EDX_MMX
                                       | X86_CPUID_FEATURE_EDX_FXSR
                                       | X86_CPUID_FEATURE_EDX_SSE
                                       | X86_CPUID_FEATURE_EDX_SSE2
                                       //| X86_CPUID_FEATURE_EDX_SS    - no self snoop.
                                       //| X86_CPUID_FEATURE_EDX_HTT   - no hyperthreading.
                                       //| X86_CPUID_FEATURE_EDX_TM    - no thermal monitor.
                                       //| X86_CPUID_FEATURE_EDX_PBE   - no pending break enabled.
                                       | 0;
    pCPUM->aGuestCpuIdStd[1].ecx      &= 0
                                       | X86_CPUID_FEATURE_ECX_SSE3
                                       /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
                                       | ((pVM->cCpus == 1) ? X86_CPUID_FEATURE_ECX_MONITOR : 0)
                                       //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
                                       //| X86_CPUID_FEATURE_ECX_VMX   - not virtualized.
                                       //| X86_CPUID_FEATURE_ECX_EST   - no extended speed step.
                                       //| X86_CPUID_FEATURE_ECX_TM2   - no thermal monitor 2.
                                       //| X86_CPUID_FEATURE_ECX_SSSE3 - no SSSE3 support
                                       //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
                                       //| X86_CPUID_FEATURE_ECX_CX16  - no cmpxchg16b
                                       /* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
                                       //| X86_CPUID_FEATURE_ECX_TPRUPDATE
                                       /* ECX Bit 21 - x2APIC support - not yet. */
                                       // | X86_CPUID_FEATURE_ECX_X2APIC
                                       /* ECX Bit 23 - POPCOUNT instruction. */
                                       //| X86_CPUID_FEATURE_ECX_POPCOUNT
                                       | 0;

    /* ASSUMES that this is ALWAYS the AMD define feature set if present. */
    pCPUM->aGuestCpuIdExt[1].edx      &= X86_CPUID_AMD_FEATURE_EDX_FPU
                                       | X86_CPUID_AMD_FEATURE_EDX_VME
                                       | X86_CPUID_AMD_FEATURE_EDX_DE
                                       | X86_CPUID_AMD_FEATURE_EDX_PSE
                                       | X86_CPUID_AMD_FEATURE_EDX_TSC
                                       | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
                                       //| X86_CPUID_AMD_FEATURE_EDX_PAE    - not implemented yet.
                                       //| X86_CPUID_AMD_FEATURE_EDX_MCE    - not virtualized yet.
                                       | X86_CPUID_AMD_FEATURE_EDX_CX8
                                       //| X86_CPUID_AMD_FEATURE_EDX_APIC   - set by the APIC device if present.
                                       /* Note! we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see #1757) */
                                       //| X86_CPUID_AMD_FEATURE_EDX_SEP
                                       | X86_CPUID_AMD_FEATURE_EDX_MTRR
                                       | X86_CPUID_AMD_FEATURE_EDX_PGE
                                       | X86_CPUID_AMD_FEATURE_EDX_MCA
                                       | X86_CPUID_AMD_FEATURE_EDX_CMOV
                                       | X86_CPUID_AMD_FEATURE_EDX_PAT
                                       | X86_CPUID_AMD_FEATURE_EDX_PSE36
                                       //| X86_CPUID_AMD_FEATURE_EDX_NX     - not virtualized, requires PAE.
                                       //| X86_CPUID_AMD_FEATURE_EDX_AXMMX
                                       | X86_CPUID_AMD_FEATURE_EDX_MMX
                                       | X86_CPUID_AMD_FEATURE_EDX_FXSR
                                       | X86_CPUID_AMD_FEATURE_EDX_FFXSR
                                       //| X86_CPUID_AMD_FEATURE_EDX_PAGE1GB
                                       //| X86_CPUID_AMD_FEATURE_EDX_RDTSCP - AMD only; turned on when necessary
                                       //| X86_CPUID_AMD_FEATURE_EDX_LONG_MODE - turned on when necessary
                                       | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
                                       | X86_CPUID_AMD_FEATURE_EDX_3DNOW
                                       | 0;
    pCPUM->aGuestCpuIdExt[1].ecx      &= 0
                                       //| X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF
                                       //| X86_CPUID_AMD_FEATURE_ECX_CMPL
                                       //| X86_CPUID_AMD_FEATURE_ECX_SVM    - not virtualized.
                                       //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
                                       /* Note: This could prevent teleporting from AMD to Intel CPUs! */
                                       | X86_CPUID_AMD_FEATURE_ECX_CR8L /* expose lock mov cr0 = mov cr8 hack for guests that can use this feature to access the TPR. */
                                       //| X86_CPUID_AMD_FEATURE_ECX_ABM
                                       //| X86_CPUID_AMD_FEATURE_ECX_SSE4A
                                       //| X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
                                       //| X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
                                       //| X86_CPUID_AMD_FEATURE_ECX_OSVW
                                       //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
                                       //| X86_CPUID_AMD_FEATURE_ECX_WDT
                                       | 0;

    rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu", &pCPUM->fSyntheticCpu, false); AssertRCReturn(rc, rc);
    if (pCPUM->fSyntheticCpu)
    {
        /* NOTE(review): the string literals below were scraped with collapsed
           whitespace; padding reconstructed to match the declared array sizes
           (12 chars + NUL, 47 chars + NUL) — verify against the repository. */
        const char szVendor[13]    = "VirtualBox  ";
        const char szProcessor[48] = "VirtualBox SPARCx86 Processor v1000            "; /* includes null terminator */

        pCPUM->enmGuestCpuVendor = CPUMCPUVENDOR_SYNTHETIC;

        /* Limit the nr of standard leaves; 5 for monitor/mwait */
        pCPUM->aGuestCpuIdStd[0].eax = RT_MIN(pCPUM->aGuestCpuIdStd[0].eax, 5);

        /* 0: Vendor */
        /* NOTE(review): these (uint32_t *) casts onto char arrays violate
           strict aliasing; a memcpy would be safer — confirm build flags. */
        pCPUM->aGuestCpuIdStd[0].ebx = pCPUM->aGuestCpuIdExt[0].ebx = ((uint32_t *)szVendor)[0];
        pCPUM->aGuestCpuIdStd[0].ecx = pCPUM->aGuestCpuIdExt[0].ecx = ((uint32_t *)szVendor)[2];
        pCPUM->aGuestCpuIdStd[0].edx = pCPUM->aGuestCpuIdExt[0].edx = ((uint32_t *)szVendor)[1];

        /* 1.eax: Version information.  family : model : stepping */
        pCPUM->aGuestCpuIdStd[1].eax = (0xf << 8) + (0x1 << 4) + 1;

        /* Leaves 2 - 4 are Intel only - zero them out */
        memset(&pCPUM->aGuestCpuIdStd[2], 0, sizeof(pCPUM->aGuestCpuIdStd[2]));
        memset(&pCPUM->aGuestCpuIdStd[3], 0, sizeof(pCPUM->aGuestCpuIdStd[3]));
        memset(&pCPUM->aGuestCpuIdStd[4], 0, sizeof(pCPUM->aGuestCpuIdStd[4]));

        /* Leaf 5 = monitor/mwait */

        /* Limit the nr of extended leaves: 0x80000008 to include the max virtual and physical address size (64 bits guests). */
        pCPUM->aGuestCpuIdExt[0].eax = RT_MIN(pCPUM->aGuestCpuIdExt[0].eax, 0x80000008);
        /* AMD only - set to zero. */
        pCPUM->aGuestCpuIdExt[0].ebx = pCPUM->aGuestCpuIdExt[0].ecx = pCPUM->aGuestCpuIdExt[0].edx = 0;

        /* 0x80000001: AMD only; shared feature bits are set dynamically. */
        memset(&pCPUM->aGuestCpuIdExt[1], 0, sizeof(pCPUM->aGuestCpuIdExt[1]));

        /* 0x80000002-4: Processor Name String Identifier. */
        pCPUM->aGuestCpuIdExt[2].eax = ((uint32_t *)szProcessor)[0];
        pCPUM->aGuestCpuIdExt[2].ebx = ((uint32_t *)szProcessor)[1];
        pCPUM->aGuestCpuIdExt[2].ecx = ((uint32_t *)szProcessor)[2];
        pCPUM->aGuestCpuIdExt[2].edx = ((uint32_t *)szProcessor)[3];
        pCPUM->aGuestCpuIdExt[3].eax = ((uint32_t *)szProcessor)[4];
        pCPUM->aGuestCpuIdExt[3].ebx = ((uint32_t *)szProcessor)[5];
        pCPUM->aGuestCpuIdExt[3].ecx = ((uint32_t *)szProcessor)[6];
        pCPUM->aGuestCpuIdExt[3].edx = ((uint32_t *)szProcessor)[7];
        pCPUM->aGuestCpuIdExt[4].eax = ((uint32_t *)szProcessor)[8];
        pCPUM->aGuestCpuIdExt[4].ebx = ((uint32_t *)szProcessor)[9];
        pCPUM->aGuestCpuIdExt[4].ecx = ((uint32_t *)szProcessor)[10];
        pCPUM->aGuestCpuIdExt[4].edx = ((uint32_t *)szProcessor)[11];

        /* 0x80000005-7 - reserved -> zero */
        memset(&pCPUM->aGuestCpuIdExt[5], 0, sizeof(pCPUM->aGuestCpuIdExt[5]));
        memset(&pCPUM->aGuestCpuIdExt[6], 0, sizeof(pCPUM->aGuestCpuIdExt[6]));
        memset(&pCPUM->aGuestCpuIdExt[7], 0, sizeof(pCPUM->aGuestCpuIdExt[7]));

        /* 0x80000008: only the max virtual and physical address size. */
        pCPUM->aGuestCpuIdExt[8].ecx = pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0;  /* reserved */
    }

    /*
     * Hide HTT, multicode, SMP, whatever.
     * (APIC-ID := 0 and #LogCpus := 0)
     */
    pCPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;
#ifdef VBOX_WITH_MULTI_CORE
    if (    pVM->cCpus > 1
        &&  pCPUM->enmGuestCpuVendor != CPUMCPUVENDOR_SYNTHETIC)
    {
        /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU core times the number of CPU cores per processor */
        pCPUM->aGuestCpuIdStd[1].ebx |= (pVM->cCpus << 16);
        pCPUM->aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_HTT;  /* necessary for hyper-threading *or* multi-core CPUs */
    }
#endif

    /* Cpuid 2:
     * Intel: Cache and TLB information
     * AMD:   Reserved
     * Safe to expose
     */

    /* Cpuid 3:
     * Intel: EAX, EBX - reserved
     *        ECX, EDX - Processor Serial Number if available, otherwise reserved
     * AMD:   Reserved
     * Safe to expose
     */
    if (!(pCPUM->aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PSN))
        pCPUM->aGuestCpuIdStd[3].ecx = pCPUM->aGuestCpuIdStd[3].edx = 0;

    /* Cpuid 4:
     * Intel: Deterministic Cache Parameters Leaf
     *        Note: Depends on the ECX input! -> Feeling rather lazy now, so we just return 0
     * AMD:   Reserved
     * Safe to expose, except for EAX:
     *      Bits 25-14: Maximum number of addressable IDs for logical processors sharing this cache (see note)**
     *      Bits 31-26: Maximum number of processor cores in this physical package**
     * Note: These SMP values are constant regardless of ECX
     */
    pCPUM->aGuestCpuIdStd[4].ecx = pCPUM->aGuestCpuIdStd[4].edx = 0;
    pCPUM->aGuestCpuIdStd[4].eax = pCPUM->aGuestCpuIdStd[4].ebx = 0;
#ifdef VBOX_WITH_MULTI_CORE
    if (    pVM->cCpus > 1
        &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
        /* One logical processor with possibly multiple cores. */
        /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
        pCPUM->aGuestCpuIdStd[4].eax |= ((pVM->cCpus - 1) << 26);   /* 6 bits only -> 64 cores! */
    }
#endif

    /* Cpuid 5:     Monitor/mwait Leaf
     * Intel: ECX, EDX - reserved
     *        EAX, EBX - Smallest and largest monitor line size
     * AMD:   EDX - reserved
     *        EAX, EBX - Smallest and largest monitor line size
     *        ECX - extensions (ignored for now)
     * Safe to expose
     */
    if (!(pCPUM->aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR))
        pCPUM->aGuestCpuIdStd[5].eax = pCPUM->aGuestCpuIdStd[5].ebx = 0;

    pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;

    /*
     * Determine the default.
     *
     * Intel returns values of the highest standard function, while AMD
     * returns zeros. VIA on the other hand seems to returning nothing or
     * perhaps some random garbage, we don't try to duplicate this behavior.
     */
    /* Query a leaf guaranteed to be out of range to capture the host's out-of-range answer. */
    ASMCpuId(pCPUM->aGuestCpuIdStd[0].eax + 10,
             &pCPUM->GuestCpuIdDef.eax, &pCPUM->GuestCpuIdDef.ebx,
             &pCPUM->GuestCpuIdDef.ecx, &pCPUM->GuestCpuIdDef.edx);

    /* Cpuid 0x80000005 & 0x80000006 contain information about L1, L2 & L3 cache and TLB identifiers.
     * Safe to pass on to the guest.
     *
     * Intel: 0x80000005 reserved
     *        0x80000006 L2 cache information
     * AMD:   0x80000005 L1 cache information
     *        0x80000006 L2/L3 cache information
     */

    /* Cpuid 0x80000007:
     * AMD:               EAX, EBX, ECX - reserved
     *                    EDX: Advanced Power Management Information
     * Intel:             Reserved
     */
    if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000007))
    {
        Assert(pVM->cpum.s.enmGuestCpuVendor != CPUMCPUVENDOR_INVALID);

        pCPUM->aGuestCpuIdExt[7].eax = pCPUM->aGuestCpuIdExt[7].ebx = pCPUM->aGuestCpuIdExt[7].ecx = 0;

        if (pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
        {
            /* Only expose the TSC invariant capability bit to the guest. */
            pCPUM->aGuestCpuIdExt[7].edx          &= 0
                                                   //| X86_CPUID_AMD_ADVPOWER_EDX_TS
                                                   //| X86_CPUID_AMD_ADVPOWER_EDX_FID
                                                   //| X86_CPUID_AMD_ADVPOWER_EDX_VID
                                                   //| X86_CPUID_AMD_ADVPOWER_EDX_TTP
                                                   //| X86_CPUID_AMD_ADVPOWER_EDX_TM
                                                   //| X86_CPUID_AMD_ADVPOWER_EDX_STC
                                                   //| X86_CPUID_AMD_ADVPOWER_EDX_MC
                                                   //| X86_CPUID_AMD_ADVPOWER_EDX_HWPSTATE
#if 1
            /* We don't expose X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR, because newer Linux kernels blindly assume
             * that the AMD performance counters work if this is set for 64 bits guests. (can't really find a CPUID feature bit for them though)
             */
#else
                                                   | X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR
#endif
                                                   | 0;
        }
        else
            pCPUM->aGuestCpuIdExt[7].edx = 0;
    }

    /* Cpuid 0x80000008:
     * AMD:               EBX, EDX - reserved
     *                    EAX: Virtual/Physical address Size
     *                    ECX: Number of cores + APICIdCoreIdSize
     * Intel:             EAX: Virtual/Physical address Size
     *                    EBX, ECX, EDX - reserved
     */
    if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008))
    {
        /* Only expose the virtual and physical address sizes to the guest. (EAX completely) */
        pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0;  /* reserved */
        /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu)
         * NC (0-7) Number of cores; 0 equals 1 core */
        pCPUM->aGuestCpuIdExt[8].ecx = 0;
#ifdef VBOX_WITH_MULTI_CORE
        if (    pVM->cCpus > 1
            &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
        {
            /* Legacy method to determine the number of cores. */
            pCPUM->aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
            pCPUM->aGuestCpuIdExt[8].ecx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */

        }
#endif
    }

    /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
     * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
     * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
     * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
     */
    bool fNt4LeafLimit;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false); AssertRCReturn(rc, rc);
    if (fNt4LeafLimit)
        pCPUM->aGuestCpuIdStd[0].eax = 3;

    /*
     * Limit it the number of entries and fill the remaining with the defaults.
     *
     * The limits are masking off stuff about power saving and similar, this
     * is perhaps a bit crudely done as there is probably some relatively harmless
     * info too in these leaves (like words about having a constant TSC).
     */
    if (pCPUM->aGuestCpuIdStd[0].eax > 5)
        pCPUM->aGuestCpuIdStd[0].eax = 5;

    for (i = pCPUM->aGuestCpuIdStd[0].eax + 1; i < RT_ELEMENTS(pCPUM->aGuestCpuIdStd); i++)
        pCPUM->aGuestCpuIdStd[i] = pCPUM->GuestCpuIdDef;

    if (pCPUM->aGuestCpuIdExt[0].eax > UINT32_C(0x80000008))
        pCPUM->aGuestCpuIdExt[0].eax = UINT32_C(0x80000008);
    for (i = pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000000)
         ? pCPUM->aGuestCpuIdExt[0].eax - UINT32_C(0x80000000) + 1
         : 0;
         i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt); i++)
        pCPUM->aGuestCpuIdExt[i] = pCPUM->GuestCpuIdDef;

    /*
     * Workaround for missing cpuid(0) patches when leaf 4 returns GuestCpuIdDef:
     * If we miss to patch a cpuid(0).eax then Linux tries to determine the number
     * of processors from (cpuid(4).eax >> 26) + 1.
     */
    if (pVM->cCpus == 1)
        pCPUM->aGuestCpuIdStd[4].eax = 0;

    /*
     * Centaur stuff (VIA).
     *
     * The important part here (we think) is to make sure the 0xc0000000
     * function returns 0xc0000001. As for the features, we don't currently
     * let on about any of those... 0xc0000002 seems to be some
     * temperature/hz/++ stuff, include it as well (static).
     */
    if (    pCPUM->aGuestCpuIdCentaur[0].eax >= UINT32_C(0xc0000000)
        &&  pCPUM->aGuestCpuIdCentaur[0].eax <= UINT32_C(0xc0000004))
    {
        pCPUM->aGuestCpuIdCentaur[0].eax = RT_MIN(pCPUM->aGuestCpuIdCentaur[0].eax, UINT32_C(0xc0000002));
        pCPUM->aGuestCpuIdCentaur[1].edx = 0; /* all features hidden */
        for (i = pCPUM->aGuestCpuIdCentaur[0].eax - UINT32_C(0xc0000000);
             i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
             i++)
            pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
    }
    else
        for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
            pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;


    /*
     * Load CPUID overrides from configuration.
     * Note: Kind of redundant now, but allows unchanged overrides
     */
    /** @cfgm{CPUM/CPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
     * Overrides the CPUID leaf values. */
    PCFGMNODE pOverrideCfg = CFGMR3GetChild(pCpumCfg, "CPUID");
    rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdStd),     pOverrideCfg);
    AssertRCReturn(rc, rc);
    rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdExt),     pOverrideCfg);
    AssertRCReturn(rc, rc);
    rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pOverrideCfg);
    AssertRCReturn(rc, rc);

    /*
     * Check if PAE was explicitly enabled by the user.
     */
    bool fEnable;
    rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false); AssertRCReturn(rc, rc);
    if (fEnable)
        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);

    /*
     * Log the cpuid and we're good.
     */
    RTCPUSET OnlineSet;
    LogRel(("Logical host processors: %d, processor active mask: %016RX64\n",
            (int)RTMpGetCount(), RTCpuSetToU64(RTMpGetOnlineSet(&OnlineSet)) ));
    LogRel(("************************* CPUID dump ************************\n"));
    DBGFR3Info(pVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
    LogRel(("\n"));
    DBGFR3InfoLog(pVM, "cpuid", "verbose"); /* macro */
    LogRel(("******************** End of CPUID dump **********************\n"));
    return VINF_SUCCESS;
}
814
815
816
817
818/**
819 * Applies relocations to data and code managed by this
820 * component. This function will be called at init and
821 * whenever the VMM need to relocate it self inside the GC.
822 *
823 * The CPUM will update the addresses used by the switcher.
824 *
825 * @param pVM The VM.
826 */
827VMMR3DECL(void) CPUMR3Relocate(PVM pVM)
828{
829 LogFlow(("CPUMR3Relocate\n"));
830 for (VMCPUID i = 0; i < pVM->cCpus; i++)
831 {
832 /*
833 * Switcher pointers.
834 */
835 PVMCPU pVCpu = &pVM->aCpus[i];
836 pVCpu->cpum.s.pHyperCoreRC = MMHyperCCToRC(pVM, pVCpu->cpum.s.pHyperCoreR3);
837 Assert(pVCpu->cpum.s.pHyperCoreRC != NIL_RTRCPTR);
838 }
839}
840
841
842/**
843 * Terminates the CPUM.
844 *
845 * Termination means cleaning up and freeing all resources,
846 * the VM it self is at this point powered off or suspended.
847 *
848 * @returns VBox status code.
849 * @param pVM The VM to operate on.
850 */
851VMMR3DECL(int) CPUMR3Term(PVM pVM)
852{
853 CPUMR3TermCPU(pVM);
854 return 0;
855}
856
857
858/**
859 * Terminates the per-VCPU CPUM.
860 *
861 * Termination means cleaning up and freeing all resources,
862 * the VM it self is at this point powered off or suspended.
863 *
864 * @returns VBox status code.
865 * @param pVM The VM to operate on.
866 */
867VMMR3DECL(int) CPUMR3TermCPU(PVM pVM)
868{
869#ifdef VBOX_WITH_CRASHDUMP_MAGIC
870 for (VMCPUID i = 0; i < pVM->cCpus; i++)
871 {
872 PVMCPU pVCpu = &pVM->aCpus[i];
873 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
874
875 memset(pVCpu->cpum.s.aMagic, 0, sizeof(pVCpu->cpum.s.aMagic));
876 pVCpu->cpum.s.uMagic = 0;
877 pCtx->dr[5] = 0;
878 }
879#endif
880 return 0;
881}
882
/**
 * Resets the guest CPU state of a virtual CPU to its power-on / RESET values.
 *
 * The values follow the Intel SDM Vol. 3A table of processor state following
 * power-up/reset/INIT and the AMD64 APM initial processor state table (see
 * the citations inline below).
 *
 * @param   pVCpu       The virtual CPU to reset.
 */
VMMR3DECL(void) CPUMR3ResetCpu(PVMCPU pVCpu)
{
    /** @todo anything different for VCPU > 0? */
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);

    /*
     * Initialize everything to ZERO first.
     */
    /* Preserve the FPU usage flags across the wipe, but clear the
       used-since-REM bit. */
    uint32_t fUseFlags = pVCpu->cpum.s.fUseFlags & ~CPUM_USED_FPU_SINCE_REM;
    memset(pCtx, 0, sizeof(*pCtx));
    pVCpu->cpum.s.fUseFlags = fUseFlags;

    pCtx->cr0 = X86_CR0_CD | X86_CR0_NW | X86_CR0_ET; //0x60000010
    pCtx->eip = 0x0000fff0; /* execution starts at the reset vector CS:IP = F000:FFF0 */
    pCtx->edx = 0x00000600; /* P6 processor */
    pCtx->eflags.Bits.u1Reserved0 = 1; /* bit 1 of EFLAGS is always set */

    /* CS starts with base 0xFFFF0000 so CS:IP points at the reset vector
       just below 4GB. */
    pCtx->cs = 0xf000;
    pCtx->csHid.u64Base = UINT64_C(0xffff0000);
    pCtx->csHid.u32Limit = 0x0000ffff;
    pCtx->csHid.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->csHid.Attr.n.u1Present = 1;
    pCtx->csHid.Attr.n.u4Type = X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;

    /* Data segments: selector 0, base 0, 64K limit, read/write. */
    pCtx->dsHid.u32Limit = 0x0000ffff;
    pCtx->dsHid.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->dsHid.Attr.n.u1Present = 1;
    pCtx->dsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;

    pCtx->esHid.u32Limit = 0x0000ffff;
    pCtx->esHid.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->esHid.Attr.n.u1Present = 1;
    pCtx->esHid.Attr.n.u4Type = X86_SEL_TYPE_RW;

    pCtx->fsHid.u32Limit = 0x0000ffff;
    pCtx->fsHid.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->fsHid.Attr.n.u1Present = 1;
    pCtx->fsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;

    pCtx->gsHid.u32Limit = 0x0000ffff;
    pCtx->gsHid.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->gsHid.Attr.n.u1Present = 1;
    pCtx->gsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;

    pCtx->ssHid.u32Limit = 0x0000ffff;
    pCtx->ssHid.Attr.n.u1Present = 1;
    pCtx->ssHid.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->ssHid.Attr.n.u4Type = X86_SEL_TYPE_RW;

    /* IDTR/GDTR: base 0, limit 0xFFFF. */
    pCtx->idtr.cbIdt = 0xffff;
    pCtx->gdtr.cbGdt = 0xffff;

    pCtx->ldtrHid.u32Limit = 0xffff;
    pCtx->ldtrHid.Attr.n.u1Present = 1;
    pCtx->ldtrHid.Attr.n.u4Type = X86_SEL_TYPE_SYS_LDT;

    pCtx->trHid.u32Limit = 0xffff;
    pCtx->trHid.Attr.n.u1Present = 1;
    pCtx->trHid.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;

    /* Debug registers get their architectural init values. */
    pCtx->dr[6] = X86_DR6_INIT_VAL;
    pCtx->dr[7] = X86_DR7_INIT_VAL;

    pCtx->fpu.FTW = 0xff; /* All tags are set, i.e. the regs are empty. */
    pCtx->fpu.FCW = 0x37f;

    /* Intel 64 and IA-32 Architectures Software Developer's Manual Volume 3A, Table 8-1. IA-32 Processor States Following Power-up, Reset, or INIT */
    pCtx->fpu.MXCSR = 0x1F80;

    /* Init PAT MSR */
    pCtx->msrPAT = UINT64_C(0x0007040600070406); /** @todo correct? */

    /* Reset EFER; see AMD64 Architecture Programmer's Manual Volume 2: Table 14-1. Initial Processor State
     * The Intel docs don't mention it.
     */
    pCtx->msrEFER = 0;
}
960
961/**
962 * Resets the CPU.
963 *
964 * @returns VINF_SUCCESS.
965 * @param pVM The VM handle.
966 */
967VMMR3DECL(void) CPUMR3Reset(PVM pVM)
968{
969 for (VMCPUID i = 0; i < pVM->cCpus; i++)
970 {
971 CPUMR3ResetCpu(&pVM->aCpus[i]);
972
973#ifdef VBOX_WITH_CRASHDUMP_MAGIC
974 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(&pVM->aCpus[i]);
975
976 /* Magic marker for searching in crash dumps. */
977 strcpy((char *)pVM->aCpus[i].cpum.s.aMagic, "CPUMCPU Magic");
978 pVM->aCpus[i].cpum.s.uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
979 pCtx->dr[5] = UINT64_C(0xDEADBEEFDEADBEEF);
980#endif
981 }
982}
983
984#ifdef VBOX_WITH_LIVE_MIGRATION
985
986/**
987 * Called both in pass 0 and the final pass.
988 *
989 * @param pVM The VM handle.
990 * @param pSSM The saved state handle.
991 */
992static void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM)
993{
994 /*
995 * Save all the CPU ID leaves here so we can check them for compatability
996 * upon loading.
997 */
998 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd));
999 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], sizeof(pVM->cpum.s.aGuestCpuIdStd));
1000
1001 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt));
1002 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
1003
1004 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur));
1005 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
1006
1007 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
1008
1009 /*
1010 * Save a good portion of the raw CPU IDs as well as they may come in
1011 * handy when validating features for raw mode.
1012 */
1013 CPUMCPUID aRawStd[8];
1014 for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++)
1015 ASMCpuId(i, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
1016 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd));
1017 SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd));
1018
1019 CPUMCPUID aRawExt[16];
1020 for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++)
1021 ASMCpuId(i | UINT32_C(0x80000000), &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
1022 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
1023 SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
1024}
1025
1026
1027/**
1028 * Loads the CPU ID leaves saved by pass 0.
1029 *
1030 * @returns VBox status code.
1031 * @param pVM The VM handle.
1032 * @param pSSM The saved state handle.
1033 * @param uVersion The format version.
1034 */
1035static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
1036{
1037 AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
1038
1039 /*
1040 * Load them into stack buffers first.
1041 */
1042 CPUMCPUID aGuestCpuIdStd[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd)];
1043 uint32_t cGuestCpuIdStd;
1044 int rc = SSMR3GetU32(pSSM, &cGuestCpuIdStd); AssertRCReturn(rc, rc);
1045 if (cGuestCpuIdStd > RT_ELEMENTS(aGuestCpuIdStd))
1046 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1047 SSMR3GetMem(pSSM, &aGuestCpuIdStd[0], cGuestCpuIdStd * sizeof(aGuestCpuIdStd[0]));
1048
1049 CPUMCPUID aGuestCpuIdExt[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt)];
1050 uint32_t cGuestCpuIdExt;
1051 rc = SSMR3GetU32(pSSM, &cGuestCpuIdExt); AssertRCReturn(rc, rc);
1052 if (cGuestCpuIdExt > RT_ELEMENTS(aGuestCpuIdExt))
1053 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1054 SSMR3GetMem(pSSM, &aGuestCpuIdExt[0], cGuestCpuIdExt * sizeof(aGuestCpuIdExt[0]));
1055
1056 CPUMCPUID aGuestCpuIdCentaur[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur)];
1057 uint32_t cGuestCpuIdCentaur;
1058 rc = SSMR3GetU32(pSSM, &cGuestCpuIdCentaur); AssertRCReturn(rc, rc);
1059 if (cGuestCpuIdCentaur > RT_ELEMENTS(aGuestCpuIdCentaur))
1060 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1061 SSMR3GetMem(pSSM, &aGuestCpuIdCentaur[0], cGuestCpuIdCentaur * sizeof(aGuestCpuIdCentaur[0]));
1062
1063 CPUMCPUID GuestCpuIdDef;
1064 rc = SSMR3GetMem(pSSM, &GuestCpuIdDef, sizeof(GuestCpuIdDef));
1065 AssertRCReturn(rc, rc);
1066
1067 CPUMCPUID aRawStd[8];
1068 uint32_t cRawStd;
1069 rc = SSMR3GetU32(pSSM, &cRawStd); AssertRCReturn(rc, rc);
1070 if (cRawStd > RT_ELEMENTS(aRawStd))
1071 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1072 SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
1073
1074 CPUMCPUID aRawExt[16];
1075 uint32_t cRawExt;
1076 rc = SSMR3GetU32(pSSM, &cRawExt); AssertRCReturn(rc, rc);
1077 if (cRawExt > RT_ELEMENTS(aRawExt))
1078 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1079 rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
1080 AssertRCReturn(rc, rc);
1081
1082 /*
1083 * Note that we support restoring less than the current amount of standard
1084 * leaves because we've been allowed more is newer version of VBox.
1085 *
1086 * So, pad new entries with the default.
1087 */
1088 for (uint32_t i = cGuestCpuIdStd; i < RT_ELEMENTS(aGuestCpuIdStd); i++)
1089 aGuestCpuIdStd[i] = GuestCpuIdDef;
1090
1091 for (uint32_t i = cGuestCpuIdExt; i < RT_ELEMENTS(aGuestCpuIdExt); i++)
1092 aGuestCpuIdExt[i] = GuestCpuIdDef;
1093
1094 for (uint32_t i = cGuestCpuIdCentaur; i < RT_ELEMENTS(aGuestCpuIdCentaur); i++)
1095 aGuestCpuIdCentaur[i] = GuestCpuIdDef;
1096
1097 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
1098 ASMCpuId(i, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
1099
1100 for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
1101 ASMCpuId(i | UINT32_C(0x80000000), &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
1102
1103 /*
1104 * Get the raw CPU IDs for the current host.
1105 */
1106 CPUMCPUID aHostRawStd[8];
1107 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++)
1108 ASMCpuId(i, &aHostRawStd[i].eax, &aHostRawStd[i].ebx, &aHostRawStd[i].ecx, &aHostRawStd[i].edx);
1109
1110 CPUMCPUID aHostRawExt[16];
1111 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++)
1112 ASMCpuId(i | UINT32_C(0x80000000), &aHostRawExt[i].eax, &aHostRawExt[i].ebx, &aHostRawExt[i].ecx, &aHostRawExt[i].edx);
1113
1114 /*
1115 * Now for the fun part...
1116 */
1117
1118
1119 /*
1120 * We're good, commit the CPU ID leaves.
1121 */
1122 memcmp(&pVM->cpum.s.aGuestCpuIdStd[0], &aGuestCpuIdStd[0], sizeof(aGuestCpuIdStd));
1123 memcmp(&pVM->cpum.s.aGuestCpuIdExt[0], &aGuestCpuIdExt[0], sizeof(aGuestCpuIdExt));
1124 memcmp(&pVM->cpum.s.aGuestCpuIdCentaur[0], &aGuestCpuIdCentaur[0], sizeof(aGuestCpuIdCentaur));
1125 pVM->cpum.s.GuestCpuIdDef = GuestCpuIdDef;
1126
1127 return VINF_SUCCESS;
1128}
1129
1130
1131/**
1132 * Pass 0 live exec callback.
1133 *
1134 * @returns VINF_SSM_DONT_CALL_AGAIN.
1135 * @param pVM The VM handle.
1136 * @param pSSM The saved state handle.
1137 * @param uPass The pass (0).
1138 */
1139static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1140{
1141 AssertReturn(uPass == 0, VERR_INTERNAL_ERROR_4);
1142 cpumR3SaveCpuId(pVM, pSSM);
1143 return VINF_SSM_DONT_CALL_AGAIN;
1144}
1145
1146#endif /* VBOX_WITH_LIVE_MIGRATION */
1147
1148/**
1149 * Execute state save operation.
1150 *
1151 * @returns VBox status code.
1152 * @param pVM VM Handle.
1153 * @param pSSM SSM operation handle.
1154 */
1155static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
1156{
1157 /*
1158 * Save.
1159 */
1160 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1161 {
1162 PVMCPU pVCpu = &pVM->aCpus[i];
1163
1164 SSMR3PutMem(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper));
1165 }
1166
1167 SSMR3PutU32(pSSM, pVM->cCpus);
1168 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1169 {
1170 PVMCPU pVCpu = &pVM->aCpus[i];
1171
1172 SSMR3PutMem(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest));
1173 SSMR3PutU32(pSSM, pVCpu->cpum.s.fUseFlags);
1174 SSMR3PutU32(pSSM, pVCpu->cpum.s.fChanged);
1175 SSMR3PutMem(pSSM, &pVCpu->cpum.s.GuestMsr, sizeof(pVCpu->cpum.s.GuestMsr));
1176 }
1177
1178#ifdef VBOX_WITH_LIVE_MIGRATION
1179 cpumR3SaveCpuId(pVM, pSSM);
1180 return VINF_SUCCESS;
1181#else
1182
1183 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd));
1184 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], sizeof(pVM->cpum.s.aGuestCpuIdStd));
1185
1186 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt));
1187 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
1188
1189 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur));
1190 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
1191
1192 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
1193
1194 /* Add the cpuid for checking that the cpu is unchanged. */
1195 uint32_t au32CpuId[8] = {0};
1196 ASMCpuId(0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
1197 ASMCpuId(1, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
1198 return SSMR3PutMem(pSSM, &au32CpuId[0], sizeof(au32CpuId));
1199#endif
1200}
1201
1202
1203/**
1204 * Load a version 1.6 CPUMCTX structure.
1205 *
1206 * @returns VBox status code.
1207 * @param pVM VM Handle.
1208 * @param pCpumctx16 Version 1.6 CPUMCTX
1209 */
1210static void cpumR3LoadCPUM1_6(PVM pVM, CPUMCTX_VER1_6 *pCpumctx16)
1211{
1212#define CPUMCTX16_LOADREG(RegName) \
1213 pVM->aCpus[0].cpum.s.Guest.RegName = pCpumctx16->RegName;
1214
1215#define CPUMCTX16_LOADDRXREG(RegName) \
1216 pVM->aCpus[0].cpum.s.Guest.dr[RegName] = pCpumctx16->dr##RegName;
1217
1218#define CPUMCTX16_LOADHIDREG(RegName) \
1219 pVM->aCpus[0].cpum.s.Guest.RegName##Hid.u64Base = pCpumctx16->RegName##Hid.u32Base; \
1220 pVM->aCpus[0].cpum.s.Guest.RegName##Hid.u32Limit = pCpumctx16->RegName##Hid.u32Limit; \
1221 pVM->aCpus[0].cpum.s.Guest.RegName##Hid.Attr = pCpumctx16->RegName##Hid.Attr;
1222
1223#define CPUMCTX16_LOADSEGREG(RegName) \
1224 pVM->aCpus[0].cpum.s.Guest.RegName = pCpumctx16->RegName; \
1225 CPUMCTX16_LOADHIDREG(RegName);
1226
1227 pVM->aCpus[0].cpum.s.Guest.fpu = pCpumctx16->fpu;
1228
1229 CPUMCTX16_LOADREG(rax);
1230 CPUMCTX16_LOADREG(rbx);
1231 CPUMCTX16_LOADREG(rcx);
1232 CPUMCTX16_LOADREG(rdx);
1233 CPUMCTX16_LOADREG(rdi);
1234 CPUMCTX16_LOADREG(rsi);
1235 CPUMCTX16_LOADREG(rbp);
1236 CPUMCTX16_LOADREG(esp);
1237 CPUMCTX16_LOADREG(rip);
1238 CPUMCTX16_LOADREG(rflags);
1239
1240 CPUMCTX16_LOADSEGREG(cs);
1241 CPUMCTX16_LOADSEGREG(ds);
1242 CPUMCTX16_LOADSEGREG(es);
1243 CPUMCTX16_LOADSEGREG(fs);
1244 CPUMCTX16_LOADSEGREG(gs);
1245 CPUMCTX16_LOADSEGREG(ss);
1246
1247 CPUMCTX16_LOADREG(r8);
1248 CPUMCTX16_LOADREG(r9);
1249 CPUMCTX16_LOADREG(r10);
1250 CPUMCTX16_LOADREG(r11);
1251 CPUMCTX16_LOADREG(r12);
1252 CPUMCTX16_LOADREG(r13);
1253 CPUMCTX16_LOADREG(r14);
1254 CPUMCTX16_LOADREG(r15);
1255
1256 CPUMCTX16_LOADREG(cr0);
1257 CPUMCTX16_LOADREG(cr2);
1258 CPUMCTX16_LOADREG(cr3);
1259 CPUMCTX16_LOADREG(cr4);
1260
1261 CPUMCTX16_LOADDRXREG(0);
1262 CPUMCTX16_LOADDRXREG(1);
1263 CPUMCTX16_LOADDRXREG(2);
1264 CPUMCTX16_LOADDRXREG(3);
1265 CPUMCTX16_LOADDRXREG(4);
1266 CPUMCTX16_LOADDRXREG(5);
1267 CPUMCTX16_LOADDRXREG(6);
1268 CPUMCTX16_LOADDRXREG(7);
1269
1270 pVM->aCpus[0].cpum.s.Guest.gdtr.cbGdt = pCpumctx16->gdtr.cbGdt;
1271 pVM->aCpus[0].cpum.s.Guest.gdtr.pGdt = pCpumctx16->gdtr.pGdt;
1272 pVM->aCpus[0].cpum.s.Guest.idtr.cbIdt = pCpumctx16->idtr.cbIdt;
1273 pVM->aCpus[0].cpum.s.Guest.idtr.pIdt = pCpumctx16->idtr.pIdt;
1274
1275 CPUMCTX16_LOADREG(ldtr);
1276 CPUMCTX16_LOADREG(tr);
1277
1278 pVM->aCpus[0].cpum.s.Guest.SysEnter = pCpumctx16->SysEnter;
1279
1280 CPUMCTX16_LOADREG(msrEFER);
1281 CPUMCTX16_LOADREG(msrSTAR);
1282 CPUMCTX16_LOADREG(msrPAT);
1283 CPUMCTX16_LOADREG(msrLSTAR);
1284 CPUMCTX16_LOADREG(msrCSTAR);
1285 CPUMCTX16_LOADREG(msrSFMASK);
1286 CPUMCTX16_LOADREG(msrKERNELGSBASE);
1287
1288 CPUMCTX16_LOADHIDREG(ldtr);
1289 CPUMCTX16_LOADHIDREG(tr);
1290
1291#undef CPUMCTX16_LOADSEGREG
1292#undef CPUMCTX16_LOADHIDREG
1293#undef CPUMCTX16_LOADDRXREG
1294#undef CPUMCTX16_LOADREG
1295}
1296
1297
1298/**
1299 * Execute state load operation.
1300 *
1301 * @returns VBox status code.
1302 * @param pVM VM Handle.
1303 * @param pSSM SSM operation handle.
1304 * @param uVersion Data layout version.
1305 * @param uPass The data pass.
1306 */
1307static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
1308{
1309 /*
1310 * Validate version.
1311 */
1312 if ( uVersion != CPUM_SAVED_STATE_VERSION
1313 && uVersion != CPUM_SAVED_STATE_VERSION_VER3_0
1314 && uVersion != CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR
1315 && uVersion != CPUM_SAVED_STATE_VERSION_VER2_0
1316 && uVersion != CPUM_SAVED_STATE_VERSION_VER1_6)
1317 {
1318 AssertMsgFailed(("cpumR3LoadExec: Invalid version uVersion=%d!\n", uVersion));
1319 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1320 }
1321
1322 if (uPass == SSM_PASS_FINAL)
1323 {
1324 /*
1325 * Set the size of RTGCPTR for SSMR3GetGCPtr. (Only necessary for
1326 * really old SSM file versions.)
1327 */
1328 if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
1329 SSMR3SetGCPtrSize(pSSM, sizeof(RTGCPTR32));
1330 else if (uVersion <= CPUM_SAVED_STATE_VERSION_VER3_0)
1331 SSMR3SetGCPtrSize(pSSM, HC_ARCH_BITS == 32 ? sizeof(RTGCPTR32) : sizeof(RTGCPTR));
1332
1333 /*
1334 * Restore.
1335 */
1336 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1337 {
1338 PVMCPU pVCpu = &pVM->aCpus[i];
1339 uint32_t uCR3 = pVCpu->cpum.s.Hyper.cr3;
1340 uint32_t uESP = pVCpu->cpum.s.Hyper.esp; /* see VMMR3Relocate(). */
1341
1342 SSMR3GetMem(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper));
1343 pVCpu->cpum.s.Hyper.cr3 = uCR3;
1344 pVCpu->cpum.s.Hyper.esp = uESP;
1345 }
1346
1347 if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
1348 {
1349 CPUMCTX_VER1_6 cpumctx16;
1350 memset(&pVM->aCpus[0].cpum.s.Guest, 0, sizeof(pVM->aCpus[0].cpum.s.Guest));
1351 SSMR3GetMem(pSSM, &cpumctx16, sizeof(cpumctx16));
1352
1353 /* Save the old cpumctx state into the new one. */
1354 cpumR3LoadCPUM1_6(pVM, &cpumctx16);
1355
1356 SSMR3GetU32(pSSM, &pVM->aCpus[0].cpum.s.fUseFlags);
1357 SSMR3GetU32(pSSM, &pVM->aCpus[0].cpum.s.fChanged);
1358 }
1359 else
1360 {
1361 if (uVersion >= CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR)
1362 {
1363 uint32_t cCpus;
1364 int rc = SSMR3GetU32(pSSM, &cCpus); AssertRCReturn(rc, rc);
1365 AssertLogRelMsgReturn(cCpus == pVM->cCpus, ("Mismatching CPU counts: saved: %u; configured: %u \n", cCpus, pVM->cCpus),
1366 VERR_SSM_UNEXPECTED_DATA);
1367 }
1368 AssertLogRelMsgReturn( uVersion != CPUM_SAVED_STATE_VERSION_VER2_0
1369 || pVM->cCpus == 1,
1370 ("cCpus=%u\n", pVM->cCpus),
1371 VERR_SSM_UNEXPECTED_DATA);
1372
1373 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1374 {
1375 SSMR3GetMem(pSSM, &pVM->aCpus[i].cpum.s.Guest, sizeof(pVM->aCpus[i].cpum.s.Guest));
1376 SSMR3GetU32(pSSM, &pVM->aCpus[i].cpum.s.fUseFlags);
1377 SSMR3GetU32(pSSM, &pVM->aCpus[i].cpum.s.fChanged);
1378 if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_0)
1379 SSMR3GetMem(pSSM, &pVM->aCpus[i].cpum.s.GuestMsr, sizeof(pVM->aCpus[i].cpum.s.GuestMsr));
1380 }
1381 }
1382 }
1383
1384#ifdef VBOX_WITH_LIVE_MIGRATION
1385 /*
1386 * Guest CPU config and CPUID.
1387 */
1388 /** @todo config. */
1389
1390 if (uVersion > CPUM_SAVED_STATE_VERSION_VER3_0)
1391 return cpumR3LoadCpuId(pVM, pSSM, uVersion);
1392
1393 /** @todo Merge the code below into cpumR3LoadCpuId when we've found out what is
1394 * actually required. */
1395#endif
1396
1397 /*
1398 * Restore the CPUID leaves.
1399 *
1400 * Note that we support restoring less than the current amount of standard
1401 * leaves because we've been allowed more is newer version of VBox.
1402 */
1403 uint32_t cElements;
1404 int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
1405 if (cElements > RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1406 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1407 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], cElements*sizeof(pVM->cpum.s.aGuestCpuIdStd[0]));
1408
1409 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
1410 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1411 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1412 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
1413
1414 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
1415 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1416 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1417 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
1418
1419 SSMR3GetMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
1420
1421 /*
1422 * Check that the basic cpuid id information is unchanged.
1423 */
1424 /** @todo we should check the 64 bits capabilities too! */
1425 uint32_t au32CpuId[8] = {0};
1426 ASMCpuId(0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
1427 ASMCpuId(1, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
1428 uint32_t au32CpuIdSaved[8];
1429 rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
1430 if (RT_SUCCESS(rc))
1431 {
1432 /* Ignore CPU stepping. */
1433 au32CpuId[4] &= 0xfffffff0;
1434 au32CpuIdSaved[4] &= 0xfffffff0;
1435
1436 /* Ignore APIC ID (AMD specs). */
1437 au32CpuId[5] &= ~0xff000000;
1438 au32CpuIdSaved[5] &= ~0xff000000;
1439
1440 /* Ignore the number of Logical CPUs (AMD specs). */
1441 au32CpuId[5] &= ~0x00ff0000;
1442 au32CpuIdSaved[5] &= ~0x00ff0000;
1443
1444 /* Ignore some advanced capability bits, that we don't expose to the guest. */
1445 au32CpuId[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
1446 | X86_CPUID_FEATURE_ECX_VMX
1447 | X86_CPUID_FEATURE_ECX_SMX
1448 | X86_CPUID_FEATURE_ECX_EST
1449 | X86_CPUID_FEATURE_ECX_TM2
1450 | X86_CPUID_FEATURE_ECX_CNTXID
1451 | X86_CPUID_FEATURE_ECX_TPRUPDATE
1452 | X86_CPUID_FEATURE_ECX_PDCM
1453 | X86_CPUID_FEATURE_ECX_DCA
1454 | X86_CPUID_FEATURE_ECX_X2APIC
1455 );
1456 au32CpuIdSaved[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
1457 | X86_CPUID_FEATURE_ECX_VMX
1458 | X86_CPUID_FEATURE_ECX_SMX
1459 | X86_CPUID_FEATURE_ECX_EST
1460 | X86_CPUID_FEATURE_ECX_TM2
1461 | X86_CPUID_FEATURE_ECX_CNTXID
1462 | X86_CPUID_FEATURE_ECX_TPRUPDATE
1463 | X86_CPUID_FEATURE_ECX_PDCM
1464 | X86_CPUID_FEATURE_ECX_DCA
1465 | X86_CPUID_FEATURE_ECX_X2APIC
1466 );
1467
1468 /* Make sure we don't forget to update the masks when enabling
1469 * features in the future.
1470 */
1471 AssertRelease(!(pVM->cpum.s.aGuestCpuIdStd[1].ecx &
1472 ( X86_CPUID_FEATURE_ECX_DTES64
1473 | X86_CPUID_FEATURE_ECX_VMX
1474 | X86_CPUID_FEATURE_ECX_SMX
1475 | X86_CPUID_FEATURE_ECX_EST
1476 | X86_CPUID_FEATURE_ECX_TM2
1477 | X86_CPUID_FEATURE_ECX_CNTXID
1478 | X86_CPUID_FEATURE_ECX_TPRUPDATE
1479 | X86_CPUID_FEATURE_ECX_PDCM
1480 | X86_CPUID_FEATURE_ECX_DCA
1481 | X86_CPUID_FEATURE_ECX_X2APIC
1482 )));
1483 /* do the compare */
1484 if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
1485 {
1486 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
1487 LogRel(("cpumR3LoadExec: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
1488 "Saved=%.*Rhxs\n"
1489 "Real =%.*Rhxs\n",
1490 sizeof(au32CpuIdSaved), au32CpuIdSaved,
1491 sizeof(au32CpuId), au32CpuId));
1492 else
1493 {
1494 LogRel(("cpumR3LoadExec: CpuId mismatch!\n"
1495 "Saved=%.*Rhxs\n"
1496 "Real =%.*Rhxs\n",
1497 sizeof(au32CpuIdSaved), au32CpuIdSaved,
1498 sizeof(au32CpuId), au32CpuId));
1499 rc = VERR_SSM_LOAD_CPUID_MISMATCH;
1500 }
1501 }
1502 }
1503
1504 return rc;
1505}
1506
1507
1508/**
1509 * Formats the EFLAGS value into mnemonics.
1510 *
1511 * @param pszEFlags Where to write the mnemonics. (Assumes sufficient buffer space.)
1512 * @param efl The EFLAGS value.
1513 */
1514static void cpumR3InfoFormatFlags(char *pszEFlags, uint32_t efl)
1515{
1516 /*
1517 * Format the flags.
1518 */
1519 static const struct
1520 {
1521 const char *pszSet; const char *pszClear; uint32_t fFlag;
1522 } s_aFlags[] =
1523 {
1524 { "vip",NULL, X86_EFL_VIP },
1525 { "vif",NULL, X86_EFL_VIF },
1526 { "ac", NULL, X86_EFL_AC },
1527 { "vm", NULL, X86_EFL_VM },
1528 { "rf", NULL, X86_EFL_RF },
1529 { "nt", NULL, X86_EFL_NT },
1530 { "ov", "nv", X86_EFL_OF },
1531 { "dn", "up", X86_EFL_DF },
1532 { "ei", "di", X86_EFL_IF },
1533 { "tf", NULL, X86_EFL_TF },
1534 { "nt", "pl", X86_EFL_SF },
1535 { "nz", "zr", X86_EFL_ZF },
1536 { "ac", "na", X86_EFL_AF },
1537 { "po", "pe", X86_EFL_PF },
1538 { "cy", "nc", X86_EFL_CF },
1539 };
1540 char *psz = pszEFlags;
1541 for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
1542 {
1543 const char *pszAdd = s_aFlags[i].fFlag & efl ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
1544 if (pszAdd)
1545 {
1546 strcpy(psz, pszAdd);
1547 psz += strlen(pszAdd);
1548 *psz++ = ' ';
1549 }
1550 }
1551 psz[-1] = '\0';
1552}
1553
1554
1555/**
1556 * Formats a full register dump.
1557 *
1558 * @param pVM VM Handle.
1559 * @param pCtx The context to format.
1560 * @param pCtxCore The context core to format.
1561 * @param pHlp Output functions.
1562 * @param enmType The dump type.
1563 * @param pszPrefix Register name prefix.
1564 */
1565static void cpumR3InfoOne(PVM pVM, PCPUMCTX pCtx, PCCPUMCTXCORE pCtxCore, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType, const char *pszPrefix)
1566{
1567 /*
1568 * Format the EFLAGS.
1569 */
1570 uint32_t efl = pCtxCore->eflags.u32;
1571 char szEFlags[80];
1572 cpumR3InfoFormatFlags(&szEFlags[0], efl);
1573
1574 /*
1575 * Format the registers.
1576 */
1577 switch (enmType)
1578 {
1579 case CPUMDUMPTYPE_TERSE:
1580 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1581 pHlp->pfnPrintf(pHlp,
1582 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1583 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1584 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1585 "%sr14=%016RX64 %sr15=%016RX64\n"
1586 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1587 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
1588 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1589 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1590 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1591 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1592 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
1593 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, efl);
1594 else
1595 pHlp->pfnPrintf(pHlp,
1596 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1597 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1598 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
1599 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1600 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1601 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
1602 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, efl);
1603 break;
1604
1605 case CPUMDUMPTYPE_DEFAULT:
1606 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1607 pHlp->pfnPrintf(pHlp,
1608 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1609 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1610 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1611 "%sr14=%016RX64 %sr15=%016RX64\n"
1612 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1613 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
1614 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%016RX64:%04x %sldtr=%04x\n"
1615 ,
1616 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1617 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1618 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1619 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1620 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
1621 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, (RTSEL)pCtx->tr, pszPrefix, efl,
1622 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1623 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, (RTSEL)pCtx->ldtr);
1624 else
1625 pHlp->pfnPrintf(pHlp,
1626 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1627 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1628 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
1629 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%08RX64:%04x %sldtr=%04x\n"
1630 ,
1631 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1632 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1633 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
1634 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, (RTSEL)pCtx->tr, pszPrefix, efl,
1635 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1636 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, (RTSEL)pCtx->ldtr);
1637 break;
1638
1639 case CPUMDUMPTYPE_VERBOSE:
1640 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1641 pHlp->pfnPrintf(pHlp,
1642 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1643 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1644 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1645 "%sr14=%016RX64 %sr15=%016RX64\n"
1646 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1647 "%scs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1648 "%sds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1649 "%ses={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1650 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1651 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1652 "%sss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1653 "%scr0=%016RX64 %scr2=%016RX64 %scr3=%016RX64 %scr4=%016RX64\n"
1654 "%sdr0=%016RX64 %sdr1=%016RX64 %sdr2=%016RX64 %sdr3=%016RX64\n"
1655 "%sdr4=%016RX64 %sdr5=%016RX64 %sdr6=%016RX64 %sdr7=%016RX64\n"
1656 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
1657 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1658 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1659 "%sSysEnter={cs=%04llx eip=%016RX64 esp=%016RX64}\n"
1660 ,
1661 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1662 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1663 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1664 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1665 pszPrefix, (RTSEL)pCtxCore->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u,
1666 pszPrefix, (RTSEL)pCtxCore->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u,
1667 pszPrefix, (RTSEL)pCtxCore->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u,
1668 pszPrefix, (RTSEL)pCtxCore->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u,
1669 pszPrefix, (RTSEL)pCtxCore->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u,
1670 pszPrefix, (RTSEL)pCtxCore->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u,
1671 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1672 pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1], pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
1673 pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5], pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
1674 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
1675 pszPrefix, (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1676 pszPrefix, (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1677 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1678 else
1679 pHlp->pfnPrintf(pHlp,
1680 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1681 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1682 "%scs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr0=%08RX64 %sdr1=%08RX64\n"
1683 "%sds={%04x base=%016RX64 limit=%08x flags=%08x} %sdr2=%08RX64 %sdr3=%08RX64\n"
1684 "%ses={%04x base=%016RX64 limit=%08x flags=%08x} %sdr4=%08RX64 %sdr5=%08RX64\n"
1685 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr6=%08RX64 %sdr7=%08RX64\n"
1686 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x} %scr0=%08RX64 %scr2=%08RX64\n"
1687 "%sss={%04x base=%016RX64 limit=%08x flags=%08x} %scr3=%08RX64 %scr4=%08RX64\n"
1688 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
1689 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1690 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1691 "%sSysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1692 ,
1693 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1694 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1695 pszPrefix, (RTSEL)pCtxCore->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1],
1696 pszPrefix, (RTSEL)pCtxCore->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
1697 pszPrefix, (RTSEL)pCtxCore->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5],
1698 pszPrefix, (RTSEL)pCtxCore->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
1699 pszPrefix, (RTSEL)pCtxCore->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2,
1700 pszPrefix, (RTSEL)pCtxCore->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1701 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
1702 pszPrefix, (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1703 pszPrefix, (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1704 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1705
1706 pHlp->pfnPrintf(pHlp,
1707 "FPU:\n"
1708 "%sFCW=%04x %sFSW=%04x %sFTW=%02x\n"
1709 "%sres1=%02x %sFOP=%04x %sFPUIP=%08x %sCS=%04x %sRsvrd1=%04x\n"
1710 "%sFPUDP=%04x %sDS=%04x %sRsvrd2=%04x %sMXCSR=%08x %sMXCSR_MASK=%08x\n"
1711 ,
1712 pszPrefix, pCtx->fpu.FCW, pszPrefix, pCtx->fpu.FSW, pszPrefix, pCtx->fpu.FTW,
1713 pszPrefix, pCtx->fpu.huh1, pszPrefix, pCtx->fpu.FOP, pszPrefix, pCtx->fpu.FPUIP, pszPrefix, pCtx->fpu.CS, pszPrefix, pCtx->fpu.Rsvrd1,
1714 pszPrefix, pCtx->fpu.FPUDP, pszPrefix, pCtx->fpu.DS, pszPrefix, pCtx->fpu.Rsrvd2,
1715 pszPrefix, pCtx->fpu.MXCSR, pszPrefix, pCtx->fpu.MXCSR_MASK);
1716
1717 pHlp->pfnPrintf(pHlp,
1718 "MSR:\n"
1719 "%sEFER =%016RX64\n"
1720 "%sPAT =%016RX64\n"
1721 "%sSTAR =%016RX64\n"
1722 "%sCSTAR =%016RX64\n"
1723 "%sLSTAR =%016RX64\n"
1724 "%sSFMASK =%016RX64\n"
1725 "%sKERNELGSBASE =%016RX64\n",
1726 pszPrefix, pCtx->msrEFER,
1727 pszPrefix, pCtx->msrPAT,
1728 pszPrefix, pCtx->msrSTAR,
1729 pszPrefix, pCtx->msrCSTAR,
1730 pszPrefix, pCtx->msrLSTAR,
1731 pszPrefix, pCtx->msrSFMASK,
1732 pszPrefix, pCtx->msrKERNELGSBASE);
1733 break;
1734 }
1735}
1736
1737
1738/**
1739 * Display all cpu states and any other cpum info.
1740 *
1741 * @param pVM VM Handle.
1742 * @param pHlp The info helper functions.
1743 * @param pszArgs Arguments, ignored.
1744 */
1745static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1746{
1747 cpumR3InfoGuest(pVM, pHlp, pszArgs);
1748 cpumR3InfoGuestInstr(pVM, pHlp, pszArgs);
1749 cpumR3InfoHyper(pVM, pHlp, pszArgs);
1750 cpumR3InfoHost(pVM, pHlp, pszArgs);
1751}
1752
1753
1754/**
1755 * Parses the info argument.
1756 *
1757 * The argument starts with 'verbose', 'terse' or 'default' and then
1758 * continues with the comment string.
1759 *
1760 * @param pszArgs The pointer to the argument string.
1761 * @param penmType Where to store the dump type request.
1762 * @param ppszComment Where to store the pointer to the comment string.
1763 */
1764static void cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, const char **ppszComment)
1765{
1766 if (!pszArgs)
1767 {
1768 *penmType = CPUMDUMPTYPE_DEFAULT;
1769 *ppszComment = "";
1770 }
1771 else
1772 {
1773 if (!strncmp(pszArgs, "verbose", sizeof("verbose") - 1))
1774 {
1775 pszArgs += 5;
1776 *penmType = CPUMDUMPTYPE_VERBOSE;
1777 }
1778 else if (!strncmp(pszArgs, "terse", sizeof("terse") - 1))
1779 {
1780 pszArgs += 5;
1781 *penmType = CPUMDUMPTYPE_TERSE;
1782 }
1783 else if (!strncmp(pszArgs, "default", sizeof("default") - 1))
1784 {
1785 pszArgs += 7;
1786 *penmType = CPUMDUMPTYPE_DEFAULT;
1787 }
1788 else
1789 *penmType = CPUMDUMPTYPE_DEFAULT;
1790 *ppszComment = RTStrStripL(pszArgs);
1791 }
1792}
1793
1794
1795/**
1796 * Display the guest cpu state.
1797 *
1798 * @param pVM VM Handle.
1799 * @param pHlp The info helper functions.
1800 * @param pszArgs Arguments, ignored.
1801 */
1802static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1803{
1804 CPUMDUMPTYPE enmType;
1805 const char *pszComment;
1806 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1807
1808 /* @todo SMP support! */
1809 PVMCPU pVCpu = VMMGetCpu(pVM);
1810 if (!pVCpu)
1811 pVCpu = &pVM->aCpus[0];
1812
1813 pHlp->pfnPrintf(pHlp, "Guest CPUM (VCPU %d) state: %s\n", pVCpu->idCpu, pszComment);
1814
1815 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1816 cpumR3InfoOne(pVM, pCtx, CPUMCTX2CORE(pCtx), pHlp, enmType, "");
1817}
1818
1819
1820/**
1821 * Display the current guest instruction
1822 *
1823 * @param pVM VM Handle.
1824 * @param pHlp The info helper functions.
1825 * @param pszArgs Arguments, ignored.
1826 */
1827static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1828{
1829 char szInstruction[256];
1830 /* @todo SMP support! */
1831 PVMCPU pVCpu = VMMGetCpu(pVM);
1832 if (!pVCpu)
1833 pVCpu = &pVM->aCpus[0];
1834
1835 int rc = DBGFR3DisasInstrCurrent(pVCpu, szInstruction, sizeof(szInstruction));
1836 if (RT_SUCCESS(rc))
1837 pHlp->pfnPrintf(pHlp, "\nCPUM: %s\n\n", szInstruction);
1838}
1839
1840
1841/**
1842 * Display the hypervisor cpu state.
1843 *
1844 * @param pVM VM Handle.
1845 * @param pHlp The info helper functions.
1846 * @param pszArgs Arguments, ignored.
1847 */
1848static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1849{
1850 CPUMDUMPTYPE enmType;
1851 const char *pszComment;
1852 /* @todo SMP */
1853 PVMCPU pVCpu = &pVM->aCpus[0];
1854
1855 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1856 pHlp->pfnPrintf(pHlp, "Hypervisor CPUM state: %s\n", pszComment);
1857 cpumR3InfoOne(pVM, &pVCpu->cpum.s.Hyper, pVCpu->cpum.s.pHyperCoreR3, pHlp, enmType, ".");
1858 pHlp->pfnPrintf(pHlp, "CR4OrMask=%#x CR4AndMask=%#x\n", pVM->cpum.s.CR4.OrMask, pVM->cpum.s.CR4.AndMask);
1859}
1860
1861
1862/**
1863 * Display the host cpu state.
1864 *
1865 * @param pVM VM Handle.
1866 * @param pHlp The info helper functions.
1867 * @param pszArgs Arguments, ignored.
1868 */
1869static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1870{
1871 CPUMDUMPTYPE enmType;
1872 const char *pszComment;
1873 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1874 pHlp->pfnPrintf(pHlp, "Host CPUM state: %s\n", pszComment);
1875
1876 /*
1877 * Format the EFLAGS.
1878 */
1879 /* @todo SMP */
1880 PCPUMHOSTCTX pCtx = &pVM->aCpus[0].cpum.s.Host;
1881#if HC_ARCH_BITS == 32
1882 uint32_t efl = pCtx->eflags.u32;
1883#else
1884 uint64_t efl = pCtx->rflags;
1885#endif
1886 char szEFlags[80];
1887 cpumR3InfoFormatFlags(&szEFlags[0], efl);
1888
1889 /*
1890 * Format the registers.
1891 */
1892#if HC_ARCH_BITS == 32
1893# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1894 if (!(pCtx->efer & MSR_K6_EFER_LMA))
1895# endif
1896 {
1897 pHlp->pfnPrintf(pHlp,
1898 "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
1899 "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
1900 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
1901 "cr0=%08RX64 cr2=xxxxxxxx cr3=%08RX64 cr4=%08RX64 gdtr=%08x:%04x ldtr=%04x\n"
1902 "dr[0]=%08RX64 dr[1]=%08RX64x dr[2]=%08RX64 dr[3]=%08RX64x dr[6]=%08RX64 dr[7]=%08RX64\n"
1903 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1904 ,
1905 /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
1906 /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
1907 (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
1908 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
1909 pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
1910 (uint32_t)pCtx->gdtr.uAddr, pCtx->gdtr.cb, (RTSEL)pCtx->ldtr,
1911 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1912 }
1913# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1914 else
1915# endif
1916#endif
1917#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1918 {
1919 pHlp->pfnPrintf(pHlp,
1920 "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
1921 "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
1922 "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
1923 " r8=xxxxxxxxxxxxxxxx r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
1924 "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
1925 "r14=%016RX64 r15=%016RX64\n"
1926 "iopl=%d %31s\n"
1927 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
1928 "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
1929 "cr4=%016RX64 ldtr=%04x tr=%04x\n"
1930 "dr[0]=%016RX64 dr[1]=%016RX64 dr[2]=%016RX64\n"
1931 "dr[3]=%016RX64 dr[6]=%016RX64 dr[7]=%016RX64\n"
1932 "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
1933 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1934 "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
1935 ,
1936 /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
1937 pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
1938 /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
1939 /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
1940 pCtx->r11, pCtx->r12, pCtx->r13,
1941 pCtx->r14, pCtx->r15,
1942 X86_EFL_GET_IOPL(efl), szEFlags,
1943 (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
1944 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
1945 pCtx->cr4, pCtx->ldtr, pCtx->tr,
1946 pCtx->dr0, pCtx->dr1, pCtx->dr2,
1947 pCtx->dr3, pCtx->dr6, pCtx->dr7,
1948 pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
1949 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
1950 pCtx->FSbase, pCtx->GSbase, pCtx->efer);
1951 }
1952#endif
1953}
1954
1955
1956/**
1957 * Get L1 cache / TLS associativity.
1958 */
1959static const char *getCacheAss(unsigned u, char *pszBuf)
1960{
1961 if (u == 0)
1962 return "res0 ";
1963 if (u == 1)
1964 return "direct";
1965 if (u >= 256)
1966 return "???";
1967
1968 RTStrPrintf(pszBuf, 16, "%d way", u);
1969 return pszBuf;
1970}
1971
1972
1973/**
1974 * Get L2 cache soociativity.
1975 */
1976const char *getL2CacheAss(unsigned u)
1977{
1978 switch (u)
1979 {
1980 case 0: return "off ";
1981 case 1: return "direct";
1982 case 2: return "2 way ";
1983 case 3: return "res3 ";
1984 case 4: return "4 way ";
1985 case 5: return "res5 ";
1986 case 6: return "8 way "; case 7: return "res7 ";
1987 case 8: return "16 way";
1988 case 9: return "res9 ";
1989 case 10: return "res10 ";
1990 case 11: return "res11 ";
1991 case 12: return "res12 ";
1992 case 13: return "res13 ";
1993 case 14: return "res14 ";
1994 case 15: return "fully ";
1995 default:
1996 return "????";
1997 }
1998}
1999
2000
2001/**
2002 * Display the guest CpuId leaves.
2003 *
2004 * @param pVM VM Handle.
2005 * @param pHlp The info helper functions.
2006 * @param pszArgs "terse", "default" or "verbose".
2007 */
2008static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2009{
2010 /*
2011 * Parse the argument.
2012 */
2013 unsigned iVerbosity = 1;
2014 if (pszArgs)
2015 {
2016 pszArgs = RTStrStripL(pszArgs);
2017 if (!strcmp(pszArgs, "terse"))
2018 iVerbosity--;
2019 else if (!strcmp(pszArgs, "verbose"))
2020 iVerbosity++;
2021 }
2022
2023 /*
2024 * Start cracking.
2025 */
2026 CPUMCPUID Host;
2027 CPUMCPUID Guest;
2028 unsigned cStdMax = pVM->cpum.s.aGuestCpuIdStd[0].eax;
2029
2030 pHlp->pfnPrintf(pHlp,
2031 " RAW Standard CPUIDs\n"
2032 " Function eax ebx ecx edx\n");
2033 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd); i++)
2034 {
2035 Guest = pVM->cpum.s.aGuestCpuIdStd[i];
2036 ASMCpuId_Idx_ECX(i, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2037
2038 pHlp->pfnPrintf(pHlp,
2039 "Gst: %08x %08x %08x %08x %08x%s\n"
2040 "Hst: %08x %08x %08x %08x\n",
2041 i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
2042 i <= cStdMax ? "" : "*",
2043 Host.eax, Host.ebx, Host.ecx, Host.edx);
2044 }
2045
2046 /*
2047 * If verbose, decode it.
2048 */
2049 if (iVerbosity)
2050 {
2051 Guest = pVM->cpum.s.aGuestCpuIdStd[0];
2052 pHlp->pfnPrintf(pHlp,
2053 "Name: %.04s%.04s%.04s\n"
2054 "Supports: 0-%x\n",
2055 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
2056 }
2057
2058 /*
2059 * Get Features.
2060 */
2061 bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdStd[0].ebx,
2062 pVM->cpum.s.aGuestCpuIdStd[0].ecx,
2063 pVM->cpum.s.aGuestCpuIdStd[0].edx);
2064 if (cStdMax >= 1 && iVerbosity)
2065 {
2066 Guest = pVM->cpum.s.aGuestCpuIdStd[1];
2067 uint32_t uEAX = Guest.eax;
2068
2069 pHlp->pfnPrintf(pHlp,
2070 "Family: %d \tExtended: %d \tEffective: %d\n"
2071 "Model: %d \tExtended: %d \tEffective: %d\n"
2072 "Stepping: %d\n"
2073 "APIC ID: %#04x\n"
2074 "Logical CPUs: %d\n"
2075 "CLFLUSH Size: %d\n"
2076 "Brand ID: %#04x\n",
2077 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
2078 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
2079 ASMGetCpuStepping(uEAX),
2080 (Guest.ebx >> 24) & 0xff,
2081 (Guest.ebx >> 16) & 0xff,
2082 (Guest.ebx >> 8) & 0xff,
2083 (Guest.ebx >> 0) & 0xff);
2084 if (iVerbosity == 1)
2085 {
2086 uint32_t uEDX = Guest.edx;
2087 pHlp->pfnPrintf(pHlp, "Features EDX: ");
2088 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
2089 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
2090 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
2091 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
2092 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
2093 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
2094 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
2095 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
2096 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
2097 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
2098 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
2099 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SEP");
2100 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
2101 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
2102 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
2103 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
2104 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
2105 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
2106 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " PSN");
2107 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " CLFSH");
2108 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " 20");
2109 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " DS");
2110 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ACPI");
2111 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
2112 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
2113 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " SSE");
2114 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " SSE2");
2115 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " SS");
2116 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " HTT");
2117 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " TM");
2118 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " 30");
2119 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " PBE");
2120 pHlp->pfnPrintf(pHlp, "\n");
2121
2122 uint32_t uECX = Guest.ecx;
2123 pHlp->pfnPrintf(pHlp, "Features ECX: ");
2124 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " SSE3");
2125 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " 1");
2126 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " 2");
2127 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " MONITOR");
2128 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " DS-CPL");
2129 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " VMX");
2130 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " 6");
2131 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " EST");
2132 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " TM2");
2133 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " 9");
2134 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " CNXT-ID");
2135 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " 11");
2136 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " 12");
2137 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " CX16");
2138 for (unsigned iBit = 14; iBit < 32; iBit++)
2139 if (uECX & RT_BIT(iBit))
2140 pHlp->pfnPrintf(pHlp, " %d", iBit);
2141 pHlp->pfnPrintf(pHlp, "\n");
2142 }
2143 else
2144 {
2145 ASMCpuId(1, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2146
2147 X86CPUIDFEATEDX EdxHost = *(PX86CPUIDFEATEDX)&Host.edx;
2148 X86CPUIDFEATECX EcxHost = *(PX86CPUIDFEATECX)&Host.ecx;
2149 X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest.edx;
2150 X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest.ecx;
2151
2152 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
2153 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", EdxGuest.u1FPU, EdxHost.u1FPU);
2154 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", EdxGuest.u1VME, EdxHost.u1VME);
2155 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", EdxGuest.u1DE, EdxHost.u1DE);
2156 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", EdxGuest.u1PSE, EdxHost.u1PSE);
2157 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", EdxGuest.u1TSC, EdxHost.u1TSC);
2158 pHlp->pfnPrintf(pHlp, "MSR - Model Specific Registers = %d (%d)\n", EdxGuest.u1MSR, EdxHost.u1MSR);
2159 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", EdxGuest.u1PAE, EdxHost.u1PAE);
2160 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", EdxGuest.u1MCE, EdxHost.u1MCE);
2161 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", EdxGuest.u1CX8, EdxHost.u1CX8);
2162 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", EdxGuest.u1APIC, EdxHost.u1APIC);
2163 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EdxGuest.u1Reserved1, EdxHost.u1Reserved1);
2164 pHlp->pfnPrintf(pHlp, "SEP - SYSENTER and SYSEXIT = %d (%d)\n", EdxGuest.u1SEP, EdxHost.u1SEP);
2165 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", EdxGuest.u1MTRR, EdxHost.u1MTRR);
2166 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", EdxGuest.u1PGE, EdxHost.u1PGE);
2167 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", EdxGuest.u1MCA, EdxHost.u1MCA);
2168 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", EdxGuest.u1CMOV, EdxHost.u1CMOV);
2169 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", EdxGuest.u1PAT, EdxHost.u1PAT);
2170 pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extention = %d (%d)\n", EdxGuest.u1PSE36, EdxHost.u1PSE36);
2171 pHlp->pfnPrintf(pHlp, "PSN - Processor Serial Number = %d (%d)\n", EdxGuest.u1PSN, EdxHost.u1PSN);
2172 pHlp->pfnPrintf(pHlp, "CLFSH - CLFLUSH Instruction. = %d (%d)\n", EdxGuest.u1CLFSH, EdxHost.u1CLFSH);
2173 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EdxGuest.u1Reserved2, EdxHost.u1Reserved2);
2174 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", EdxGuest.u1DS, EdxHost.u1DS);
2175 pHlp->pfnPrintf(pHlp, "ACPI - Thermal Mon. & Soft. Clock Ctrl.= %d (%d)\n", EdxGuest.u1ACPI, EdxHost.u1ACPI);
2176 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", EdxGuest.u1MMX, EdxHost.u1MMX);
2177 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", EdxGuest.u1FXSR, EdxHost.u1FXSR);
2178 pHlp->pfnPrintf(pHlp, "SSE - SSE Support = %d (%d)\n", EdxGuest.u1SSE, EdxHost.u1SSE);
2179 pHlp->pfnPrintf(pHlp, "SSE2 - SSE2 Support = %d (%d)\n", EdxGuest.u1SSE2, EdxHost.u1SSE2);
2180 pHlp->pfnPrintf(pHlp, "SS - Self Snoop = %d (%d)\n", EdxGuest.u1SS, EdxHost.u1SS);
2181 pHlp->pfnPrintf(pHlp, "HTT - Hyper-Threading Technolog = %d (%d)\n", EdxGuest.u1HTT, EdxHost.u1HTT);
2182 pHlp->pfnPrintf(pHlp, "TM - Thermal Monitor = %d (%d)\n", EdxGuest.u1TM, EdxHost.u1TM);
2183 pHlp->pfnPrintf(pHlp, "30 - Reserved = %d (%d)\n", EdxGuest.u1Reserved3, EdxHost.u1Reserved3);
2184 pHlp->pfnPrintf(pHlp, "PBE - Pending Break Enable = %d (%d)\n", EdxGuest.u1PBE, EdxHost.u1PBE);
2185
2186 pHlp->pfnPrintf(pHlp, "Supports SSE3 or not = %d (%d)\n", EcxGuest.u1SSE3, EcxHost.u1SSE3);
2187 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EcxGuest.u1Reserved1, EcxHost.u1Reserved1);
2188 pHlp->pfnPrintf(pHlp, "DS Area 64-bit layout = %d (%d)\n", EcxGuest.u1DTE64, EcxHost.u1DTE64);
2189 pHlp->pfnPrintf(pHlp, "Supports MONITOR/MWAIT = %d (%d)\n", EcxGuest.u1Monitor, EcxHost.u1Monitor);
2190 pHlp->pfnPrintf(pHlp, "CPL-DS - CPL Qualified Debug Store = %d (%d)\n", EcxGuest.u1CPLDS, EcxHost.u1CPLDS);
2191 pHlp->pfnPrintf(pHlp, "VMX - Virtual Machine Technology = %d (%d)\n", EcxGuest.u1VMX, EcxHost.u1VMX);
2192 pHlp->pfnPrintf(pHlp, "SMX - Safer Mode Extensions = %d (%d)\n", EcxGuest.u1SMX, EcxHost.u1SMX);
2193 pHlp->pfnPrintf(pHlp, "Enhanced SpeedStep Technology = %d (%d)\n", EcxGuest.u1EST, EcxHost.u1EST);
2194 pHlp->pfnPrintf(pHlp, "Terminal Monitor 2 = %d (%d)\n", EcxGuest.u1TM2, EcxHost.u1TM2);
2195 pHlp->pfnPrintf(pHlp, "Supports Supplemental SSE3 or not = %d (%d)\n", EcxGuest.u1SSSE3, EcxHost.u1SSSE3);
2196 pHlp->pfnPrintf(pHlp, "L1 Context ID = %d (%d)\n", EcxGuest.u1CNTXID, EcxHost.u1CNTXID);
2197 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u2Reserved2, EcxHost.u2Reserved2);
2198 pHlp->pfnPrintf(pHlp, "CMPXCHG16B = %d (%d)\n", EcxGuest.u1CX16, EcxHost.u1CX16);
2199 pHlp->pfnPrintf(pHlp, "xTPR Update Control = %d (%d)\n", EcxGuest.u1TPRUpdate, EcxHost.u1TPRUpdate);
2200 pHlp->pfnPrintf(pHlp, "Perf/Debug Capability MSR = %d (%d)\n", EcxGuest.u1PDCM, EcxHost.u1PDCM);
2201 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u2Reserved3, EcxHost.u2Reserved3);
2202 pHlp->pfnPrintf(pHlp, "Direct Cache Access = %d (%d)\n", EcxGuest.u1DCA, EcxHost.u1DCA);
2203 pHlp->pfnPrintf(pHlp, "Supports SSE4_1 or not = %d (%d)\n", EcxGuest.u1SSE4_1, EcxHost.u1SSE4_1);
2204 pHlp->pfnPrintf(pHlp, "Supports SSE4_2 or not = %d (%d)\n", EcxGuest.u1SSE4_2, EcxHost.u1SSE4_2);
2205 pHlp->pfnPrintf(pHlp, "Supports the x2APIC extensions = %d (%d)\n", EcxGuest.u1x2APIC, EcxHost.u1x2APIC);
2206 pHlp->pfnPrintf(pHlp, "Supports MOVBE = %d (%d)\n", EcxGuest.u1MOVBE, EcxHost.u1MOVBE);
2207 pHlp->pfnPrintf(pHlp, "Supports POPCNT = %d (%d)\n", EcxGuest.u1POPCNT, EcxHost.u1POPCNT);
2208 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u2Reserved4, EcxHost.u2Reserved4);
2209 pHlp->pfnPrintf(pHlp, "Supports XSAVE = %d (%d)\n", EcxGuest.u1XSAVE, EcxHost.u1XSAVE);
2210 pHlp->pfnPrintf(pHlp, "Supports OSXSAVE = %d (%d)\n", EcxGuest.u1OSXSAVE, EcxHost.u1OSXSAVE);
2211 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u4Reserved5, EcxHost.u4Reserved5);
2212 }
2213 }
2214 if (cStdMax >= 2 && iVerbosity)
2215 {
2216 /** @todo */
2217 }
2218
2219 /*
2220 * Extended.
2221 * Implemented after AMD specs.
2222 */
2223 unsigned cExtMax = pVM->cpum.s.aGuestCpuIdExt[0].eax & 0xffff;
2224
2225 pHlp->pfnPrintf(pHlp,
2226 "\n"
2227 " RAW Extended CPUIDs\n"
2228 " Function eax ebx ecx edx\n");
2229 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt); i++)
2230 {
2231 Guest = pVM->cpum.s.aGuestCpuIdExt[i];
2232 ASMCpuId(0x80000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2233
2234 pHlp->pfnPrintf(pHlp,
2235 "Gst: %08x %08x %08x %08x %08x%s\n"
2236 "Hst: %08x %08x %08x %08x\n",
2237 0x80000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
2238 i <= cExtMax ? "" : "*",
2239 Host.eax, Host.ebx, Host.ecx, Host.edx);
2240 }
2241
2242 /*
2243 * Understandable output
2244 */
2245 if (iVerbosity)
2246 {
2247 Guest = pVM->cpum.s.aGuestCpuIdExt[0];
2248 pHlp->pfnPrintf(pHlp,
2249 "Ext Name: %.4s%.4s%.4s\n"
2250 "Ext Supports: 0x80000000-%#010x\n",
2251 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
2252 }
2253
2254 if (iVerbosity && cExtMax >= 1)
2255 {
2256 Guest = pVM->cpum.s.aGuestCpuIdExt[1];
2257 uint32_t uEAX = Guest.eax;
2258 pHlp->pfnPrintf(pHlp,
2259 "Family: %d \tExtended: %d \tEffective: %d\n"
2260 "Model: %d \tExtended: %d \tEffective: %d\n"
2261 "Stepping: %d\n"
2262 "Brand ID: %#05x\n",
2263 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, ASMGetCpuFamily(uEAX),
2264 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
2265 ASMGetCpuStepping(uEAX),
2266 Guest.ebx & 0xfff);
2267
2268 if (iVerbosity == 1)
2269 {
2270 uint32_t uEDX = Guest.edx;
2271 pHlp->pfnPrintf(pHlp, "Features EDX: ");
2272 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
2273 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
2274 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
2275 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
2276 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
2277 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
2278 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
2279 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
2280 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
2281 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
2282 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
2283 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SCR");
2284 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
2285 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
2286 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
2287 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
2288 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
2289 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
2290 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " 18");
2291 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " 19");
2292 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " NX");
2293 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " 21");
2294 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ExtMMX");
2295 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
2296 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
2297 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " FastFXSR");
2298 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " Page1GB");
2299 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " RDTSCP");
2300 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " 28");
2301 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " LongMode");
2302 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " Ext3DNow");
2303 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " 3DNow");
2304 pHlp->pfnPrintf(pHlp, "\n");
2305
2306 uint32_t uECX = Guest.ecx;
2307 pHlp->pfnPrintf(pHlp, "Features ECX: ");
2308 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " LAHF/SAHF");
2309 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " CMPL");
2310 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " SVM");
2311 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " ExtAPIC");
2312 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " CR8L");
2313 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " ABM");
2314 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " SSE4A");
2315 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MISALNSSE");
2316 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " 3DNOWPRF");
2317 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " OSVW");
2318 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " IBS");
2319 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SSE5");
2320 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " SKINIT");
2321 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " WDT");
2322 for (unsigned iBit = 5; iBit < 32; iBit++)
2323 if (uECX & RT_BIT(iBit))
2324 pHlp->pfnPrintf(pHlp, " %d", iBit);
2325 pHlp->pfnPrintf(pHlp, "\n");
2326 }
2327 else
2328 {
2329 ASMCpuId(0x80000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2330
2331 uint32_t uEdxGst = Guest.edx;
2332 uint32_t uEdxHst = Host.edx;
2333 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
2334 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
2335 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
2336 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
2337 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
2338 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
2339 pHlp->pfnPrintf(pHlp, "MSR - K86 Model Specific Registers = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
2340 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
2341 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
2342 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
2343 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
2344 pHlp->pfnPrintf(pHlp, "10 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
2345 pHlp->pfnPrintf(pHlp, "SEP - SYSCALL and SYSRET = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
2346 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
2347 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
2348 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
2349 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
2350 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
2351 pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extention = %d (%d)\n", !!(uEdxGst & RT_BIT(17)), !!(uEdxHst & RT_BIT(17)));
2352 pHlp->pfnPrintf(pHlp, "18 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
2353 pHlp->pfnPrintf(pHlp, "19 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
2354 pHlp->pfnPrintf(pHlp, "NX - No-Execute Page Protection = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
2355 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
2356 pHlp->pfnPrintf(pHlp, "AXMMX - AMD Extensions to MMX Instr. = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
2357 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
2358 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
2359 pHlp->pfnPrintf(pHlp, "25 - AMD fast FXSAVE and FXRSTOR Instr.= %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
2360 pHlp->pfnPrintf(pHlp, "26 - 1 GB large page support = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
2361 pHlp->pfnPrintf(pHlp, "27 - RDTSCP instruction = %d (%d)\n", !!(uEdxGst & RT_BIT(27)), !!(uEdxHst & RT_BIT(27)));
2362 pHlp->pfnPrintf(pHlp, "28 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(28)), !!(uEdxHst & RT_BIT(28)));
2363 pHlp->pfnPrintf(pHlp, "29 - AMD Long Mode = %d (%d)\n", !!(uEdxGst & RT_BIT(29)), !!(uEdxHst & RT_BIT(29)));
2364 pHlp->pfnPrintf(pHlp, "30 - AMD Extensions to 3DNow = %d (%d)\n", !!(uEdxGst & RT_BIT(30)), !!(uEdxHst & RT_BIT(30)));
2365 pHlp->pfnPrintf(pHlp, "31 - AMD 3DNow = %d (%d)\n", !!(uEdxGst & RT_BIT(31)), !!(uEdxHst & RT_BIT(31)));
2366
2367 uint32_t uEcxGst = Guest.ecx;
2368 uint32_t uEcxHst = Host.ecx;
2369 pHlp->pfnPrintf(pHlp, "LahfSahf - LAHF/SAHF in 64-bit mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 0)), !!(uEcxHst & RT_BIT( 0)));
2370 pHlp->pfnPrintf(pHlp, "CmpLegacy - Core MP legacy mode (depr) = %d (%d)\n", !!(uEcxGst & RT_BIT( 1)), !!(uEcxHst & RT_BIT( 1)));
2371 pHlp->pfnPrintf(pHlp, "SVM - AMD VM Extensions = %d (%d)\n", !!(uEcxGst & RT_BIT( 2)), !!(uEcxHst & RT_BIT( 2)));
2372 pHlp->pfnPrintf(pHlp, "APIC registers starting at 0x400 = %d (%d)\n", !!(uEcxGst & RT_BIT( 3)), !!(uEcxHst & RT_BIT( 3)));
2373 pHlp->pfnPrintf(pHlp, "AltMovCR8 - LOCK MOV CR0 means MOV CR8 = %d (%d)\n", !!(uEcxGst & RT_BIT( 4)), !!(uEcxHst & RT_BIT( 4)));
2374 pHlp->pfnPrintf(pHlp, "Advanced bit manipulation = %d (%d)\n", !!(uEcxGst & RT_BIT( 5)), !!(uEcxHst & RT_BIT( 5)));
2375 pHlp->pfnPrintf(pHlp, "SSE4A instruction support = %d (%d)\n", !!(uEcxGst & RT_BIT( 6)), !!(uEcxHst & RT_BIT( 6)));
2376 pHlp->pfnPrintf(pHlp, "Misaligned SSE mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 7)), !!(uEcxHst & RT_BIT( 7)));
2377 pHlp->pfnPrintf(pHlp, "PREFETCH and PREFETCHW instruction = %d (%d)\n", !!(uEcxGst & RT_BIT( 8)), !!(uEcxHst & RT_BIT( 8)));
2378 pHlp->pfnPrintf(pHlp, "OS visible workaround = %d (%d)\n", !!(uEcxGst & RT_BIT( 9)), !!(uEcxHst & RT_BIT( 9)));
2379 pHlp->pfnPrintf(pHlp, "Instruction based sampling = %d (%d)\n", !!(uEcxGst & RT_BIT(10)), !!(uEcxHst & RT_BIT(10)));
2380 pHlp->pfnPrintf(pHlp, "SSE5 support = %d (%d)\n", !!(uEcxGst & RT_BIT(11)), !!(uEcxHst & RT_BIT(11)));
2381 pHlp->pfnPrintf(pHlp, "SKINIT, STGI, and DEV support = %d (%d)\n", !!(uEcxGst & RT_BIT(12)), !!(uEcxHst & RT_BIT(12)));
2382 pHlp->pfnPrintf(pHlp, "Watchdog timer support. = %d (%d)\n", !!(uEcxGst & RT_BIT(13)), !!(uEcxHst & RT_BIT(13)));
2383 pHlp->pfnPrintf(pHlp, "31:14 - Reserved = %#x (%#x)\n", uEcxGst >> 14, uEcxHst >> 14);
2384 }
2385 }
2386
2387 if (iVerbosity && cExtMax >= 2)
2388 {
2389 char szString[4*4*3+1] = {0};
2390 uint32_t *pu32 = (uint32_t *)szString;
2391 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].eax;
2392 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ebx;
2393 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ecx;
2394 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].edx;
2395 if (cExtMax >= 3)
2396 {
2397 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].eax;
2398 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ebx;
2399 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ecx;
2400 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].edx;
2401 }
2402 if (cExtMax >= 4)
2403 {
2404 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].eax;
2405 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ebx;
2406 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ecx;
2407 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].edx;
2408 }
2409 pHlp->pfnPrintf(pHlp, "Full Name: %s\n", szString);
2410 }
2411
2412 if (iVerbosity && cExtMax >= 5)
2413 {
2414 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[5].eax;
2415 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[5].ebx;
2416 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[5].ecx;
2417 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[5].edx;
2418 char sz1[32];
2419 char sz2[32];
2420
2421 pHlp->pfnPrintf(pHlp,
2422 "TLB 2/4M Instr/Uni: %s %3d entries\n"
2423 "TLB 2/4M Data: %s %3d entries\n",
2424 getCacheAss((uEAX >> 8) & 0xff, sz1), (uEAX >> 0) & 0xff,
2425 getCacheAss((uEAX >> 24) & 0xff, sz2), (uEAX >> 16) & 0xff);
2426 pHlp->pfnPrintf(pHlp,
2427 "TLB 4K Instr/Uni: %s %3d entries\n"
2428 "TLB 4K Data: %s %3d entries\n",
2429 getCacheAss((uEBX >> 8) & 0xff, sz1), (uEBX >> 0) & 0xff,
2430 getCacheAss((uEBX >> 24) & 0xff, sz2), (uEBX >> 16) & 0xff);
2431 pHlp->pfnPrintf(pHlp, "L1 Instr Cache Line Size: %d bytes\n"
2432 "L1 Instr Cache Lines Per Tag: %d\n"
2433 "L1 Instr Cache Associativity: %s\n"
2434 "L1 Instr Cache Size: %d KB\n",
2435 (uEDX >> 0) & 0xff,
2436 (uEDX >> 8) & 0xff,
2437 getCacheAss((uEDX >> 16) & 0xff, sz1),
2438 (uEDX >> 24) & 0xff);
2439 pHlp->pfnPrintf(pHlp,
2440 "L1 Data Cache Line Size: %d bytes\n"
2441 "L1 Data Cache Lines Per Tag: %d\n"
2442 "L1 Data Cache Associativity: %s\n"
2443 "L1 Data Cache Size: %d KB\n",
2444 (uECX >> 0) & 0xff,
2445 (uECX >> 8) & 0xff,
2446 getCacheAss((uECX >> 16) & 0xff, sz1),
2447 (uECX >> 24) & 0xff);
2448 }
2449
2450 if (iVerbosity && cExtMax >= 6)
2451 {
2452 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[6].eax;
2453 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[6].ebx;
2454 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[6].edx;
2455
2456 pHlp->pfnPrintf(pHlp,
2457 "L2 TLB 2/4M Instr/Uni: %s %4d entries\n"
2458 "L2 TLB 2/4M Data: %s %4d entries\n",
2459 getL2CacheAss((uEAX >> 12) & 0xf), (uEAX >> 0) & 0xfff,
2460 getL2CacheAss((uEAX >> 28) & 0xf), (uEAX >> 16) & 0xfff);
2461 pHlp->pfnPrintf(pHlp,
2462 "L2 TLB 4K Instr/Uni: %s %4d entries\n"
2463 "L2 TLB 4K Data: %s %4d entries\n",
2464 getL2CacheAss((uEBX >> 12) & 0xf), (uEBX >> 0) & 0xfff,
2465 getL2CacheAss((uEBX >> 28) & 0xf), (uEBX >> 16) & 0xfff);
2466 pHlp->pfnPrintf(pHlp,
2467 "L2 Cache Line Size: %d bytes\n"
2468 "L2 Cache Lines Per Tag: %d\n"
2469 "L2 Cache Associativity: %s\n"
2470 "L2 Cache Size: %d KB\n",
2471 (uEDX >> 0) & 0xff,
2472 (uEDX >> 8) & 0xf,
2473 getL2CacheAss((uEDX >> 12) & 0xf),
2474 (uEDX >> 16) & 0xffff);
2475 }
2476
2477 if (iVerbosity && cExtMax >= 7)
2478 {
2479 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[7].edx;
2480
2481 pHlp->pfnPrintf(pHlp, "APM Features: ");
2482 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " TS");
2483 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " FID");
2484 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " VID");
2485 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " TTP");
2486 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TM");
2487 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " STC");
2488 for (unsigned iBit = 6; iBit < 32; iBit++)
2489 if (uEDX & RT_BIT(iBit))
2490 pHlp->pfnPrintf(pHlp, " %d", iBit);
2491 pHlp->pfnPrintf(pHlp, "\n");
2492 }
2493
2494 if (iVerbosity && cExtMax >= 8)
2495 {
2496 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[8].eax;
2497 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[8].ecx;
2498
2499 pHlp->pfnPrintf(pHlp,
2500 "Physical Address Width: %d bits\n"
2501 "Virtual Address Width: %d bits\n",
2502 (uEAX >> 0) & 0xff,
2503 (uEAX >> 8) & 0xff);
2504 pHlp->pfnPrintf(pHlp,
2505 "Physical Core Count: %d\n",
2506 (uECX >> 0) & 0xff);
2507 }
2508
2509
2510 /*
2511 * Centaur.
2512 */
2513 unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdCentaur[0].eax & 0xffff;
2514
2515 pHlp->pfnPrintf(pHlp,
2516 "\n"
2517 " RAW Centaur CPUIDs\n"
2518 " Function eax ebx ecx edx\n");
2519 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur); i++)
2520 {
2521 Guest = pVM->cpum.s.aGuestCpuIdCentaur[i];
2522 ASMCpuId(0xc0000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2523
2524 pHlp->pfnPrintf(pHlp,
2525 "Gst: %08x %08x %08x %08x %08x%s\n"
2526 "Hst: %08x %08x %08x %08x\n",
2527 0xc0000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
2528 i <= cCentaurMax ? "" : "*",
2529 Host.eax, Host.ebx, Host.ecx, Host.edx);
2530 }
2531
2532 /*
2533 * Understandable output
2534 */
2535 if (iVerbosity)
2536 {
2537 Guest = pVM->cpum.s.aGuestCpuIdCentaur[0];
2538 pHlp->pfnPrintf(pHlp,
2539 "Centaur Supports: 0xc0000000-%#010x\n",
2540 Guest.eax);
2541 }
2542
2543 if (iVerbosity && cCentaurMax >= 1)
2544 {
2545 ASMCpuId(0xc0000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2546 uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdExt[1].edx;
2547 uint32_t uEdxHst = Host.edx;
2548
2549 if (iVerbosity == 1)
2550 {
2551 pHlp->pfnPrintf(pHlp, "Centaur Features EDX: ");
2552 if (uEdxGst & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " AIS");
2553 if (uEdxGst & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " AIS-E");
2554 if (uEdxGst & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " RNG");
2555 if (uEdxGst & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " RNG-E");
2556 if (uEdxGst & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " LH");
2557 if (uEdxGst & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " FEMMS");
2558 if (uEdxGst & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " ACE");
2559 if (uEdxGst & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " ACE-E");
2560 /* possibly indicating MM/HE and MM/HE-E on older chips... */
2561 if (uEdxGst & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " ACE2");
2562 if (uEdxGst & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " ACE2-E");
2563 if (uEdxGst & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " PHE");
2564 if (uEdxGst & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " PHE-E");
2565 if (uEdxGst & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " PMM");
2566 if (uEdxGst & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PMM-E");
2567 for (unsigned iBit = 14; iBit < 32; iBit++)
2568 if (uEdxGst & RT_BIT(iBit))
2569 pHlp->pfnPrintf(pHlp, " %d", iBit);
2570 pHlp->pfnPrintf(pHlp, "\n");
2571 }
2572 else
2573 {
2574 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
2575 pHlp->pfnPrintf(pHlp, "AIS - Alternate Instruction Set = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
2576 pHlp->pfnPrintf(pHlp, "AIS-E - AIS enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
2577 pHlp->pfnPrintf(pHlp, "RNG - Random Number Generator = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
2578 pHlp->pfnPrintf(pHlp, "RNG-E - RNG enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
2579 pHlp->pfnPrintf(pHlp, "LH - LongHaul MSR 0000_110Ah = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
2580 pHlp->pfnPrintf(pHlp, "FEMMS - FEMMS = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
2581 pHlp->pfnPrintf(pHlp, "ACE - Advanced Cryptography Engine = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
2582 pHlp->pfnPrintf(pHlp, "ACE-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
2583 /* possibly indicating MM/HE and MM/HE-E on older chips... */
2584 pHlp->pfnPrintf(pHlp, "ACE2 - Advanced Cryptography Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
2585 pHlp->pfnPrintf(pHlp, "ACE2-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
2586 pHlp->pfnPrintf(pHlp, "PHE - Hash Engine = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
2587 pHlp->pfnPrintf(pHlp, "PHE-E - PHE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
2588 pHlp->pfnPrintf(pHlp, "PMM - Montgomery Multiplier = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
2589 pHlp->pfnPrintf(pHlp, "PMM-E - PMM enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
2590 for (unsigned iBit = 14; iBit < 32; iBit++)
2591 if ((uEdxGst | uEdxHst) & RT_BIT(iBit))
2592 pHlp->pfnPrintf(pHlp, "Bit %d = %d (%d)\n", !!(uEdxGst & RT_BIT(iBit)), !!(uEdxHst & RT_BIT(iBit)));
2593 pHlp->pfnPrintf(pHlp, "\n");
2594 }
2595 }
2596}
2597
2598
2599/**
2600 * Structure used when disassembling and instructions in DBGF.
2601 * This is used so the reader function can get the stuff it needs.
2602 */
2603typedef struct CPUMDISASSTATE
2604{
2605 /** Pointer to the CPU structure. */
2606 PDISCPUSTATE pCpu;
2607 /** The VM handle. */
2608 PVM pVM;
2609 /** The VMCPU handle. */
2610 PVMCPU pVCpu;
2611 /** Pointer to the first byte in the segemnt. */
2612 RTGCUINTPTR GCPtrSegBase;
2613 /** Pointer to the byte after the end of the segment. (might have wrapped!) */
2614 RTGCUINTPTR GCPtrSegEnd;
2615 /** The size of the segment minus 1. */
2616 RTGCUINTPTR cbSegLimit;
2617 /** Pointer to the current page - R3 Ptr. */
2618 void const *pvPageR3;
2619 /** Pointer to the current page - GC Ptr. */
2620 RTGCPTR pvPageGC;
2621 /** The lock information that PGMPhysReleasePageMappingLock needs. */
2622 PGMPAGEMAPLOCK PageMapLock;
2623 /** Whether the PageMapLock is valid or not. */
2624 bool fLocked;
2625 /** 64 bits mode or not. */
2626 bool f64Bits;
2627} CPUMDISASSTATE, *PCPUMDISASSTATE;
2628
2629
2630/**
2631 * Instruction reader.
2632 *
2633 * @returns VBox status code.
2634 * @param PtrSrc Address to read from.
2635 * In our case this is relative to the selector pointed to by the 2nd user argument of uDisCpu.
2636 * @param pu8Dst Where to store the bytes.
2637 * @param cbRead Number of bytes to read.
2638 * @param uDisCpu Pointer to the disassembler cpu state.
2639 * In this context it's always pointer to the Core of a DBGFDISASSTATE.
2640 */
2641static DECLCALLBACK(int) cpumR3DisasInstrRead(RTUINTPTR PtrSrc, uint8_t *pu8Dst, unsigned cbRead, void *uDisCpu)
2642{
2643 PDISCPUSTATE pCpu = (PDISCPUSTATE)uDisCpu;
2644 PCPUMDISASSTATE pState = (PCPUMDISASSTATE)pCpu->apvUserData[0];
2645 Assert(cbRead > 0);
2646 for (;;)
2647 {
2648 RTGCUINTPTR GCPtr = PtrSrc + pState->GCPtrSegBase;
2649
2650 /* Need to update the page translation? */
2651 if ( !pState->pvPageR3
2652 || (GCPtr >> PAGE_SHIFT) != (pState->pvPageGC >> PAGE_SHIFT))
2653 {
2654 int rc = VINF_SUCCESS;
2655
2656 /* translate the address */
2657 pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK;
2658 if ( MMHyperIsInsideArea(pState->pVM, pState->pvPageGC)
2659 && !HWACCMIsEnabled(pState->pVM))
2660 {
2661 pState->pvPageR3 = MMHyperRCToR3(pState->pVM, (RTRCPTR)pState->pvPageGC);
2662 if (!pState->pvPageR3)
2663 rc = VERR_INVALID_POINTER;
2664 }
2665 else
2666 {
2667 /* Release mapping lock previously acquired. */
2668 if (pState->fLocked)
2669 PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
2670 rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVCpu, pState->pvPageGC, &pState->pvPageR3, &pState->PageMapLock);
2671 pState->fLocked = RT_SUCCESS_NP(rc);
2672 }
2673 if (RT_FAILURE(rc))
2674 {
2675 pState->pvPageR3 = NULL;
2676 return rc;
2677 }
2678 }
2679
2680 /* check the segemnt limit */
2681 if (!pState->f64Bits && PtrSrc > pState->cbSegLimit)
2682 return VERR_OUT_OF_SELECTOR_BOUNDS;
2683
2684 /* calc how much we can read */
2685 uint32_t cb = PAGE_SIZE - (GCPtr & PAGE_OFFSET_MASK);
2686 if (!pState->f64Bits)
2687 {
2688 RTGCUINTPTR cbSeg = pState->GCPtrSegEnd - GCPtr;
2689 if (cb > cbSeg && cbSeg)
2690 cb = cbSeg;
2691 }
2692 if (cb > cbRead)
2693 cb = cbRead;
2694
2695 /* read and advance */
2696 memcpy(pu8Dst, (char *)pState->pvPageR3 + (GCPtr & PAGE_OFFSET_MASK), cb);
2697 cbRead -= cb;
2698 if (!cbRead)
2699 return VINF_SUCCESS;
2700 pu8Dst += cb;
2701 PtrSrc += cb;
2702 }
2703}
2704
2705
2706/**
2707 * Disassemble an instruction and return the information in the provided structure.
2708 *
2709 * @returns VBox status code.
2710 * @param pVM VM Handle
2711 * @param pVCpu VMCPU Handle
2712 * @param pCtx CPU context
2713 * @param GCPtrPC Program counter (relative to CS) to disassemble from.
2714 * @param pCpu Disassembly state
2715 * @param pszPrefix String prefix for logging (debug only)
2716 *
2717 */
2718VMMR3DECL(int) CPUMR3DisasmInstrCPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPTR GCPtrPC, PDISCPUSTATE pCpu, const char *pszPrefix)
2719{
2720 CPUMDISASSTATE State;
2721 int rc;
2722
2723 const PGMMODE enmMode = PGMGetGuestMode(pVCpu);
2724 State.pCpu = pCpu;
2725 State.pvPageGC = 0;
2726 State.pvPageR3 = NULL;
2727 State.pVM = pVM;
2728 State.pVCpu = pVCpu;
2729 State.fLocked = false;
2730 State.f64Bits = false;
2731
2732 /*
2733 * Get selector information.
2734 */
2735 if ( (pCtx->cr0 & X86_CR0_PE)
2736 && pCtx->eflags.Bits.u1VM == 0)
2737 {
2738 if (CPUMAreHiddenSelRegsValid(pVM))
2739 {
2740 State.f64Bits = enmMode >= PGMMODE_AMD64 && pCtx->csHid.Attr.n.u1Long;
2741 State.GCPtrSegBase = pCtx->csHid.u64Base;
2742 State.GCPtrSegEnd = pCtx->csHid.u32Limit + 1 + (RTGCUINTPTR)pCtx->csHid.u64Base;
2743 State.cbSegLimit = pCtx->csHid.u32Limit;
2744 pCpu->mode = (State.f64Bits)
2745 ? CPUMODE_64BIT
2746 : pCtx->csHid.Attr.n.u1DefBig
2747 ? CPUMODE_32BIT
2748 : CPUMODE_16BIT;
2749 }
2750 else
2751 {
2752 DBGFSELINFO SelInfo;
2753
2754 rc = SELMR3GetShadowSelectorInfo(pVM, pCtx->cs, &SelInfo);
2755 if (RT_FAILURE(rc))
2756 {
2757 AssertMsgFailed(("SELMR3GetShadowSelectorInfo failed for %04X:%RGv rc=%d\n", pCtx->cs, GCPtrPC, rc));
2758 return rc;
2759 }
2760
2761 /*
2762 * Validate the selector.
2763 */
2764 rc = DBGFR3SelInfoValidateCS(&SelInfo, pCtx->ss);
2765 if (RT_FAILURE(rc))
2766 {
2767 AssertMsgFailed(("SELMSelInfoValidateCS failed for %04X:%RGv rc=%d\n", pCtx->cs, GCPtrPC, rc));
2768 return rc;
2769 }
2770 State.GCPtrSegBase = SelInfo.GCPtrBase;
2771 State.GCPtrSegEnd = SelInfo.cbLimit + 1 + (RTGCUINTPTR)SelInfo.GCPtrBase;
2772 State.cbSegLimit = SelInfo.cbLimit;
2773 pCpu->mode = SelInfo.u.Raw.Gen.u1DefBig ? CPUMODE_32BIT : CPUMODE_16BIT;
2774 }
2775 }
2776 else
2777 {
2778 /* real or V86 mode */
2779 pCpu->mode = CPUMODE_16BIT;
2780 State.GCPtrSegBase = pCtx->cs * 16;
2781 State.GCPtrSegEnd = 0xFFFFFFFF;
2782 State.cbSegLimit = 0xFFFFFFFF;
2783 }
2784
2785 /*
2786 * Disassemble the instruction.
2787 */
2788 pCpu->pfnReadBytes = cpumR3DisasInstrRead;
2789 pCpu->apvUserData[0] = &State;
2790
2791 uint32_t cbInstr;
2792#ifndef LOG_ENABLED
2793 rc = DISInstr(pCpu, GCPtrPC, 0, &cbInstr, NULL);
2794 if (RT_SUCCESS(rc))
2795 {
2796#else
2797 char szOutput[160];
2798 rc = DISInstr(pCpu, GCPtrPC, 0, &cbInstr, &szOutput[0]);
2799 if (RT_SUCCESS(rc))
2800 {
2801 /* log it */
2802 if (pszPrefix)
2803 Log(("%s-CPU%d: %s", pszPrefix, pVCpu->idCpu, szOutput));
2804 else
2805 Log(("%s", szOutput));
2806#endif
2807 rc = VINF_SUCCESS;
2808 }
2809 else
2810 Log(("CPUMR3DisasmInstrCPU: DISInstr failed for %04X:%RGv rc=%Rrc\n", pCtx->cs, GCPtrPC, rc));
2811
2812 /* Release mapping lock acquired in cpumR3DisasInstrRead. */
2813 if (State.fLocked)
2814 PGMPhysReleasePageMappingLock(pVM, &State.PageMapLock);
2815
2816 return rc;
2817}
2818
2819#ifdef DEBUG
2820
2821/**
2822 * Disassemble an instruction and dump it to the log
2823 *
2824 * @returns VBox status code.
2825 * @param pVM VM Handle
2826 * @param pVCpu VMCPU Handle
2827 * @param pCtx CPU context
2828 * @param pc GC instruction pointer
2829 * @param pszPrefix String prefix for logging
2830 *
2831 * @deprecated Use DBGFR3DisasInstrCurrentLog().
2832 */
2833VMMR3DECL(void) CPUMR3DisasmInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPTR pc, const char *pszPrefix)
2834{
2835 DISCPUSTATE Cpu;
2836 CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pc, &Cpu, pszPrefix);
2837}
2838
2839
2840/**
2841 * Debug helper - Saves guest context on raw mode entry (for fatal dump)
2842 *
2843 * @internal
2844 */
2845VMMR3DECL(void) CPUMR3SaveEntryCtx(PVM pVM)
2846{
2847 /* @todo SMP support!! */
2848 pVM->cpum.s.GuestEntry = *CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
2849}
2850
2851#endif /* DEBUG */
2852
2853/**
2854 * API for controlling a few of the CPU features found in CR4.
2855 *
2856 * Currently only X86_CR4_TSD is accepted as input.
2857 *
2858 * @returns VBox status code.
2859 *
2860 * @param pVM The VM handle.
2861 * @param fOr The CR4 OR mask.
2862 * @param fAnd The CR4 AND mask.
2863 */
2864VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd)
2865{
2866 AssertMsgReturn(!(fOr & ~(X86_CR4_TSD)), ("%#x\n", fOr), VERR_INVALID_PARAMETER);
2867 AssertMsgReturn((fAnd & ~(X86_CR4_TSD)) == ~(X86_CR4_TSD), ("%#x\n", fAnd), VERR_INVALID_PARAMETER);
2868
2869 pVM->cpum.s.CR4.OrMask &= fAnd;
2870 pVM->cpum.s.CR4.OrMask |= fOr;
2871
2872 return VINF_SUCCESS;
2873}
2874
2875
2876/**
2877 * Gets a pointer to the array of standard CPUID leaves.
2878 *
2879 * CPUMR3GetGuestCpuIdStdMax() give the size of the array.
2880 *
2881 * @returns Pointer to the standard CPUID leaves (read-only).
2882 * @param pVM The VM handle.
2883 * @remark Intended for PATM.
2884 */
2885VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdStdRCPtr(PVM pVM)
2886{
2887 return RCPTRTYPE(PCCPUMCPUID)VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
2888}
2889
2890
2891/**
2892 * Gets a pointer to the array of extended CPUID leaves.
2893 *
2894 * CPUMGetGuestCpuIdExtMax() give the size of the array.
2895 *
2896 * @returns Pointer to the extended CPUID leaves (read-only).
2897 * @param pVM The VM handle.
2898 * @remark Intended for PATM.
2899 */
2900VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdExtRCPtr(PVM pVM)
2901{
2902 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
2903}
2904
2905
2906/**
2907 * Gets a pointer to the array of centaur CPUID leaves.
2908 *
2909 * CPUMGetGuestCpuIdCentaurMax() give the size of the array.
2910 *
2911 * @returns Pointer to the centaur CPUID leaves (read-only).
2912 * @param pVM The VM handle.
2913 * @remark Intended for PATM.
2914 */
2915VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdCentaurRCPtr(PVM pVM)
2916{
2917 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdCentaur[0]);
2918}
2919
2920
2921/**
2922 * Gets a pointer to the default CPUID leaf.
2923 *
2924 * @returns Pointer to the default CPUID leaf (read-only).
2925 * @param pVM The VM handle.
2926 * @remark Intended for PATM.
2927 */
2928VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdDefRCPtr(PVM pVM)
2929{
2930 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
2931}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette