VirtualBox

source: vbox/trunk/src/VBox/VMM/CPUM.cpp@24264

Last change on this file since 24264 was 23801, checked in by vboxsync, 15 years ago

Main,VMM,Frontends,++: Terminology. Added a bind address for the (target) teleporter.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 129.6 KB
1/* $Id: CPUM.cpp 23801 2009-10-15 15:00:47Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/** @page pg_cpum CPUM - CPU Monitor / Manager
23 *
24 * The CPU Monitor / Manager keeps track of all the CPU registers. It is
25 * also responsible for lazy FPU handling and some of the context loading
26 * in raw mode.
27 *
28 * There are three CPU contexts; the most important one is the guest one (GC).
29 * When running in raw-mode (RC) there is a special hyper context for the VMM
30 * part that floats around inside the guest address space. When running in
31 * raw-mode, CPUM also maintains a host context for saving and restoring
32 * registers across world switches. The latter is done in cooperation with the
33 * world switcher (@see pg_vmm).
34 *
35 * @see grp_cpum
36 */
37
38/*******************************************************************************
39* Header Files *
40*******************************************************************************/
41#define LOG_GROUP LOG_GROUP_CPUM
42#include <VBox/cpum.h>
43#include <VBox/cpumdis.h>
44#include <VBox/pgm.h>
45#include <VBox/pdm.h>
46#include <VBox/mm.h>
47#include <VBox/selm.h>
48#include <VBox/dbgf.h>
49#include <VBox/patm.h>
50#include <VBox/hwaccm.h>
51#include <VBox/ssm.h>
52#include "CPUMInternal.h"
53#include <VBox/vm.h>
54
55#include <VBox/param.h>
56#include <VBox/dis.h>
57#include <VBox/err.h>
58#include <VBox/log.h>
59#include <iprt/assert.h>
60#include <iprt/asm.h>
61#include <iprt/string.h>
62#include <iprt/mp.h>
63#include <iprt/cpuset.h>
64
65
66/*******************************************************************************
67* Defined Constants And Macros *
68*******************************************************************************/
69/** The current saved state version. */
70#ifdef VBOX_WITH_LIVE_MIGRATION
71#define CPUM_SAVED_STATE_VERSION 11
72#else
73#define CPUM_SAVED_STATE_VERSION 10
74#endif
75/** The saved state version of 3.0 and 3.1 trunk before the teleportation
76 * changes. */
77#define CPUM_SAVED_STATE_VERSION_VER3_0 10
78/** The saved state version for the 2.1 trunk before the MSR changes. */
79#define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR 9
80/** The saved state version of 2.0, used for backwards compatibility. */
81#define CPUM_SAVED_STATE_VERSION_VER2_0 8
82/** The saved state version of 1.6, used for backwards compatibility. */
83#define CPUM_SAVED_STATE_VERSION_VER1_6 6
84
85
86/*******************************************************************************
87* Structures and Typedefs *
88*******************************************************************************/
89
90/**
91 * What kind of cpu info dump to perform.
92 */
93typedef enum CPUMDUMPTYPE
94{
95 CPUMDUMPTYPE_TERSE,
96 CPUMDUMPTYPE_DEFAULT,
97 CPUMDUMPTYPE_VERBOSE
98} CPUMDUMPTYPE;
99/** Pointer to a cpu info dump type. */
100typedef CPUMDUMPTYPE *PCPUMDUMPTYPE;
101
102
103/*******************************************************************************
104* Internal Functions *
105*******************************************************************************/
106static int cpumR3CpuIdInit(PVM pVM);
107#ifdef VBOX_WITH_LIVE_MIGRATION
108static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
109#endif
110static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
111static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
112static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
113static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
114static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
115static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
116static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
117static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
118
119
120/**
121 * Initializes the CPUM.
122 *
123 * @returns VBox status code.
124 * @param pVM The VM to operate on.
125 */
126VMMR3DECL(int) CPUMR3Init(PVM pVM)
127{
128 LogFlow(("CPUMR3Init\n"));
129
130 /*
131 * Assert alignment and sizes.
132 */
133 AssertCompileMemberAlignment(VM, cpum.s, 32);
134 AssertCompile(sizeof(pVM->cpum.s) <= sizeof(pVM->cpum.padding));
135 AssertCompileSizeAlignment(CPUMCTX, 64);
136 AssertCompileSizeAlignment(CPUMCTXMSR, 64);
137 AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
138 AssertCompileMemberAlignment(VM, cpum, 64);
139 AssertCompileMemberAlignment(VM, aCpus, 64);
140 AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
141 AssertCompileMemberSizeAlignment(VM, aCpus[0].cpum.s, 64);
142
143 /* Calculate the offset from CPUM to CPUMCPU for the first CPU. */
144 pVM->cpum.s.ulOffCPUMCPU = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum);
145 Assert((uintptr_t)&pVM->cpum + pVM->cpum.s.ulOffCPUMCPU == (uintptr_t)&pVM->aCpus[0].cpum);
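    /* The stored offset lets the per-CPU CPUMCPU data be located from the
       shared CPUM data with plain pointer arithmetic, which presumably is
       what keeps the lookup working unchanged in ring-3, ring-0 and raw-mode
       context without per-context pointers. */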
146
147 /* Calculate the offset from CPUMCPU to CPUM. */
148 for (VMCPUID i = 0; i < pVM->cCpus; i++)
149 {
150 PVMCPU pVCpu = &pVM->aCpus[i];
151
152 /*
153 * Setup any fixed pointers and offsets.
154 */
155 pVCpu->cpum.s.pHyperCoreR3 = CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
156 pVCpu->cpum.s.pHyperCoreR0 = VM_R0_ADDR(pVM, CPUMCTX2CORE(&pVCpu->cpum.s.Hyper));
157
158 pVCpu->cpum.s.ulOffCPUM = RT_OFFSETOF(VM, aCpus[i].cpum) - RT_OFFSETOF(VM, cpum);
159 Assert((uintptr_t)&pVCpu->cpum - pVCpu->cpum.s.ulOffCPUM == (uintptr_t)&pVM->cpum);
160 }
161
162 /*
163 * Check that the CPU supports the minimum features we require.
164 */
165 if (!ASMHasCpuId())
166 {
167 Log(("The CPU doesn't support CPUID!\n"));
168 return VERR_UNSUPPORTED_CPU;
169 }
170 ASMCpuId_ECX_EDX(1, &pVM->cpum.s.CPUFeatures.ecx, &pVM->cpum.s.CPUFeatures.edx);
171 ASMCpuId_ECX_EDX(0x80000001, &pVM->cpum.s.CPUFeaturesExt.ecx, &pVM->cpum.s.CPUFeaturesExt.edx);
172
173 /* Setup the CR4 AND and OR masks used in the switcher */
174 /* Depends on the presence of FXSAVE(SSE) support on the host CPU */
175 if (!pVM->cpum.s.CPUFeatures.edx.u1FXSR)
176 {
177 Log(("The CPU doesn't support FXSAVE/FXRSTOR!\n"));
178 /* No FXSAVE implies no SSE */
179 pVM->cpum.s.CR4.AndMask = X86_CR4_PVI | X86_CR4_VME;
180 pVM->cpum.s.CR4.OrMask = 0;
181 }
182 else
183 {
184 pVM->cpum.s.CR4.AndMask = X86_CR4_OSXMMEEXCPT | X86_CR4_PVI | X86_CR4_VME;
185 pVM->cpum.s.CR4.OrMask = X86_CR4_OSFSXR;
186 }
187
188 if (!pVM->cpum.s.CPUFeatures.edx.u1MMX)
189 {
190 Log(("The CPU doesn't support MMX!\n"));
191 return VERR_UNSUPPORTED_CPU;
192 }
193 if (!pVM->cpum.s.CPUFeatures.edx.u1TSC)
194 {
195 Log(("The CPU doesn't support TSC!\n"));
196 return VERR_UNSUPPORTED_CPU;
197 }
198 /* Bogus on AMD? */
199 if (!pVM->cpum.s.CPUFeatures.edx.u1SEP)
200 Log(("The CPU doesn't support SYSENTER/SYSEXIT!\n"));
201
202 /*
203 * Setup hypervisor startup values.
204 */
205
206 /*
207 * Register saved state data item.
208 */
209#ifdef VBOX_WITH_LIVE_MIGRATION
210 int rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
211 NULL, cpumR3LiveExec, NULL,
212 NULL, cpumR3SaveExec, NULL,
213 NULL, cpumR3LoadExec, NULL);
214#else
215 int rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
216 NULL, NULL, NULL,
217 NULL, cpumR3SaveExec, NULL,
218 NULL, cpumR3LoadExec, NULL);
219#endif
220 if (RT_FAILURE(rc))
221 return rc;
222
223 /* Query the CPU manufacturer. */
224 uint32_t uEAX, uEBX, uECX, uEDX;
225 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
226 if ( uEAX >= 1
227 && uEBX == X86_CPUID_VENDOR_AMD_EBX
228 && uECX == X86_CPUID_VENDOR_AMD_ECX
229 && uEDX == X86_CPUID_VENDOR_AMD_EDX)
230 {
231 pVM->cpum.s.enmGuestCpuVendor = pVM->cpum.s.enmHostCpuVendor = CPUMCPUVENDOR_AMD;
232 }
233 else if ( uEAX >= 1
234 && uEBX == X86_CPUID_VENDOR_INTEL_EBX
235 && uECX == X86_CPUID_VENDOR_INTEL_ECX
236 && uEDX == X86_CPUID_VENDOR_INTEL_EDX)
237 {
238 pVM->cpum.s.enmGuestCpuVendor = pVM->cpum.s.enmHostCpuVendor = CPUMCPUVENDOR_INTEL;
239 }
240 else /** @todo Via */
241 {
242 pVM->cpum.s.enmGuestCpuVendor = pVM->cpum.s.enmHostCpuVendor = CPUMCPUVENDOR_UNKNOWN;
243 }
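    /* For reference: CPUID leaf 0 returns the vendor string in EBX:EDX:ECX
       order, so "AuthenticAMD" packs as EBX=0x68747541, EDX=0x69746e65,
       ECX=0x444d4163 and "GenuineIntel" as EBX=0x756e6547, EDX=0x49656e69,
       ECX=0x6c65746e - the values the X86_CPUID_VENDOR_* constants compared
       above encode. */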
244
245 /*
246 * Register info handlers.
247 */
248 DBGFR3InfoRegisterInternal(pVM, "cpum", "Displays all the cpu states.", &cpumR3InfoAll);
249 DBGFR3InfoRegisterInternal(pVM, "cpumguest", "Displays the guest cpu state.", &cpumR3InfoGuest);
250 DBGFR3InfoRegisterInternal(pVM, "cpumhyper", "Displays the hypervisor cpu state.", &cpumR3InfoHyper);
251 DBGFR3InfoRegisterInternal(pVM, "cpumhost", "Displays the host cpu state.", &cpumR3InfoHost);
252 DBGFR3InfoRegisterInternal(pVM, "cpuid", "Displays the guest cpuid leaves.", &cpumR3CpuIdInfo);
253 DBGFR3InfoRegisterInternal(pVM, "cpumguestinstr", "Displays the current guest instruction.", &cpumR3InfoGuestInstr);
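    /* These registrations back the debugger console's "info" command; for
       example "info cpumguest" or "info cpuid verbose" in the VM debugger
       should dump the corresponding state (a usage sketch; the exact console
       syntax depends on the DBGC front-end). */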
254
255 /*
256 * Initialize the Guest CPU state.
257 */
258 rc = cpumR3CpuIdInit(pVM);
259 if (RT_FAILURE(rc))
260 return rc;
261 CPUMR3Reset(pVM);
262 return VINF_SUCCESS;
263}
264
265
266/**
267 * Initializes the per-VCPU CPUM.
268 *
269 * @returns VBox status code.
270 * @param pVM The VM to operate on.
271 */
272VMMR3DECL(int) CPUMR3InitCPU(PVM pVM)
273{
274 LogFlow(("CPUMR3InitCPU\n"));
275 return VINF_SUCCESS;
276}
277
278
279/**
280 * Initializes the emulated CPU's cpuid information.
281 *
282 * @returns VBox status code.
283 * @param pVM The VM to operate on.
284 */
285static int cpumR3CpuIdInit(PVM pVM)
286{
287 PCPUM pCPUM = &pVM->cpum.s;
288 uint32_t i;
289
290 /*
291 * Get the host CPUIDs.
292 */
293 for (i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd); i++)
294 ASMCpuId_Idx_ECX(i, 0,
295 &pCPUM->aGuestCpuIdStd[i].eax, &pCPUM->aGuestCpuIdStd[i].ebx,
296 &pCPUM->aGuestCpuIdStd[i].ecx, &pCPUM->aGuestCpuIdStd[i].edx);
297 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt); i++)
298 ASMCpuId(0x80000000 + i,
299 &pCPUM->aGuestCpuIdExt[i].eax, &pCPUM->aGuestCpuIdExt[i].ebx,
300 &pCPUM->aGuestCpuIdExt[i].ecx, &pCPUM->aGuestCpuIdExt[i].edx);
301 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
302 ASMCpuId(0xc0000000 + i,
303 &pCPUM->aGuestCpuIdCentaur[i].eax, &pCPUM->aGuestCpuIdCentaur[i].ebx,
304 &pCPUM->aGuestCpuIdCentaur[i].ecx, &pCPUM->aGuestCpuIdCentaur[i].edx);
305
306 /*
307 * Only report features we can support.
308 */
309 pCPUM->aGuestCpuIdStd[1].edx &= X86_CPUID_FEATURE_EDX_FPU
310 | X86_CPUID_FEATURE_EDX_VME
311 | X86_CPUID_FEATURE_EDX_DE
312 | X86_CPUID_FEATURE_EDX_PSE
313 | X86_CPUID_FEATURE_EDX_TSC
314 | X86_CPUID_FEATURE_EDX_MSR
315 //| X86_CPUID_FEATURE_EDX_PAE - not implemented yet.
316 | X86_CPUID_FEATURE_EDX_MCE
317 | X86_CPUID_FEATURE_EDX_CX8
318 //| X86_CPUID_FEATURE_EDX_APIC - set by the APIC device if present.
319 /** @note we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see #1757) */
320 //| X86_CPUID_FEATURE_EDX_SEP
321 | X86_CPUID_FEATURE_EDX_MTRR
322 | X86_CPUID_FEATURE_EDX_PGE
323 | X86_CPUID_FEATURE_EDX_MCA
324 | X86_CPUID_FEATURE_EDX_CMOV
325 | X86_CPUID_FEATURE_EDX_PAT
326 | X86_CPUID_FEATURE_EDX_PSE36
327 //| X86_CPUID_FEATURE_EDX_PSN - no serial number.
328 | X86_CPUID_FEATURE_EDX_CLFSH
329 //| X86_CPUID_FEATURE_EDX_DS - no debug store.
330 //| X86_CPUID_FEATURE_EDX_ACPI - not virtualized yet.
331 | X86_CPUID_FEATURE_EDX_MMX
332 | X86_CPUID_FEATURE_EDX_FXSR
333 | X86_CPUID_FEATURE_EDX_SSE
334 | X86_CPUID_FEATURE_EDX_SSE2
335 //| X86_CPUID_FEATURE_EDX_SS - no self snoop.
336 //| X86_CPUID_FEATURE_EDX_HTT - no hyperthreading.
337 //| X86_CPUID_FEATURE_EDX_TM - no thermal monitor.
338 //| X86_CPUID_FEATURE_EDX_PBE - no pending break enable.
339 | 0;
340 pCPUM->aGuestCpuIdStd[1].ecx &= 0
341 | X86_CPUID_FEATURE_ECX_SSE3
342 /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
343 | ((pVM->cCpus == 1) ? X86_CPUID_FEATURE_ECX_MONITOR : 0)
344 //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
345 //| X86_CPUID_FEATURE_ECX_VMX - not virtualized.
346 //| X86_CPUID_FEATURE_ECX_EST - no extended speed step.
347 //| X86_CPUID_FEATURE_ECX_TM2 - no thermal monitor 2.
348 //| X86_CPUID_FEATURE_ECX_SSSE3 - no SSSE3 support
349 //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
350 //| X86_CPUID_FEATURE_ECX_CX16 - no cmpxchg16b
351 /* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
352 //| X86_CPUID_FEATURE_ECX_TPRUPDATE
353 /* ECX Bit 21 - x2APIC support - not yet. */
354 // | X86_CPUID_FEATURE_ECX_X2APIC
355 /* ECX Bit 23 - POPCOUNT instruction. */
356 //| X86_CPUID_FEATURE_ECX_POPCOUNT
357 | 0;
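    /* Net effect of the masking above: the guest sees the bitwise AND of the
       host's CPUID.1 feature bits and this whitelist. E.g. a host with both
       SSE3 and SSSE3 reports only SSE3 to the guest, as SSSE3 is commented
       out. */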
358
359 /* ASSUMES that this is ALWAYS the AMD-defined feature set if present. */
360 pCPUM->aGuestCpuIdExt[1].edx &= X86_CPUID_AMD_FEATURE_EDX_FPU
361 | X86_CPUID_AMD_FEATURE_EDX_VME
362 | X86_CPUID_AMD_FEATURE_EDX_DE
363 | X86_CPUID_AMD_FEATURE_EDX_PSE
364 | X86_CPUID_AMD_FEATURE_EDX_TSC
365 | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
366 //| X86_CPUID_AMD_FEATURE_EDX_PAE - not implemented yet.
367 //| X86_CPUID_AMD_FEATURE_EDX_MCE - not virtualized yet.
368 | X86_CPUID_AMD_FEATURE_EDX_CX8
369 //| X86_CPUID_AMD_FEATURE_EDX_APIC - set by the APIC device if present.
370 /** @note we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see #1757) */
371 //| X86_CPUID_AMD_FEATURE_EDX_SEP
372 | X86_CPUID_AMD_FEATURE_EDX_MTRR
373 | X86_CPUID_AMD_FEATURE_EDX_PGE
374 | X86_CPUID_AMD_FEATURE_EDX_MCA
375 | X86_CPUID_AMD_FEATURE_EDX_CMOV
376 | X86_CPUID_AMD_FEATURE_EDX_PAT
377 | X86_CPUID_AMD_FEATURE_EDX_PSE36
378 //| X86_CPUID_AMD_FEATURE_EDX_NX - not virtualized, requires PAE.
379 //| X86_CPUID_AMD_FEATURE_EDX_AXMMX
380 | X86_CPUID_AMD_FEATURE_EDX_MMX
381 | X86_CPUID_AMD_FEATURE_EDX_FXSR
382 | X86_CPUID_AMD_FEATURE_EDX_FFXSR
383 //| X86_CPUID_AMD_FEATURE_EDX_PAGE1GB
384 //| X86_CPUID_AMD_FEATURE_EDX_RDTSCP - AMD only; turned on when necessary
385 //| X86_CPUID_AMD_FEATURE_EDX_LONG_MODE - turned on when necessary
386 | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
387 | X86_CPUID_AMD_FEATURE_EDX_3DNOW
388 | 0;
389 pCPUM->aGuestCpuIdExt[1].ecx &= 0
390 //| X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF
391 //| X86_CPUID_AMD_FEATURE_ECX_CMPL
392 //| X86_CPUID_AMD_FEATURE_ECX_SVM - not virtualized.
393 //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
394 /* Note: This could prevent teleporting from AMD to Intel CPUs! */
395 | X86_CPUID_AMD_FEATURE_ECX_CR8L /* expose lock mov cr0 = mov cr8 hack for guests that can use this feature to access the TPR. */
396 //| X86_CPUID_AMD_FEATURE_ECX_ABM
397 //| X86_CPUID_AMD_FEATURE_ECX_SSE4A
398 //| X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
399 //| X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
400 //| X86_CPUID_AMD_FEATURE_ECX_OSVW
401 //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
402 //| X86_CPUID_AMD_FEATURE_ECX_WDT
403 | 0;
404
405 CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"), "SyntheticCpu", &pCPUM->fSyntheticCpu, false);
406 if (pCPUM->fSyntheticCpu)
407 {
408 const char szVendor[13] = "VirtualBox  ";
409 const char szProcessor[48] = "VirtualBox SPARCx86 Processor v1000            "; /* includes null terminator */
410
411 pCPUM->enmGuestCpuVendor = CPUMCPUVENDOR_SYNTHETIC;
412
413 /* Limit the nr of standard leaves; 5 for monitor/mwait */
414 pCPUM->aGuestCpuIdStd[0].eax = RT_MIN(pCPUM->aGuestCpuIdStd[0].eax, 5);
415
416 /* 0: Vendor */
417 pCPUM->aGuestCpuIdStd[0].ebx = pCPUM->aGuestCpuIdExt[0].ebx = ((uint32_t *)szVendor)[0];
418 pCPUM->aGuestCpuIdStd[0].ecx = pCPUM->aGuestCpuIdExt[0].ecx = ((uint32_t *)szVendor)[2];
419 pCPUM->aGuestCpuIdStd[0].edx = pCPUM->aGuestCpuIdExt[0].edx = ((uint32_t *)szVendor)[1];
420
421 /* 1.eax: Version information. family : model : stepping */
422 pCPUM->aGuestCpuIdStd[1].eax = (0xf << 8) + (0x1 << 4) + 1;
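    /* I.e. EAX = 0x00000f11: stepping 1 (bits 3:0), model 1 (bits 7:4),
       family 0xf (bits 11:8), following the standard CPUID.1.EAX layout. */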
423
424 /* Leaves 2 - 4 are Intel only - zero them out */
425 memset(&pCPUM->aGuestCpuIdStd[2], 0, sizeof(pCPUM->aGuestCpuIdStd[2]));
426 memset(&pCPUM->aGuestCpuIdStd[3], 0, sizeof(pCPUM->aGuestCpuIdStd[3]));
427 memset(&pCPUM->aGuestCpuIdStd[4], 0, sizeof(pCPUM->aGuestCpuIdStd[4]));
428
429 /* Leaf 5 = monitor/mwait */
430
431 /* Limit the nr of extended leaves: 0x80000008 to include the max virtual and physical address size (64 bits guests). */
432 pCPUM->aGuestCpuIdExt[0].eax = RT_MIN(pCPUM->aGuestCpuIdExt[0].eax, 0x80000008);
433 /* AMD only - set to zero. */
434 pCPUM->aGuestCpuIdExt[0].ebx = pCPUM->aGuestCpuIdExt[0].ecx = pCPUM->aGuestCpuIdExt[0].edx = 0;
435
436 /* 0x80000001: AMD only; shared feature bits are set dynamically. */
437 memset(&pCPUM->aGuestCpuIdExt[1], 0, sizeof(pCPUM->aGuestCpuIdExt[1]));
438
439 /* 0x80000002-4: Processor Name String Identifier. */
440 pCPUM->aGuestCpuIdExt[2].eax = ((uint32_t *)szProcessor)[0];
441 pCPUM->aGuestCpuIdExt[2].ebx = ((uint32_t *)szProcessor)[1];
442 pCPUM->aGuestCpuIdExt[2].ecx = ((uint32_t *)szProcessor)[2];
443 pCPUM->aGuestCpuIdExt[2].edx = ((uint32_t *)szProcessor)[3];
444 pCPUM->aGuestCpuIdExt[3].eax = ((uint32_t *)szProcessor)[4];
445 pCPUM->aGuestCpuIdExt[3].ebx = ((uint32_t *)szProcessor)[5];
446 pCPUM->aGuestCpuIdExt[3].ecx = ((uint32_t *)szProcessor)[6];
447 pCPUM->aGuestCpuIdExt[3].edx = ((uint32_t *)szProcessor)[7];
448 pCPUM->aGuestCpuIdExt[4].eax = ((uint32_t *)szProcessor)[8];
449 pCPUM->aGuestCpuIdExt[4].ebx = ((uint32_t *)szProcessor)[9];
450 pCPUM->aGuestCpuIdExt[4].ecx = ((uint32_t *)szProcessor)[10];
451 pCPUM->aGuestCpuIdExt[4].edx = ((uint32_t *)szProcessor)[11];
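    /* The 48-byte name string maps linearly onto leaves 0x80000002..4:
       dword i goes to leaf 0x80000002 + i/4, registers in EAX, EBX, ECX, EDX
       order, four characters per register, little endian. */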
452
453 /* 0x80000005-7 - reserved -> zero */
454 memset(&pCPUM->aGuestCpuIdExt[5], 0, sizeof(pCPUM->aGuestCpuIdExt[5]));
455 memset(&pCPUM->aGuestCpuIdExt[6], 0, sizeof(pCPUM->aGuestCpuIdExt[6]));
456 memset(&pCPUM->aGuestCpuIdExt[7], 0, sizeof(pCPUM->aGuestCpuIdExt[7]));
457
458 /* 0x80000008: only the max virtual and physical address size. */
459 pCPUM->aGuestCpuIdExt[8].ecx = pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */
460 }
461
462 /*
463 * Hide HTT, multicore, SMP, whatever.
464 * (APIC-ID := 0 and #LogCpus := 0)
465 */
466 pCPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;
467#ifdef VBOX_WITH_MULTI_CORE
468 if ( pVM->cCpus > 1
469 && pCPUM->enmGuestCpuVendor != CPUMCPUVENDOR_SYNTHETIC)
470 {
471 /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU core times the number of CPU cores per processor */
472 pCPUM->aGuestCpuIdStd[1].ebx |= (pVM->cCpus << 16);
473 pCPUM->aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_HTT; /* necessary for hyper-threading *or* multi-core CPUs */
474 }
475#endif
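    /* CPUID.1.EBX bits 23:16 hold the logical processor count (valid when
       HTT=1) and bits 31:24 the initial APIC ID; the 0x0000ffff mask above
       cleared both before the count was patched back in for SMP guests. */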
476
477 /* Cpuid 2:
478 * Intel: Cache and TLB information
479 * AMD: Reserved
480 * Safe to expose
481 */
482
483 /* Cpuid 3:
484 * Intel: EAX, EBX - reserved
485 * ECX, EDX - Processor Serial Number if available, otherwise reserved
486 * AMD: Reserved
487 * Safe to expose
488 */
489 if (!(pCPUM->aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PSN))
490 pCPUM->aGuestCpuIdStd[3].ecx = pCPUM->aGuestCpuIdStd[3].edx = 0;
491
492 /* Cpuid 4:
493 * Intel: Deterministic Cache Parameters Leaf
494 * Note: Depends on the ECX input! -> Feeling rather lazy now, so we just return 0
495 * AMD: Reserved
496 * Safe to expose, except for EAX:
497 * Bits 25-14: Maximum number of addressable IDs for logical processors sharing this cache (see note)**
498 * Bits 31-26: Maximum number of processor cores in this physical package**
499 * @Note These SMP values are constant regardless of ECX
500 */
501 pCPUM->aGuestCpuIdStd[4].ecx = pCPUM->aGuestCpuIdStd[4].edx = 0;
502 pCPUM->aGuestCpuIdStd[4].eax = pCPUM->aGuestCpuIdStd[4].ebx = 0;
503#ifdef VBOX_WITH_MULTI_CORE
504 if ( pVM->cCpus > 1
505 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
506 {
507 AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
508 /* One logical processor with possibly multiple cores. */
509 /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
510 pCPUM->aGuestCpuIdStd[4].eax |= ((pVM->cCpus - 1) << 26); /* 6 bits only -> 64 cores! */
511 }
512#endif
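    /* Leaf 4 EAX bits 31:26 hold "maximum cores per physical package minus
       one", hence the (cCpus - 1) << 26 and the 6-bit/64-core limit asserted
       above. */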
513
514 /* Cpuid 5: Monitor/mwait Leaf
515 * Intel: ECX, EDX - reserved
516 * EAX, EBX - Smallest and largest monitor line size
517 * AMD: EDX - reserved
518 * EAX, EBX - Smallest and largest monitor line size
519 * ECX - extensions (ignored for now)
520 * Safe to expose
521 */
522 if (!(pCPUM->aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR))
523 pCPUM->aGuestCpuIdStd[5].eax = pCPUM->aGuestCpuIdStd[5].ebx = 0;
524
525 pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;
526
527 /*
528 * Determine the default.
529 *
530 * Intel returns values of the highest standard function, while AMD
531 * returns zeros. VIA on the other hand seems to return nothing or
532 * perhaps some random garbage; we don't try to duplicate this behavior.
533 */
534 ASMCpuId(pCPUM->aGuestCpuIdStd[0].eax + 10,
535 &pCPUM->GuestCpuIdDef.eax, &pCPUM->GuestCpuIdDef.ebx,
536 &pCPUM->GuestCpuIdDef.ecx, &pCPUM->GuestCpuIdDef.edx);
537
538 /* Cpuid 0x80000005 & 0x80000006 contain information about L1, L2 & L3 cache and TLB identifiers.
539 * Safe to pass on to the guest.
540 *
541 * Intel: 0x80000005 reserved
542 * 0x80000006 L2 cache information
543 * AMD: 0x80000005 L1 cache information
544 * 0x80000006 L2/L3 cache information
545 */
546
547 /* Cpuid 0x80000007:
548 * AMD: EAX, EBX, ECX - reserved
549 * EDX: Advanced Power Management Information
550 * Intel: Reserved
551 */
552 if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000007))
553 {
554 Assert(pVM->cpum.s.enmGuestCpuVendor != CPUMCPUVENDOR_INVALID);
555
556 pCPUM->aGuestCpuIdExt[7].eax = pCPUM->aGuestCpuIdExt[7].ebx = pCPUM->aGuestCpuIdExt[7].ecx = 0;
557
558 if (pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
559 {
560 /* Only expose the TSC invariant capability bit to the guest. */
561 pCPUM->aGuestCpuIdExt[7].edx &= 0
562 //| X86_CPUID_AMD_ADVPOWER_EDX_TS
563 //| X86_CPUID_AMD_ADVPOWER_EDX_FID
564 //| X86_CPUID_AMD_ADVPOWER_EDX_VID
565 //| X86_CPUID_AMD_ADVPOWER_EDX_TTP
566 //| X86_CPUID_AMD_ADVPOWER_EDX_TM
567 //| X86_CPUID_AMD_ADVPOWER_EDX_STC
568 //| X86_CPUID_AMD_ADVPOWER_EDX_MC
569 //| X86_CPUID_AMD_ADVPOWER_EDX_HWPSTATE
570#if 1
571 /* We don't expose X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR, because newer Linux kernels blindly assume
572 * that the AMD performance counters work if this is set for 64-bit guests (we can't really find a CPUID feature bit for them, though).
573 */
574#else
575 | X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR
576#endif
577 | 0;
578 }
579 else
580 pCPUM->aGuestCpuIdExt[7].edx = 0;
581 }
582
583 /* Cpuid 0x80000008:
584 * AMD: EBX, EDX - reserved
585 * EAX: Virtual/Physical address Size
586 * ECX: Number of cores + APICIdCoreIdSize
587 * Intel: EAX: Virtual/Physical address Size
588 * EBX, ECX, EDX - reserved
589 */
590 if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008))
591 {
592 /* Only expose the virtual and physical address sizes to the guest. (EAX completely) */
593 pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */
594 /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu)
595 * NC (0-7) Number of cores; 0 equals 1 core */
596 pCPUM->aGuestCpuIdExt[8].ecx = 0;
597#ifdef VBOX_WITH_MULTI_CORE
598 if ( pVM->cCpus > 1
599 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
600 {
601 /* Legacy method to determine the number of cores. */
602 pCPUM->aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
603 pCPUM->aGuestCpuIdExt[8].ecx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */
604
605 }
606#endif
607 }
608
609 /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
610 * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
611 * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
612 * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
613 */
614 bool fNt4LeafLimit;
615 CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"), "NT4LeafLimit", &fNt4LeafLimit, false);
616 if (fNt4LeafLimit)
617 pCPUM->aGuestCpuIdStd[0].eax = 3;
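    /* A sketch of how this is typically switched on from the host side,
       assuming the usual VBoxInternal extradata-to-CFGM mapping:
           VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/NT4LeafLimit" 1
       ("MyVM" is a placeholder VM name.) */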
618
619 /*
620 * Limit the number of entries and fill the remaining with the defaults.
621 *
622 * The limits mask off stuff about power saving and similar; this is
623 * perhaps a bit crudely done, as there is probably some relatively harmless
624 * info in these leaves too (like words about having a constant TSC).
625 */
626 if (pCPUM->aGuestCpuIdStd[0].eax > 5)
627 pCPUM->aGuestCpuIdStd[0].eax = 5;
628
629 for (i = pCPUM->aGuestCpuIdStd[0].eax + 1; i < RT_ELEMENTS(pCPUM->aGuestCpuIdStd); i++)
630 pCPUM->aGuestCpuIdStd[i] = pCPUM->GuestCpuIdDef;
631
632 if (pCPUM->aGuestCpuIdExt[0].eax > UINT32_C(0x80000008))
633 pCPUM->aGuestCpuIdExt[0].eax = UINT32_C(0x80000008);
634 for (i = pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000000)
635 ? pCPUM->aGuestCpuIdExt[0].eax - UINT32_C(0x80000000) + 1
636 : 0;
637 i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt); i++)
638 pCPUM->aGuestCpuIdExt[i] = pCPUM->GuestCpuIdDef;
639
640 /*
641 * Workaround for missing cpuid(0) patches when leaf 4 returns GuestCpuIdDef:
642 * If we fail to patch cpuid(0).eax, Linux tries to determine the number
643 * of processors from (cpuid(4).eax >> 26) + 1.
644 */
645 if (pVM->cCpus == 1)
646 pCPUM->aGuestCpuIdStd[4].eax = 0;
647
648 /*
649 * Centaur stuff (VIA).
650 *
651 * The important part here (we think) is to make sure the 0xc0000000
652 * function returns 0xc0000001. As for the features, we don't currently
653 * let on about any of those... 0xc0000002 seems to be some
654 * temperature/hz/++ stuff, include it as well (static).
655 */
656 if ( pCPUM->aGuestCpuIdCentaur[0].eax >= UINT32_C(0xc0000000)
657 && pCPUM->aGuestCpuIdCentaur[0].eax <= UINT32_C(0xc0000004))
658 {
659 pCPUM->aGuestCpuIdCentaur[0].eax = RT_MIN(pCPUM->aGuestCpuIdCentaur[0].eax, UINT32_C(0xc0000002));
660 pCPUM->aGuestCpuIdCentaur[1].edx = 0; /* all features hidden */
661 for (i = pCPUM->aGuestCpuIdCentaur[0].eax - UINT32_C(0xc0000000);
662 i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
663 i++)
664 pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
665 }
666 else
667 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
668 pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
669
670
671 /*
672 * Load CPUID overrides from configuration.
673 */
674 /** @cfgm{CPUM/CPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
675 * Overrides the CPUID leaf values. */
676 PCPUMCPUID pCpuId = &pCPUM->aGuestCpuIdStd[0];
677 uint32_t cElements = RT_ELEMENTS(pCPUM->aGuestCpuIdStd);
678 for (i=0;; )
679 {
680 while (cElements-- > 0)
681 {
682 PCFGMNODE pNode = CFGMR3GetChildF(CFGMR3GetRoot(pVM), "CPUM/CPUID/%RX32", i);
683 if (pNode)
684 {
685 uint32_t u32;
686 int rc = CFGMR3QueryU32(pNode, "eax", &u32);
687 if (RT_SUCCESS(rc))
688 pCpuId->eax = u32;
689 else
690 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
691
692 rc = CFGMR3QueryU32(pNode, "ebx", &u32);
693 if (RT_SUCCESS(rc))
694 pCpuId->ebx = u32;
695 else
696 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
697
698 rc = CFGMR3QueryU32(pNode, "ecx", &u32);
699 if (RT_SUCCESS(rc))
700 pCpuId->ecx = u32;
701 else
702 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
703
704 rc = CFGMR3QueryU32(pNode, "edx", &u32);
705 if (RT_SUCCESS(rc))
706 pCpuId->edx = u32;
707 else
708 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
709 }
710 pCpuId++;
711 i++;
712 }
713
714 /* next */
715 if ((i & UINT32_C(0xc0000000)) == 0)
716 {
717 pCpuId = &pCPUM->aGuestCpuIdExt[0];
718 cElements = RT_ELEMENTS(pCPUM->aGuestCpuIdExt);
719 i = UINT32_C(0x80000000);
720 }
721 else if ((i & UINT32_C(0xc0000000)) == UINT32_C(0x80000000))
722 {
723 pCpuId = &pCPUM->aGuestCpuIdCentaur[0];
724 cElements = RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
725 i = UINT32_C(0xc0000000);
726 }
727 else
728 break;
729 }
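    /* Example override, again assuming the VBoxInternal extradata-to-CFGM
       mapping and a placeholder VM name - forcing CPUID leaf 1 ECX to
       advertise SSE3 only:
           VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/CPUID/1/ecx" 0x00000001
       This ends up under the CFGM node CPUM/CPUID/1 queried by the loop
       above. */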
730
731 /* Check if PAE was explicitly enabled by the user. */
732 bool fEnable = false;
733 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable);
734 if (RT_SUCCESS(rc) && fEnable)
735 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
736
737 /*
738 * Log the cpuid and we're good.
739 */
740 RTCPUSET OnlineSet;
741 LogRel(("Logical host processors: %d, processor active mask: %016RX64\n",
742 (int)RTMpGetCount(), RTCpuSetToU64(RTMpGetOnlineSet(&OnlineSet)) ));
743 LogRel(("************************* CPUID dump ************************\n"));
744 DBGFR3Info(pVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
745 LogRel(("\n"));
746 DBGFR3InfoLog(pVM, "cpuid", "verbose"); /* macro */
747 LogRel(("******************** End of CPUID dump **********************\n"));
748 return VINF_SUCCESS;
749}
750
751
752
753
754/**
755 * Applies relocations to data and code managed by this
756 * component. This function will be called at init and
757 * whenever the VMM needs to relocate itself inside the GC.
758 *
759 * The CPUM will update the addresses used by the switcher.
760 *
761 * @param pVM The VM.
762 */
763VMMR3DECL(void) CPUMR3Relocate(PVM pVM)
764{
765 LogFlow(("CPUMR3Relocate\n"));
766 for (VMCPUID i = 0; i < pVM->cCpus; i++)
767 {
768 /*
769 * Switcher pointers.
770 */
771 PVMCPU pVCpu = &pVM->aCpus[i];
772 pVCpu->cpum.s.pHyperCoreRC = MMHyperCCToRC(pVM, pVCpu->cpum.s.pHyperCoreR3);
773 Assert(pVCpu->cpum.s.pHyperCoreRC != NIL_RTRCPTR);
774 }
775}
776
777
778/**
779 * Terminates the CPUM.
780 *
781 * Termination means cleaning up and freeing all resources;
782 * the VM itself is at this point powered off or suspended.
783 *
784 * @returns VBox status code.
785 * @param pVM The VM to operate on.
786 */
787VMMR3DECL(int) CPUMR3Term(PVM pVM)
788{
789 CPUMR3TermCPU(pVM);
790 return 0;
791}
792
793
794/**
795 * Terminates the per-VCPU CPUM.
796 *
797 * Termination means cleaning up and freeing all resources;
798 * the VM itself is at this point powered off or suspended.
799 *
800 * @returns VBox status code.
801 * @param pVM The VM to operate on.
802 */
803VMMR3DECL(int) CPUMR3TermCPU(PVM pVM)
804{
805#ifdef VBOX_WITH_CRASHDUMP_MAGIC
806 for (VMCPUID i = 0; i < pVM->cCpus; i++)
807 {
808 PVMCPU pVCpu = &pVM->aCpus[i];
809 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
810
811 memset(pVCpu->cpum.s.aMagic, 0, sizeof(pVCpu->cpum.s.aMagic));
812 pVCpu->cpum.s.uMagic = 0;
813 pCtx->dr[5] = 0;
814 }
815#endif
816 return 0;
817}
818
819VMMR3DECL(void) CPUMR3ResetCpu(PVMCPU pVCpu)
820{
821 /* @todo anything different for VCPU > 0? */
822 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
823
824 /*
825 * Initialize everything to ZERO first.
826 */
827 uint32_t fUseFlags = pVCpu->cpum.s.fUseFlags & ~CPUM_USED_FPU_SINCE_REM;
828 memset(pCtx, 0, sizeof(*pCtx));
829 pVCpu->cpum.s.fUseFlags = fUseFlags;
830
831 pCtx->cr0 = X86_CR0_CD | X86_CR0_NW | X86_CR0_ET; //0x60000010
832 pCtx->eip = 0x0000fff0;
833 pCtx->edx = 0x00000600; /* P6 processor */
834 pCtx->eflags.Bits.u1Reserved0 = 1;
835
836 pCtx->cs = 0xf000;
837 pCtx->csHid.u64Base = UINT64_C(0xffff0000);
838 pCtx->csHid.u32Limit = 0x0000ffff;
839 pCtx->csHid.Attr.n.u1DescType = 1; /* code/data segment */
840 pCtx->csHid.Attr.n.u1Present = 1;
841 pCtx->csHid.Attr.n.u4Type = X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
842
843 pCtx->dsHid.u32Limit = 0x0000ffff;
844 pCtx->dsHid.Attr.n.u1DescType = 1; /* code/data segment */
845 pCtx->dsHid.Attr.n.u1Present = 1;
846 pCtx->dsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
847
848 pCtx->esHid.u32Limit = 0x0000ffff;
849 pCtx->esHid.Attr.n.u1DescType = 1; /* code/data segment */
850 pCtx->esHid.Attr.n.u1Present = 1;
851 pCtx->esHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
852
853 pCtx->fsHid.u32Limit = 0x0000ffff;
854 pCtx->fsHid.Attr.n.u1DescType = 1; /* code/data segment */
855 pCtx->fsHid.Attr.n.u1Present = 1;
856 pCtx->fsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
857
858 pCtx->gsHid.u32Limit = 0x0000ffff;
859 pCtx->gsHid.Attr.n.u1DescType = 1; /* code/data segment */
860 pCtx->gsHid.Attr.n.u1Present = 1;
861 pCtx->gsHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
862
863 pCtx->ssHid.u32Limit = 0x0000ffff;
864 pCtx->ssHid.Attr.n.u1Present = 1;
865 pCtx->ssHid.Attr.n.u1DescType = 1; /* code/data segment */
866 pCtx->ssHid.Attr.n.u4Type = X86_SEL_TYPE_RW;
867
868 pCtx->idtr.cbIdt = 0xffff;
869 pCtx->gdtr.cbGdt = 0xffff;
870
871 pCtx->ldtrHid.u32Limit = 0xffff;
872 pCtx->ldtrHid.Attr.n.u1Present = 1;
873 pCtx->ldtrHid.Attr.n.u4Type = X86_SEL_TYPE_SYS_LDT;
874
875 pCtx->trHid.u32Limit = 0xffff;
876 pCtx->trHid.Attr.n.u1Present = 1;
877 pCtx->trHid.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
878
879 pCtx->dr[6] = X86_DR6_INIT_VAL;
880 pCtx->dr[7] = X86_DR7_INIT_VAL;
881
882 pCtx->fpu.FTW = 0xff; /* All tags are set, i.e. the regs are empty. */
883 pCtx->fpu.FCW = 0x37f;
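    /* FCW=0x037F: all x87 exceptions masked, 64-bit (double extended)
       precision, round to nearest - the same value FNINIT establishes. */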
884
885 /* Intel 64 and IA-32 Architectures Software Developer's Manual Volume 3A, Table 8-1. IA-32 Processor States Following Power-up, Reset, or INIT */
886 pCtx->fpu.MXCSR = 0x1F80;
887
888 /* Init PAT MSR */
889 pCtx->msrPAT = UINT64_C(0x0007040600070406); /** @todo correct? */
890
891 /* Reset EFER; see AMD64 Architecture Programmer's Manual Volume 2: Table 14-1. Initial Processor State
892 * The Intel docs don't mention it.
893 */
894 pCtx->msrEFER = 0;
895}
896
897/**
898 * Resets the CPU.
899 *
900 * @param pVM The VM handle.
902 */
903VMMR3DECL(void) CPUMR3Reset(PVM pVM)
904{
905 for (VMCPUID i = 0; i < pVM->cCpus; i++)
906 {
907 CPUMR3ResetCpu(&pVM->aCpus[i]);
908
909#ifdef VBOX_WITH_CRASHDUMP_MAGIC
910 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(&pVM->aCpus[i]);
911
912 /* Magic marker for searching in crash dumps. */
913 strcpy((char *)pVM->aCpus[i].cpum.s.aMagic, "CPUMCPU Magic");
914 pVM->aCpus[i].cpum.s.uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
915 pCtx->dr[5] = UINT64_C(0xDEADBEEFDEADBEEF);
916#endif
917 }
918}
919
920#ifdef VBOX_WITH_LIVE_MIGRATION
921
922/**
923 * Called both in pass 0 and the final pass.
924 *
925 * @param pVM The VM handle.
926 * @param pSSM The saved state handle.
927 */
928static void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM)
929{
930 /*
931 * Save all the CPU ID leaves here so we can check them for compatibility
932 * upon loading.
933 */
934 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd));
935 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], sizeof(pVM->cpum.s.aGuestCpuIdStd));
936
937 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt));
938 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
939
940 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur));
941 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
942
943 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
944
945 /*
946 * Save a good portion of the raw CPU IDs as well, as they may come in
947 * handy when validating features for raw mode.
948 */
949 CPUMCPUID aRawStd[8];
950 for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++)
951 ASMCpuId(i, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
952 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd));
953 SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd));
954
955 CPUMCPUID aRawExt[16];
956 for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++)
957 ASMCpuId(i | UINT32_C(0x80000000), &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
958 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
959 SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
960}
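/* The pass 0 data unit layout is thus: count + array for the standard,
   extended and Centaur guest leaves, the default leaf, then count + array
   for the raw standard and raw extended host leaves. cpumR3LoadCpuId below
   consumes them in exactly this order. */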
961
962
963/**
964 * Loads the CPU ID leaves saved by pass 0.
965 *
966 * @returns VBox status code.
967 * @param pVM The VM handle.
968 * @param pSSM The saved state handle.
969 * @param uVersion The format version.
970 */
971static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
972{
973 AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
974
975 /*
976 * Load them into stack buffers first.
977 */
978 CPUMCPUID aGuestCpuIdStd[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd)];
979 uint32_t cGuestCpuIdStd;
980 int rc = SSMR3GetU32(pSSM, &cGuestCpuIdStd); AssertRCReturn(rc, rc);
981 if (cGuestCpuIdStd > RT_ELEMENTS(aGuestCpuIdStd))
982 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
983 SSMR3GetMem(pSSM, &aGuestCpuIdStd[0], cGuestCpuIdStd * sizeof(aGuestCpuIdStd[0]));
984
985 CPUMCPUID aGuestCpuIdExt[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt)];
986 uint32_t cGuestCpuIdExt;
987 rc = SSMR3GetU32(pSSM, &cGuestCpuIdExt); AssertRCReturn(rc, rc);
988 if (cGuestCpuIdExt > RT_ELEMENTS(aGuestCpuIdExt))
989 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
990 SSMR3GetMem(pSSM, &aGuestCpuIdExt[0], cGuestCpuIdExt * sizeof(aGuestCpuIdExt[0]));
991
992 CPUMCPUID aGuestCpuIdCentaur[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur)];
993 uint32_t cGuestCpuIdCentaur;
994 rc = SSMR3GetU32(pSSM, &cGuestCpuIdCentaur); AssertRCReturn(rc, rc);
995 if (cGuestCpuIdCentaur > RT_ELEMENTS(aGuestCpuIdCentaur))
996 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
997 SSMR3GetMem(pSSM, &aGuestCpuIdCentaur[0], cGuestCpuIdCentaur * sizeof(aGuestCpuIdCentaur[0]));
998
999 CPUMCPUID GuestCpuIdDef;
1000 rc = SSMR3GetMem(pSSM, &GuestCpuIdDef, sizeof(GuestCpuIdDef));
1001 AssertRCReturn(rc, rc);
1002
1003 CPUMCPUID aRawStd[8];
1004 uint32_t cRawStd;
1005 rc = SSMR3GetU32(pSSM, &cRawStd); AssertRCReturn(rc, rc);
1006 if (cRawStd > RT_ELEMENTS(aRawStd))
1007 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1008 SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
1009
1010 CPUMCPUID aRawExt[16];
1011 uint32_t cRawExt;
1012 rc = SSMR3GetU32(pSSM, &cRawExt); AssertRCReturn(rc, rc);
1013 if (cRawExt > RT_ELEMENTS(aRawExt))
1014 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1015 rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
1016 AssertRCReturn(rc, rc);
1017
1018 /*
1019 * Note that we support restoring less than the current amount of standard
1020 * leaves, because we may have been allowed more in newer versions of VBox.
1021 *
1022 * So, pad new entries with the default.
1023 */
1024 for (uint32_t i = cGuestCpuIdStd; i < RT_ELEMENTS(aGuestCpuIdStd); i++)
1025 aGuestCpuIdStd[i] = GuestCpuIdDef;
1026
1027 for (uint32_t i = cGuestCpuIdExt; i < RT_ELEMENTS(aGuestCpuIdExt); i++)
1028 aGuestCpuIdExt[i] = GuestCpuIdDef;
1029
1030 for (uint32_t i = cGuestCpuIdCentaur; i < RT_ELEMENTS(aGuestCpuIdCentaur); i++)
1031 aGuestCpuIdCentaur[i] = GuestCpuIdDef;
1032
1033 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
1034 ASMCpuId(i, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
1035
1036 for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
1037 ASMCpuId(i | UINT32_C(0x80000000), &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
1038
1039 /*
1040 * Get the raw CPU IDs for the current host.
1041 */
1042 CPUMCPUID aHostRawStd[8];
1043 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++)
1044 ASMCpuId(i, &aHostRawStd[i].eax, &aHostRawStd[i].ebx, &aHostRawStd[i].ecx, &aHostRawStd[i].edx);
1045
1046 CPUMCPUID aHostRawExt[16];
1047 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++)
1048 ASMCpuId(i | UINT32_C(0x80000000), &aHostRawExt[i].eax, &aHostRawExt[i].ebx, &aHostRawExt[i].ecx, &aHostRawExt[i].edx);
1049
1050 /*
1051 * Now for the fun part...
1052 */
1053
1054
1055 /*
1056 * We're good, commit the CPU ID leaves.
1057 */
1058 memcpy(&pVM->cpum.s.aGuestCpuIdStd[0], &aGuestCpuIdStd[0], sizeof(aGuestCpuIdStd));
1059 memcpy(&pVM->cpum.s.aGuestCpuIdExt[0], &aGuestCpuIdExt[0], sizeof(aGuestCpuIdExt));
1060 memcpy(&pVM->cpum.s.aGuestCpuIdCentaur[0], &aGuestCpuIdCentaur[0], sizeof(aGuestCpuIdCentaur));
1061 pVM->cpum.s.GuestCpuIdDef = GuestCpuIdDef;
1062
1063 return VINF_SUCCESS;
1064}
1065
1066
1067/**
1068 * Pass 0 live exec callback.
1069 *
1070 * @returns VINF_SSM_DONT_CALL_AGAIN.
1071 * @param pVM The VM handle.
1072 * @param pSSM The saved state handle.
1073 * @param uPass The pass (0).
1074 */
1075static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1076{
1077 AssertReturn(uPass == 0, VERR_INTERNAL_ERROR_4);
1078 cpumR3SaveCpuId(pVM, pSSM);
1079 return VINF_SSM_DONT_CALL_AGAIN;
1080}
1081
1082#endif /* VBOX_WITH_LIVE_MIGRATION */
1083
1084/**
1085 * Execute state save operation.
1086 *
1087 * @returns VBox status code.
1088 * @param pVM VM Handle.
1089 * @param pSSM SSM operation handle.
1090 */
1091static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
1092{
1093 /*
1094 * Save.
1095 */
1096 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1097 {
1098 PVMCPU pVCpu = &pVM->aCpus[i];
1099
1100 SSMR3PutMem(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper));
1101 }
1102
1103 SSMR3PutU32(pSSM, pVM->cCpus);
1104 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1105 {
1106 PVMCPU pVCpu = &pVM->aCpus[i];
1107
1108 SSMR3PutMem(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest));
1109 SSMR3PutU32(pSSM, pVCpu->cpum.s.fUseFlags);
1110 SSMR3PutU32(pSSM, pVCpu->cpum.s.fChanged);
1111 SSMR3PutMem(pSSM, &pVCpu->cpum.s.GuestMsr, sizeof(pVCpu->cpum.s.GuestMsr));
1112 }
1113
1114#ifdef VBOX_WITH_LIVE_MIGRATION
1115 cpumR3SaveCpuId(pVM, pSSM);
1116 return VINF_SUCCESS;
1117#else
1118
1119 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd));
1120 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], sizeof(pVM->cpum.s.aGuestCpuIdStd));
1121
1122 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt));
1123 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
1124
1125 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur));
1126 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
1127
1128 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
1129
1130 /* Add the cpuid for checking that the cpu is unchanged. */
1131 uint32_t au32CpuId[8] = {0};
1132 ASMCpuId(0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
1133 ASMCpuId(1, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
1134 return SSMR3PutMem(pSSM, &au32CpuId[0], sizeof(au32CpuId));
1135#endif
1136}
1137
1138
1139/**
1140 * Load a version 1.6 CPUMCTX structure.
1141 *
1142 * @returns VBox status code.
1143 * @param pVM VM Handle.
1144 * @param pCpumctx16 Version 1.6 CPUMCTX
1145 */
1146static void cpumR3LoadCPUM1_6(PVM pVM, CPUMCTX_VER1_6 *pCpumctx16)
1147{
1148#define CPUMCTX16_LOADREG(RegName) \
1149 pVM->aCpus[0].cpum.s.Guest.RegName = pCpumctx16->RegName;
1150
1151#define CPUMCTX16_LOADDRXREG(RegName) \
1152 pVM->aCpus[0].cpum.s.Guest.dr[RegName] = pCpumctx16->dr##RegName;
1153
1154#define CPUMCTX16_LOADHIDREG(RegName) \
1155 pVM->aCpus[0].cpum.s.Guest.RegName##Hid.u64Base = pCpumctx16->RegName##Hid.u32Base; \
1156 pVM->aCpus[0].cpum.s.Guest.RegName##Hid.u32Limit = pCpumctx16->RegName##Hid.u32Limit; \
1157 pVM->aCpus[0].cpum.s.Guest.RegName##Hid.Attr = pCpumctx16->RegName##Hid.Attr;
1158
1159#define CPUMCTX16_LOADSEGREG(RegName) \
1160 pVM->aCpus[0].cpum.s.Guest.RegName = pCpumctx16->RegName; \
1161 CPUMCTX16_LOADHIDREG(RegName);
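/* These macros widen the old 32-bit hidden-register bases (u32Base) into the
   current 64-bit fields (u64Base) while copying limits and attributes
   verbatim; fields absent from the 1.6 layout stay zero thanks to the
   memset the caller performs before loading. */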
1162
1163 pVM->aCpus[0].cpum.s.Guest.fpu = pCpumctx16->fpu;
1164
1165 CPUMCTX16_LOADREG(rax);
1166 CPUMCTX16_LOADREG(rbx);
1167 CPUMCTX16_LOADREG(rcx);
1168 CPUMCTX16_LOADREG(rdx);
1169 CPUMCTX16_LOADREG(rdi);
1170 CPUMCTX16_LOADREG(rsi);
1171 CPUMCTX16_LOADREG(rbp);
1172 CPUMCTX16_LOADREG(esp);
1173 CPUMCTX16_LOADREG(rip);
1174 CPUMCTX16_LOADREG(rflags);
1175
1176 CPUMCTX16_LOADSEGREG(cs);
1177 CPUMCTX16_LOADSEGREG(ds);
1178 CPUMCTX16_LOADSEGREG(es);
1179 CPUMCTX16_LOADSEGREG(fs);
1180 CPUMCTX16_LOADSEGREG(gs);
1181 CPUMCTX16_LOADSEGREG(ss);
1182
1183 CPUMCTX16_LOADREG(r8);
1184 CPUMCTX16_LOADREG(r9);
1185 CPUMCTX16_LOADREG(r10);
1186 CPUMCTX16_LOADREG(r11);
1187 CPUMCTX16_LOADREG(r12);
1188 CPUMCTX16_LOADREG(r13);
1189 CPUMCTX16_LOADREG(r14);
1190 CPUMCTX16_LOADREG(r15);
1191
1192 CPUMCTX16_LOADREG(cr0);
1193 CPUMCTX16_LOADREG(cr2);
1194 CPUMCTX16_LOADREG(cr3);
1195 CPUMCTX16_LOADREG(cr4);
1196
1197 CPUMCTX16_LOADDRXREG(0);
1198 CPUMCTX16_LOADDRXREG(1);
1199 CPUMCTX16_LOADDRXREG(2);
1200 CPUMCTX16_LOADDRXREG(3);
1201 CPUMCTX16_LOADDRXREG(4);
1202 CPUMCTX16_LOADDRXREG(5);
1203 CPUMCTX16_LOADDRXREG(6);
1204 CPUMCTX16_LOADDRXREG(7);
1205
1206 pVM->aCpus[0].cpum.s.Guest.gdtr.cbGdt = pCpumctx16->gdtr.cbGdt;
1207 pVM->aCpus[0].cpum.s.Guest.gdtr.pGdt = pCpumctx16->gdtr.pGdt;
1208 pVM->aCpus[0].cpum.s.Guest.idtr.cbIdt = pCpumctx16->idtr.cbIdt;
1209 pVM->aCpus[0].cpum.s.Guest.idtr.pIdt = pCpumctx16->idtr.pIdt;
1210
1211 CPUMCTX16_LOADREG(ldtr);
1212 CPUMCTX16_LOADREG(tr);
1213
1214 pVM->aCpus[0].cpum.s.Guest.SysEnter = pCpumctx16->SysEnter;
1215
1216 CPUMCTX16_LOADREG(msrEFER);
1217 CPUMCTX16_LOADREG(msrSTAR);
1218 CPUMCTX16_LOADREG(msrPAT);
1219 CPUMCTX16_LOADREG(msrLSTAR);
1220 CPUMCTX16_LOADREG(msrCSTAR);
1221 CPUMCTX16_LOADREG(msrSFMASK);
1222 CPUMCTX16_LOADREG(msrKERNELGSBASE);
1223
1224 CPUMCTX16_LOADHIDREG(ldtr);
1225 CPUMCTX16_LOADHIDREG(tr);
1226
1227#undef CPUMCTX16_LOADSEGREG
1228#undef CPUMCTX16_LOADHIDREG
1229#undef CPUMCTX16_LOADDRXREG
1230#undef CPUMCTX16_LOADREG
1231}
1232
1233
1234/**
1235 * Execute state load operation.
1236 *
1237 * @returns VBox status code.
1238 * @param pVM VM Handle.
1239 * @param pSSM SSM operation handle.
1240 * @param uVersion Data layout version.
1241 * @param uPass The data pass.
1242 */
1243static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
1244{
1245 /*
1246 * Validate version.
1247 */
1248 if ( uVersion != CPUM_SAVED_STATE_VERSION
1249 && uVersion != CPUM_SAVED_STATE_VERSION_VER3_0
1250 && uVersion != CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR
1251 && uVersion != CPUM_SAVED_STATE_VERSION_VER2_0
1252 && uVersion != CPUM_SAVED_STATE_VERSION_VER1_6)
1253 {
1254 AssertMsgFailed(("cpumR3LoadExec: Invalid version uVersion=%d!\n", uVersion));
1255 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1256 }
1257
1258 if (uPass == SSM_PASS_FINAL)
1259 {
1260 /*
1261 * Set the size of RTGCPTR for SSMR3GetGCPtr. (Only necessary for
1262 * really old SSM file versions.)
1263 */
1264 if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
1265 SSMR3SetGCPtrSize(pSSM, sizeof(RTGCPTR32));
1266 else if (uVersion <= CPUM_SAVED_STATE_VERSION_VER3_0)
1267 SSMR3SetGCPtrSize(pSSM, HC_ARCH_BITS == 32 ? sizeof(RTGCPTR32) : sizeof(RTGCPTR));
1268
1269 /*
1270 * Restore.
1271 */
1272 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1273 {
1274 PVMCPU pVCpu = &pVM->aCpus[i];
1275 uint32_t uCR3 = pVCpu->cpum.s.Hyper.cr3;
1276 uint32_t uESP = pVCpu->cpum.s.Hyper.esp; /* see VMMR3Relocate(). */
1277
1278 SSMR3GetMem(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper));
1279 pVCpu->cpum.s.Hyper.cr3 = uCR3;
1280 pVCpu->cpum.s.Hyper.esp = uESP;
1281 }
1282
1283 if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
1284 {
1285 CPUMCTX_VER1_6 cpumctx16;
1286 memset(&pVM->aCpus[0].cpum.s.Guest, 0, sizeof(pVM->aCpus[0].cpum.s.Guest));
1287 SSMR3GetMem(pSSM, &cpumctx16, sizeof(cpumctx16));
1288
1289 /* Convert the old cpumctx state into the new format. */
1290 cpumR3LoadCPUM1_6(pVM, &cpumctx16);
1291
1292 SSMR3GetU32(pSSM, &pVM->aCpus[0].cpum.s.fUseFlags);
1293 SSMR3GetU32(pSSM, &pVM->aCpus[0].cpum.s.fChanged);
1294 }
1295 else
1296 {
1297 if (uVersion >= CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR)
1298 {
1299 uint32_t cCpus;
1300 int rc = SSMR3GetU32(pSSM, &cCpus); AssertRCReturn(rc, rc);
1301 AssertLogRelMsgReturn(cCpus == pVM->cCpus, ("Mismatching CPU counts: saved: %u; configured: %u \n", cCpus, pVM->cCpus),
1302 VERR_SSM_UNEXPECTED_DATA);
1303 }
1304 AssertLogRelMsgReturn( uVersion != CPUM_SAVED_STATE_VERSION_VER2_0
1305 || pVM->cCpus == 1,
1306 ("cCpus=%u\n", pVM->cCpus),
1307 VERR_SSM_UNEXPECTED_DATA);
1308
1309 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1310 {
1311 SSMR3GetMem(pSSM, &pVM->aCpus[i].cpum.s.Guest, sizeof(pVM->aCpus[i].cpum.s.Guest));
1312 SSMR3GetU32(pSSM, &pVM->aCpus[i].cpum.s.fUseFlags);
1313 SSMR3GetU32(pSSM, &pVM->aCpus[i].cpum.s.fChanged);
1314 if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_0)
1315 SSMR3GetMem(pSSM, &pVM->aCpus[i].cpum.s.GuestMsr, sizeof(pVM->aCpus[i].cpum.s.GuestMsr));
1316 }
1317 }
1318 }
1319
1320#ifdef VBOX_WITH_LIVE_MIGRATION
1321 if (uVersion > CPUM_SAVED_STATE_VERSION_VER3_0)
1322 return cpumR3LoadCpuId(pVM, pSSM, uVersion);
1323
1324 /** @todo Merge the code below into cpumR3LoadCpuId when we've found out what is
1325 * actually required. */
1326#endif
1327
1328 /*
1329 * Restore the CPUID leaves.
1330 *
1331 * Note that we support restoring less than the current amount of standard
1332 * leaves, because we may have been allowed more in newer versions of VBox.
1333 */
1334 uint32_t cElements;
1335 int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
1336 if (cElements > RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
1337 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1338 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], cElements*sizeof(pVM->cpum.s.aGuestCpuIdStd[0]));
1339
1340 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
1341 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
1342 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1343 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));
1344
1345 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
1346 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
1347 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1348 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));
1349
1350 SSMR3GetMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));
1351
1352 /*
1353 * Check that the basic cpuid id information is unchanged.
1354 */
1355 /** @todo we should check the 64 bits capabilities too! */
1356 uint32_t au32CpuId[8] = {0};
1357 ASMCpuId(0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
1358 ASMCpuId(1, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
1359 uint32_t au32CpuIdSaved[8];
1360 rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
1361 if (RT_SUCCESS(rc))
1362 {
1363 /* Ignore CPU stepping. */
1364 au32CpuId[4] &= 0xfffffff0;
1365 au32CpuIdSaved[4] &= 0xfffffff0;
1366
1367 /* Ignore APIC ID (AMD specs). */
1368 au32CpuId[5] &= ~0xff000000;
1369 au32CpuIdSaved[5] &= ~0xff000000;
1370
1371 /* Ignore the number of Logical CPUs (AMD specs). */
1372 au32CpuId[5] &= ~0x00ff0000;
1373 au32CpuIdSaved[5] &= ~0x00ff0000;
1374
1375 /* Ignore some advanced capability bits that we don't expose to the guest. */
1376 au32CpuId[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
1377 | X86_CPUID_FEATURE_ECX_VMX
1378 | X86_CPUID_FEATURE_ECX_SMX
1379 | X86_CPUID_FEATURE_ECX_EST
1380 | X86_CPUID_FEATURE_ECX_TM2
1381 | X86_CPUID_FEATURE_ECX_CNTXID
1382 | X86_CPUID_FEATURE_ECX_TPRUPDATE
1383 | X86_CPUID_FEATURE_ECX_PDCM
1384 | X86_CPUID_FEATURE_ECX_DCA
1385 | X86_CPUID_FEATURE_ECX_X2APIC
1386 );
1387 au32CpuIdSaved[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
1388 | X86_CPUID_FEATURE_ECX_VMX
1389 | X86_CPUID_FEATURE_ECX_SMX
1390 | X86_CPUID_FEATURE_ECX_EST
1391 | X86_CPUID_FEATURE_ECX_TM2
1392 | X86_CPUID_FEATURE_ECX_CNTXID
1393 | X86_CPUID_FEATURE_ECX_TPRUPDATE
1394 | X86_CPUID_FEATURE_ECX_PDCM
1395 | X86_CPUID_FEATURE_ECX_DCA
1396 | X86_CPUID_FEATURE_ECX_X2APIC
1397 );
1398
1399 /* Make sure we don't forget to update the masks when enabling
1400 * features in the future.
1401 */
1402 AssertRelease(!(pVM->cpum.s.aGuestCpuIdStd[1].ecx &
1403 ( X86_CPUID_FEATURE_ECX_DTES64
1404 | X86_CPUID_FEATURE_ECX_VMX
1405 | X86_CPUID_FEATURE_ECX_SMX
1406 | X86_CPUID_FEATURE_ECX_EST
1407 | X86_CPUID_FEATURE_ECX_TM2
1408 | X86_CPUID_FEATURE_ECX_CNTXID
1409 | X86_CPUID_FEATURE_ECX_TPRUPDATE
1410 | X86_CPUID_FEATURE_ECX_PDCM
1411 | X86_CPUID_FEATURE_ECX_DCA
1412 | X86_CPUID_FEATURE_ECX_X2APIC
1413 )));
1414 /* do the compare */
1415 if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
1416 {
1417 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
1418 LogRel(("cpumR3LoadExec: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
1419 "Saved=%.*Rhxs\n"
1420 "Real =%.*Rhxs\n",
1421 sizeof(au32CpuIdSaved), au32CpuIdSaved,
1422 sizeof(au32CpuId), au32CpuId));
1423 else
1424 {
1425 LogRel(("cpumR3LoadExec: CpuId mismatch!\n"
1426 "Saved=%.*Rhxs\n"
1427 "Real =%.*Rhxs\n",
1428 sizeof(au32CpuIdSaved), au32CpuIdSaved,
1429 sizeof(au32CpuId), au32CpuId));
1430 rc = VERR_SSM_LOAD_CPUID_MISMATCH;
1431 }
1432 }
1433 }
1434
1435 return rc;
1436}
1437
1438
1439/**
1440 * Formats the EFLAGS value into mnemonics.
1441 *
1442 * @param pszEFlags Where to write the mnemonics. (Assumes sufficient buffer space.)
1443 * @param efl The EFLAGS value.
1444 */
1445static void cpumR3InfoFormatFlags(char *pszEFlags, uint32_t efl)
1446{
1447 /*
1448 * Format the flags.
1449 */
1450 static const struct
1451 {
1452 const char *pszSet; const char *pszClear; uint32_t fFlag;
1453 } s_aFlags[] =
1454 {
1455 { "vip",NULL, X86_EFL_VIP },
1456 { "vif",NULL, X86_EFL_VIF },
1457 { "ac", NULL, X86_EFL_AC },
1458 { "vm", NULL, X86_EFL_VM },
1459 { "rf", NULL, X86_EFL_RF },
1460 { "nt", NULL, X86_EFL_NT },
1461 { "ov", "nv", X86_EFL_OF },
1462 { "dn", "up", X86_EFL_DF },
1463 { "ei", "di", X86_EFL_IF },
1464 { "tf", NULL, X86_EFL_TF },
1465 { "nt", "pl", X86_EFL_SF },
1466 { "nz", "zr", X86_EFL_ZF },
1467 { "ac", "na", X86_EFL_AF },
1468 { "po", "pe", X86_EFL_PF },
1469 { "cy", "nc", X86_EFL_CF },
1470 };
1471 char *psz = pszEFlags;
1472 for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
1473 {
1474 const char *pszAdd = s_aFlags[i].fFlag & efl ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
1475 if (pszAdd)
1476 {
1477 strcpy(psz, pszAdd);
1478 psz += strlen(pszAdd);
1479 *psz++ = ' ';
1480 }
1481 }
1482 psz[-1] = '\0';
1483}
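/* Sketch of the output: efl=0x00000202 (only IF set) formats as
   "nv up ei pl nz na po nc", i.e. the classic DEBUG-style mnemonics. */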
1484
1485
1486/**
1487 * Formats a full register dump.
1488 *
1489 * @param pVM VM Handle.
1490 * @param pCtx The context to format.
1491 * @param pCtxCore The context core to format.
1492 * @param pHlp Output functions.
1493 * @param enmType The dump type.
1494 * @param pszPrefix Register name prefix.
1495 */
1496static void cpumR3InfoOne(PVM pVM, PCPUMCTX pCtx, PCCPUMCTXCORE pCtxCore, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType, const char *pszPrefix)
1497{
1498 /*
1499 * Format the EFLAGS.
1500 */
1501 uint32_t efl = pCtxCore->eflags.u32;
1502 char szEFlags[80];
1503 cpumR3InfoFormatFlags(&szEFlags[0], efl);
1504
1505 /*
1506 * Format the registers.
1507 */
1508 switch (enmType)
1509 {
1510 case CPUMDUMPTYPE_TERSE:
1511 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1512 pHlp->pfnPrintf(pHlp,
1513 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1514 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1515 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1516 "%sr14=%016RX64 %sr15=%016RX64\n"
1517 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1518 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
1519 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1520 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1521 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1522 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1523 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
1524 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, efl);
1525 else
1526 pHlp->pfnPrintf(pHlp,
1527 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1528 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1529 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
1530 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1531 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1532 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
1533 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, efl);
1534 break;
1535
1536 case CPUMDUMPTYPE_DEFAULT:
1537 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1538 pHlp->pfnPrintf(pHlp,
1539 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1540 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1541 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1542 "%sr14=%016RX64 %sr15=%016RX64\n"
1543 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1544 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
1545 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%016RX64:%04x %sldtr=%04x\n"
1546 ,
1547 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1548 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1549 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1550 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1551 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
1552 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, (RTSEL)pCtx->tr, pszPrefix, efl,
1553 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1554 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, (RTSEL)pCtx->ldtr);
1555 else
1556 pHlp->pfnPrintf(pHlp,
1557 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1558 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1559 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
1560 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%08RX64:%04x %sldtr=%04x\n"
1561 ,
1562 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1563 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1564 pszPrefix, (RTSEL)pCtxCore->cs, pszPrefix, (RTSEL)pCtxCore->ss, pszPrefix, (RTSEL)pCtxCore->ds, pszPrefix, (RTSEL)pCtxCore->es,
1565 pszPrefix, (RTSEL)pCtxCore->fs, pszPrefix, (RTSEL)pCtxCore->gs, pszPrefix, (RTSEL)pCtx->tr, pszPrefix, efl,
1566 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1567 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, (RTSEL)pCtx->ldtr);
1568 break;
1569
1570 case CPUMDUMPTYPE_VERBOSE:
1571 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1572 pHlp->pfnPrintf(pHlp,
1573 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1574 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1575 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1576 "%sr14=%016RX64 %sr15=%016RX64\n"
1577 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1578 "%scs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1579 "%sds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1580 "%ses={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1581 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1582 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1583 "%sss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1584 "%scr0=%016RX64 %scr2=%016RX64 %scr3=%016RX64 %scr4=%016RX64\n"
1585 "%sdr0=%016RX64 %sdr1=%016RX64 %sdr2=%016RX64 %sdr3=%016RX64\n"
1586 "%sdr4=%016RX64 %sdr5=%016RX64 %sdr6=%016RX64 %sdr7=%016RX64\n"
1587 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
1588 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1589 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1590 "%sSysEnter={cs=%04llx eip=%016RX64 esp=%016RX64}\n"
1591 ,
1592 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1593 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1594 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1595 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1596 pszPrefix, (RTSEL)pCtxCore->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u,
1597 pszPrefix, (RTSEL)pCtxCore->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u,
1598 pszPrefix, (RTSEL)pCtxCore->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u,
1599 pszPrefix, (RTSEL)pCtxCore->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u,
1600 pszPrefix, (RTSEL)pCtxCore->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u,
1601 pszPrefix, (RTSEL)pCtxCore->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u,
1602 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1603 pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1], pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
1604 pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5], pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
1605 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
1606 pszPrefix, (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1607 pszPrefix, (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1608 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1609 else
1610 pHlp->pfnPrintf(pHlp,
1611 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1612 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1613 "%scs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr0=%08RX64 %sdr1=%08RX64\n"
1614 "%sds={%04x base=%016RX64 limit=%08x flags=%08x} %sdr2=%08RX64 %sdr3=%08RX64\n"
1615 "%ses={%04x base=%016RX64 limit=%08x flags=%08x} %sdr4=%08RX64 %sdr5=%08RX64\n"
1616 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr6=%08RX64 %sdr7=%08RX64\n"
1617 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x} %scr0=%08RX64 %scr2=%08RX64\n"
1618 "%sss={%04x base=%016RX64 limit=%08x flags=%08x} %scr3=%08RX64 %scr4=%08RX64\n"
1619 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
1620 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1621 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1622 "%sSysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1623 ,
1624 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1625 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1626 pszPrefix, (RTSEL)pCtxCore->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1],
1627 pszPrefix, (RTSEL)pCtxCore->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
1628 pszPrefix, (RTSEL)pCtxCore->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5],
1629 pszPrefix, (RTSEL)pCtxCore->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
1630 pszPrefix, (RTSEL)pCtxCore->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2,
1631 pszPrefix, (RTSEL)pCtxCore->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1632 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
1633 pszPrefix, (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1634 pszPrefix, (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1635 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1636
1637 pHlp->pfnPrintf(pHlp,
1638 "FPU:\n"
1639 "%sFCW=%04x %sFSW=%04x %sFTW=%02x\n"
1640 "%sres1=%02x %sFOP=%04x %sFPUIP=%08x %sCS=%04x %sRsvrd1=%04x\n"
1641 "%sFPUDP=%04x %sDS=%04x %sRsvrd2=%04x %sMXCSR=%08x %sMXCSR_MASK=%08x\n"
1642 ,
1643 pszPrefix, pCtx->fpu.FCW, pszPrefix, pCtx->fpu.FSW, pszPrefix, pCtx->fpu.FTW,
1644 pszPrefix, pCtx->fpu.huh1, pszPrefix, pCtx->fpu.FOP, pszPrefix, pCtx->fpu.FPUIP, pszPrefix, pCtx->fpu.CS, pszPrefix, pCtx->fpu.Rsvrd1,
1645 pszPrefix, pCtx->fpu.FPUDP, pszPrefix, pCtx->fpu.DS, pszPrefix, pCtx->fpu.Rsrvd2,
1646 pszPrefix, pCtx->fpu.MXCSR, pszPrefix, pCtx->fpu.MXCSR_MASK);
1647
1648 pHlp->pfnPrintf(pHlp,
1649 "MSR:\n"
1650 "%sEFER =%016RX64\n"
1651 "%sPAT =%016RX64\n"
1652 "%sSTAR =%016RX64\n"
1653 "%sCSTAR =%016RX64\n"
1654 "%sLSTAR =%016RX64\n"
1655 "%sSFMASK =%016RX64\n"
1656 "%sKERNELGSBASE =%016RX64\n",
1657 pszPrefix, pCtx->msrEFER,
1658 pszPrefix, pCtx->msrPAT,
1659 pszPrefix, pCtx->msrSTAR,
1660 pszPrefix, pCtx->msrCSTAR,
1661 pszPrefix, pCtx->msrLSTAR,
1662 pszPrefix, pCtx->msrSFMASK,
1663 pszPrefix, pCtx->msrKERNELGSBASE);
1664 break;
1665 }
1666}
1667
1668
1669/**
1670 * Display all cpu states and any other cpum info.
1671 *
1672 * @param pVM VM Handle.
1673 * @param pHlp The info helper functions.
1674 * @param pszArgs Arguments, ignored.
1675 */
1676static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1677{
1678 cpumR3InfoGuest(pVM, pHlp, pszArgs);
1679 cpumR3InfoGuestInstr(pVM, pHlp, pszArgs);
1680 cpumR3InfoHyper(pVM, pHlp, pszArgs);
1681 cpumR3InfoHost(pVM, pHlp, pszArgs);
1682}
1683
1684
1685/**
1686 * Parses the info argument.
1687 *
1688 * The argument starts with 'verbose', 'terse' or 'default' and then
1689 * continues with the comment string.
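 * For example, "verbose foo" selects CPUMDUMPTYPE_VERBOSE and leaves "foo"
 * as the comment string.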
1690 *
1691 * @param pszArgs The pointer to the argument string.
1692 * @param penmType Where to store the dump type request.
1693 * @param ppszComment Where to store the pointer to the comment string.
1694 */
1695static void cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, const char **ppszComment)
1696{
1697 if (!pszArgs)
1698 {
1699 *penmType = CPUMDUMPTYPE_DEFAULT;
1700 *ppszComment = "";
1701 }
1702 else
1703 {
1704 if (!strncmp(pszArgs, "verbose", sizeof("verbose") - 1))
1705 {
1706            pszArgs += sizeof("verbose") - 1;
1707 *penmType = CPUMDUMPTYPE_VERBOSE;
1708 }
1709 else if (!strncmp(pszArgs, "terse", sizeof("terse") - 1))
1710 {
1711 pszArgs += 5;
1712 *penmType = CPUMDUMPTYPE_TERSE;
1713 }
1714 else if (!strncmp(pszArgs, "default", sizeof("default") - 1))
1715 {
1716 pszArgs += 7;
1717 *penmType = CPUMDUMPTYPE_DEFAULT;
1718 }
1719 else
1720 *penmType = CPUMDUMPTYPE_DEFAULT;
1721 *ppszComment = RTStrStripL(pszArgs);
1722 }
1723}
1724
1725
1726/**
1727 * Display the guest cpu state.
1728 *
1729 * @param pVM VM Handle.
1730 * @param pHlp The info helper functions.
1731 * @param pszArgs Arguments, ignored.
1732 */
1733static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1734{
1735 CPUMDUMPTYPE enmType;
1736 const char *pszComment;
1737 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1738
1739    /** @todo SMP support! */
1740 PVMCPU pVCpu = VMMGetCpu(pVM);
1741 if (!pVCpu)
1742 pVCpu = &pVM->aCpus[0];
1743
1744 pHlp->pfnPrintf(pHlp, "Guest CPUM (VCPU %d) state: %s\n", pVCpu->idCpu, pszComment);
1745
1746 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1747 cpumR3InfoOne(pVM, pCtx, CPUMCTX2CORE(pCtx), pHlp, enmType, "");
1748}
1749
1750
1751/**
1752 * Display the current guest instruction
1753 *
1754 * @param pVM VM Handle.
1755 * @param pHlp The info helper functions.
1756 * @param pszArgs Arguments, ignored.
1757 */
1758static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1759{
1760 char szInstruction[256];
1761    /** @todo SMP support! */
1762 PVMCPU pVCpu = VMMGetCpu(pVM);
1763 if (!pVCpu)
1764 pVCpu = &pVM->aCpus[0];
1765
1766 int rc = DBGFR3DisasInstrCurrent(pVCpu, szInstruction, sizeof(szInstruction));
1767 if (RT_SUCCESS(rc))
1768 pHlp->pfnPrintf(pHlp, "\nCPUM: %s\n\n", szInstruction);
1769}
1770
1771
1772/**
1773 * Display the hypervisor cpu state.
1774 *
1775 * @param pVM VM Handle.
1776 * @param pHlp The info helper functions.
1777 * @param pszArgs Arguments, ignored.
1778 */
1779static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1780{
1781 CPUMDUMPTYPE enmType;
1782 const char *pszComment;
1783    /** @todo SMP */
1784 PVMCPU pVCpu = &pVM->aCpus[0];
1785
1786 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1787 pHlp->pfnPrintf(pHlp, "Hypervisor CPUM state: %s\n", pszComment);
1788 cpumR3InfoOne(pVM, &pVCpu->cpum.s.Hyper, pVCpu->cpum.s.pHyperCoreR3, pHlp, enmType, ".");
1789 pHlp->pfnPrintf(pHlp, "CR4OrMask=%#x CR4AndMask=%#x\n", pVM->cpum.s.CR4.OrMask, pVM->cpum.s.CR4.AndMask);
1790}
1791
1792
1793/**
1794 * Display the host cpu state.
1795 *
1796 * @param pVM VM Handle.
1797 * @param pHlp The info helper functions.
1798 * @param pszArgs Arguments, ignored.
1799 */
1800static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1801{
1802 CPUMDUMPTYPE enmType;
1803 const char *pszComment;
1804 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1805 pHlp->pfnPrintf(pHlp, "Host CPUM state: %s\n", pszComment);
1806
1807 /*
1808 * Format the EFLAGS.
1809 */
1810    /** @todo SMP */
1811 PCPUMHOSTCTX pCtx = &pVM->aCpus[0].cpum.s.Host;
1812#if HC_ARCH_BITS == 32
1813 uint32_t efl = pCtx->eflags.u32;
1814#else
1815 uint64_t efl = pCtx->rflags;
1816#endif
1817 char szEFlags[80];
1818 cpumR3InfoFormatFlags(&szEFlags[0], efl);
1819
1820 /*
1821 * Format the registers.
1822 */
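    /* Registers printed as 'x's below are not part of the saved host context;
       the world switcher does not save scratch registers, the instruction
       pointer or CR2. */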
1823#if HC_ARCH_BITS == 32
1824# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1825 if (!(pCtx->efer & MSR_K6_EFER_LMA))
1826# endif
1827 {
1828 pHlp->pfnPrintf(pHlp,
1829 "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
1830 "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
1831 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
1832 "cr0=%08RX64 cr2=xxxxxxxx cr3=%08RX64 cr4=%08RX64 gdtr=%08x:%04x ldtr=%04x\n"
1833                        "dr[0]=%08RX64 dr[1]=%08RX64 dr[2]=%08RX64 dr[3]=%08RX64 dr[6]=%08RX64 dr[7]=%08RX64\n"
1834 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1835 ,
1836 /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
1837 /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
1838 (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
1839 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
1840 pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
1841 (uint32_t)pCtx->gdtr.uAddr, pCtx->gdtr.cb, (RTSEL)pCtx->ldtr,
1842 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1843 }
1844# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1845 else
1846# endif
1847#endif
1848#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1849 {
1850 pHlp->pfnPrintf(pHlp,
1851 "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
1852 "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
1853 "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
1854 " r8=xxxxxxxxxxxxxxxx r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
1855 "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
1856 "r14=%016RX64 r15=%016RX64\n"
1857 "iopl=%d %31s\n"
1858 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
1859 "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
1860 "cr4=%016RX64 ldtr=%04x tr=%04x\n"
1861 "dr[0]=%016RX64 dr[1]=%016RX64 dr[2]=%016RX64\n"
1862 "dr[3]=%016RX64 dr[6]=%016RX64 dr[7]=%016RX64\n"
1863 "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
1864 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1865 "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
1866 ,
1867 /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
1868 pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
1869 /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
1870 /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
1871 pCtx->r11, pCtx->r12, pCtx->r13,
1872 pCtx->r14, pCtx->r15,
1873 X86_EFL_GET_IOPL(efl), szEFlags,
1874 (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
1875 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
1876 pCtx->cr4, pCtx->ldtr, pCtx->tr,
1877 pCtx->dr0, pCtx->dr1, pCtx->dr2,
1878 pCtx->dr3, pCtx->dr6, pCtx->dr7,
1879 pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
1880 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
1881 pCtx->FSbase, pCtx->GSbase, pCtx->efer);
1882 }
1883#endif
1884}
1885
1886
1887/**
1888 * Get L1 cache / TLB associativity. The buffer pszBuf must hold at least 16 bytes.
1889 */
1890static const char *getCacheAss(unsigned u, char *pszBuf)
1891{
1892 if (u == 0)
1893 return "res0 ";
1894 if (u == 1)
1895 return "direct";
1896 if (u >= 256)
1897 return "???";
1898
1899 RTStrPrintf(pszBuf, 16, "%d way", u);
1900 return pszBuf;
1901}
1902
1903
1904/**
1905 * Get L2 cache associativity.
1906 */
1907static const char *getL2CacheAss(unsigned u)
1908{
1909 switch (u)
1910 {
1911 case 0: return "off ";
1912 case 1: return "direct";
1913 case 2: return "2 way ";
1914 case 3: return "res3 ";
1915 case 4: return "4 way ";
1916 case 5: return "res5 ";
1917        case 6:  return "8 way ";
            case 7:  return "res7  ";
1918 case 8: return "16 way";
1919 case 9: return "res9 ";
1920 case 10: return "res10 ";
1921 case 11: return "res11 ";
1922 case 12: return "res12 ";
1923 case 13: return "res13 ";
1924 case 14: return "res14 ";
1925 case 15: return "fully ";
1926 default:
1927 return "????";
1928 }
1929}
1930
1931
1932/**
1933 * Display the guest CpuId leaves.
1934 *
1935 * @param pVM VM Handle.
1936 * @param pHlp The info helper functions.
1937 * @param pszArgs "terse", "default" or "verbose".
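 * @remark This is the handler behind the debugger's CPUID info item,
 *         registered with DBGF during CPUM initialization earlier in the file.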
1938 */
1939static DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1940{
1941 /*
1942 * Parse the argument.
1943 */
1944 unsigned iVerbosity = 1;
1945 if (pszArgs)
1946 {
1947 pszArgs = RTStrStripL(pszArgs);
1948 if (!strcmp(pszArgs, "terse"))
1949 iVerbosity--;
1950 else if (!strcmp(pszArgs, "verbose"))
1951 iVerbosity++;
1952 }
1953
1954 /*
1955 * Start cracking.
1956 */
1957 CPUMCPUID Host;
1958 CPUMCPUID Guest;
1959 unsigned cStdMax = pVM->cpum.s.aGuestCpuIdStd[0].eax;
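    /* Leaves beyond the maximum advertised in leaf 0 are marked with a
       trailing '*' in the dump below. */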
1960
1961 pHlp->pfnPrintf(pHlp,
1962 " RAW Standard CPUIDs\n"
1963 " Function eax ebx ecx edx\n");
1964 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd); i++)
1965 {
1966 Guest = pVM->cpum.s.aGuestCpuIdStd[i];
1967 ASMCpuId_Idx_ECX(i, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
1968
1969 pHlp->pfnPrintf(pHlp,
1970 "Gst: %08x %08x %08x %08x %08x%s\n"
1971 "Hst: %08x %08x %08x %08x\n",
1972 i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
1973 i <= cStdMax ? "" : "*",
1974 Host.eax, Host.ebx, Host.ecx, Host.edx);
1975 }
1976
1977 /*
1978 * If verbose, decode it.
1979 */
1980 if (iVerbosity)
1981 {
1982 Guest = pVM->cpum.s.aGuestCpuIdStd[0];
1983 pHlp->pfnPrintf(pHlp,
1984 "Name: %.04s%.04s%.04s\n"
1985 "Supports: 0-%x\n",
1986 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
1987 }
1988
1989 /*
1990 * Get Features.
1991 */
1992 bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdStd[0].ebx,
1993 pVM->cpum.s.aGuestCpuIdStd[0].ecx,
1994 pVM->cpum.s.aGuestCpuIdStd[0].edx);
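    /* ASMIsIntelCpuEx checks whether the leaf 0 vendor registers spell out
       "GenuineIntel". */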
1995 if (cStdMax >= 1 && iVerbosity)
1996 {
1997 Guest = pVM->cpum.s.aGuestCpuIdStd[1];
1998 uint32_t uEAX = Guest.eax;
1999
2000 pHlp->pfnPrintf(pHlp,
2001 "Family: %d \tExtended: %d \tEffective: %d\n"
2002 "Model: %d \tExtended: %d \tEffective: %d\n"
2003 "Stepping: %d\n"
2004 "APIC ID: %#04x\n"
2005 "Logical CPUs: %d\n"
2006 "CLFLUSH Size: %d\n"
2007 "Brand ID: %#04x\n",
2008                        (uEAX >> 8) & 0xf, (uEAX >> 20) & 0xff, ASMGetCpuFamily(uEAX),
2009 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
2010 ASMGetCpuStepping(uEAX),
2011 (Guest.ebx >> 24) & 0xff,
2012 (Guest.ebx >> 16) & 0xff,
2013 (Guest.ebx >> 8) & 0xff,
2014 (Guest.ebx >> 0) & 0xff);
2015 if (iVerbosity == 1)
2016 {
2017 uint32_t uEDX = Guest.edx;
2018 pHlp->pfnPrintf(pHlp, "Features EDX: ");
2019 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
2020 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
2021 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
2022 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
2023 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
2024 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
2025 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
2026 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
2027 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
2028 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
2029 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
2030 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SEP");
2031 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
2032 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
2033 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
2034 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
2035 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
2036 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
2037 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " PSN");
2038 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " CLFSH");
2039 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " 20");
2040 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " DS");
2041 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ACPI");
2042 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
2043 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
2044 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " SSE");
2045 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " SSE2");
2046 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " SS");
2047 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " HTT");
2048 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " TM");
2049 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " 30");
2050 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " PBE");
2051 pHlp->pfnPrintf(pHlp, "\n");
2052
2053 uint32_t uECX = Guest.ecx;
2054 pHlp->pfnPrintf(pHlp, "Features ECX: ");
2055 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " SSE3");
2056 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " 1");
2057 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " 2");
2058 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " MONITOR");
2059 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " DS-CPL");
2060 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " VMX");
2061 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " 6");
2062 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " EST");
2063 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " TM2");
2064 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " 9");
2065 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " CNXT-ID");
2066 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " 11");
2067 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " 12");
2068 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " CX16");
2069 for (unsigned iBit = 14; iBit < 32; iBit++)
2070 if (uECX & RT_BIT(iBit))
2071 pHlp->pfnPrintf(pHlp, " %d", iBit);
2072 pHlp->pfnPrintf(pHlp, "\n");
2073 }
2074 else
2075 {
2076 ASMCpuId(1, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2077
2078 X86CPUIDFEATEDX EdxHost = *(PX86CPUIDFEATEDX)&Host.edx;
2079 X86CPUIDFEATECX EcxHost = *(PX86CPUIDFEATECX)&Host.ecx;
2080 X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest.edx;
2081 X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest.ecx;
2082
2083 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
2084 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", EdxGuest.u1FPU, EdxHost.u1FPU);
2085 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", EdxGuest.u1VME, EdxHost.u1VME);
2086 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", EdxGuest.u1DE, EdxHost.u1DE);
2087 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", EdxGuest.u1PSE, EdxHost.u1PSE);
2088 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", EdxGuest.u1TSC, EdxHost.u1TSC);
2089 pHlp->pfnPrintf(pHlp, "MSR - Model Specific Registers = %d (%d)\n", EdxGuest.u1MSR, EdxHost.u1MSR);
2090 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", EdxGuest.u1PAE, EdxHost.u1PAE);
2091 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", EdxGuest.u1MCE, EdxHost.u1MCE);
2092 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", EdxGuest.u1CX8, EdxHost.u1CX8);
2093 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", EdxGuest.u1APIC, EdxHost.u1APIC);
2094 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EdxGuest.u1Reserved1, EdxHost.u1Reserved1);
2095 pHlp->pfnPrintf(pHlp, "SEP - SYSENTER and SYSEXIT = %d (%d)\n", EdxGuest.u1SEP, EdxHost.u1SEP);
2096 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", EdxGuest.u1MTRR, EdxHost.u1MTRR);
2097 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", EdxGuest.u1PGE, EdxHost.u1PGE);
2098 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", EdxGuest.u1MCA, EdxHost.u1MCA);
2099 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", EdxGuest.u1CMOV, EdxHost.u1CMOV);
2100 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", EdxGuest.u1PAT, EdxHost.u1PAT);
2101            pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extension    = %d (%d)\n", EdxGuest.u1PSE36, EdxHost.u1PSE36);
2102 pHlp->pfnPrintf(pHlp, "PSN - Processor Serial Number = %d (%d)\n", EdxGuest.u1PSN, EdxHost.u1PSN);
2103 pHlp->pfnPrintf(pHlp, "CLFSH - CLFLUSH Instruction. = %d (%d)\n", EdxGuest.u1CLFSH, EdxHost.u1CLFSH);
2104 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EdxGuest.u1Reserved2, EdxHost.u1Reserved2);
2105 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", EdxGuest.u1DS, EdxHost.u1DS);
2106 pHlp->pfnPrintf(pHlp, "ACPI - Thermal Mon. & Soft. Clock Ctrl.= %d (%d)\n", EdxGuest.u1ACPI, EdxHost.u1ACPI);
2107 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", EdxGuest.u1MMX, EdxHost.u1MMX);
2108 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", EdxGuest.u1FXSR, EdxHost.u1FXSR);
2109 pHlp->pfnPrintf(pHlp, "SSE - SSE Support = %d (%d)\n", EdxGuest.u1SSE, EdxHost.u1SSE);
2110 pHlp->pfnPrintf(pHlp, "SSE2 - SSE2 Support = %d (%d)\n", EdxGuest.u1SSE2, EdxHost.u1SSE2);
2111 pHlp->pfnPrintf(pHlp, "SS - Self Snoop = %d (%d)\n", EdxGuest.u1SS, EdxHost.u1SS);
2112            pHlp->pfnPrintf(pHlp, "HTT - Hyper-Threading Technology       = %d (%d)\n", EdxGuest.u1HTT, EdxHost.u1HTT);
2113 pHlp->pfnPrintf(pHlp, "TM - Thermal Monitor = %d (%d)\n", EdxGuest.u1TM, EdxHost.u1TM);
2114 pHlp->pfnPrintf(pHlp, "30 - Reserved = %d (%d)\n", EdxGuest.u1Reserved3, EdxHost.u1Reserved3);
2115 pHlp->pfnPrintf(pHlp, "PBE - Pending Break Enable = %d (%d)\n", EdxGuest.u1PBE, EdxHost.u1PBE);
2116
2117 pHlp->pfnPrintf(pHlp, "Supports SSE3 or not = %d (%d)\n", EcxGuest.u1SSE3, EcxHost.u1SSE3);
2118 pHlp->pfnPrintf(pHlp, "Reserved = %d (%d)\n", EcxGuest.u1Reserved1, EcxHost.u1Reserved1);
2119 pHlp->pfnPrintf(pHlp, "DS Area 64-bit layout = %d (%d)\n", EcxGuest.u1DTE64, EcxHost.u1DTE64);
2120 pHlp->pfnPrintf(pHlp, "Supports MONITOR/MWAIT = %d (%d)\n", EcxGuest.u1Monitor, EcxHost.u1Monitor);
2121 pHlp->pfnPrintf(pHlp, "CPL-DS - CPL Qualified Debug Store = %d (%d)\n", EcxGuest.u1CPLDS, EcxHost.u1CPLDS);
2122 pHlp->pfnPrintf(pHlp, "VMX - Virtual Machine Technology = %d (%d)\n", EcxGuest.u1VMX, EcxHost.u1VMX);
2123 pHlp->pfnPrintf(pHlp, "SMX - Safer Mode Extensions = %d (%d)\n", EcxGuest.u1SMX, EcxHost.u1SMX);
2124 pHlp->pfnPrintf(pHlp, "Enhanced SpeedStep Technology = %d (%d)\n", EcxGuest.u1EST, EcxHost.u1EST);
2125            pHlp->pfnPrintf(pHlp, "Thermal Monitor 2                      = %d (%d)\n", EcxGuest.u1TM2, EcxHost.u1TM2);
2126 pHlp->pfnPrintf(pHlp, "Supports Supplemental SSE3 or not = %d (%d)\n", EcxGuest.u1SSSE3, EcxHost.u1SSSE3);
2127 pHlp->pfnPrintf(pHlp, "L1 Context ID = %d (%d)\n", EcxGuest.u1CNTXID, EcxHost.u1CNTXID);
2128 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u2Reserved2, EcxHost.u2Reserved2);
2129 pHlp->pfnPrintf(pHlp, "CMPXCHG16B = %d (%d)\n", EcxGuest.u1CX16, EcxHost.u1CX16);
2130 pHlp->pfnPrintf(pHlp, "xTPR Update Control = %d (%d)\n", EcxGuest.u1TPRUpdate, EcxHost.u1TPRUpdate);
2131 pHlp->pfnPrintf(pHlp, "Perf/Debug Capability MSR = %d (%d)\n", EcxGuest.u1PDCM, EcxHost.u1PDCM);
2132 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u2Reserved3, EcxHost.u2Reserved3);
2133 pHlp->pfnPrintf(pHlp, "Direct Cache Access = %d (%d)\n", EcxGuest.u1DCA, EcxHost.u1DCA);
2134 pHlp->pfnPrintf(pHlp, "Supports SSE4_1 or not = %d (%d)\n", EcxGuest.u1SSE4_1, EcxHost.u1SSE4_1);
2135 pHlp->pfnPrintf(pHlp, "Supports SSE4_2 or not = %d (%d)\n", EcxGuest.u1SSE4_2, EcxHost.u1SSE4_2);
2136 pHlp->pfnPrintf(pHlp, "Supports the x2APIC extensions = %d (%d)\n", EcxGuest.u1x2APIC, EcxHost.u1x2APIC);
2137 pHlp->pfnPrintf(pHlp, "Supports MOVBE = %d (%d)\n", EcxGuest.u1MOVBE, EcxHost.u1MOVBE);
2138 pHlp->pfnPrintf(pHlp, "Supports POPCNT = %d (%d)\n", EcxGuest.u1POPCNT, EcxHost.u1POPCNT);
2139 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u2Reserved4, EcxHost.u2Reserved4);
2140 pHlp->pfnPrintf(pHlp, "Supports XSAVE = %d (%d)\n", EcxGuest.u1XSAVE, EcxHost.u1XSAVE);
2141 pHlp->pfnPrintf(pHlp, "Supports OSXSAVE = %d (%d)\n", EcxGuest.u1OSXSAVE, EcxHost.u1OSXSAVE);
2142 pHlp->pfnPrintf(pHlp, "Reserved = %#x (%#x)\n",EcxGuest.u4Reserved5, EcxHost.u4Reserved5);
2143 }
2144 }
2145 if (cStdMax >= 2 && iVerbosity)
2146 {
2147 /** @todo */
2148 }
2149
2150 /*
2151 * Extended.
2152 * Implemented according to the AMD specs.
2153 */
2154 unsigned cExtMax = pVM->cpum.s.aGuestCpuIdExt[0].eax & 0xffff;
2155
2156 pHlp->pfnPrintf(pHlp,
2157 "\n"
2158 " RAW Extended CPUIDs\n"
2159 " Function eax ebx ecx edx\n");
2160 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt); i++)
2161 {
2162 Guest = pVM->cpum.s.aGuestCpuIdExt[i];
2163 ASMCpuId(0x80000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2164
2165 pHlp->pfnPrintf(pHlp,
2166 "Gst: %08x %08x %08x %08x %08x%s\n"
2167 "Hst: %08x %08x %08x %08x\n",
2168 0x80000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
2169 i <= cExtMax ? "" : "*",
2170 Host.eax, Host.ebx, Host.ecx, Host.edx);
2171 }
2172
2173 /*
2174 * Understandable output
2175 */
2176 if (iVerbosity)
2177 {
2178 Guest = pVM->cpum.s.aGuestCpuIdExt[0];
2179 pHlp->pfnPrintf(pHlp,
2180 "Ext Name: %.4s%.4s%.4s\n"
2181 "Ext Supports: 0x80000000-%#010x\n",
2182 &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
2183 }
2184
2185 if (iVerbosity && cExtMax >= 1)
2186 {
2187 Guest = pVM->cpum.s.aGuestCpuIdExt[1];
2188 uint32_t uEAX = Guest.eax;
2189 pHlp->pfnPrintf(pHlp,
2190 "Family: %d \tExtended: %d \tEffective: %d\n"
2191 "Model: %d \tExtended: %d \tEffective: %d\n"
2192 "Stepping: %d\n"
2193 "Brand ID: %#05x\n",
2194                        (uEAX >> 8) & 0xf, (uEAX >> 20) & 0xff, ASMGetCpuFamily(uEAX),
2195 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
2196 ASMGetCpuStepping(uEAX),
2197 Guest.ebx & 0xfff);
2198
2199 if (iVerbosity == 1)
2200 {
2201 uint32_t uEDX = Guest.edx;
2202 pHlp->pfnPrintf(pHlp, "Features EDX: ");
2203 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU");
2204 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " VME");
2205 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " DE");
2206 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " PSE");
2207 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TSC");
2208 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " MSR");
2209 if (uEDX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " PAE");
2210 if (uEDX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MCE");
2211 if (uEDX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " CX8");
2212 if (uEDX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " APIC");
2213 if (uEDX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " 10");
2214 if (uEDX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SCR");
2215 if (uEDX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " MTRR");
2216 if (uEDX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PGE");
2217 if (uEDX & RT_BIT(14)) pHlp->pfnPrintf(pHlp, " MCA");
2218 if (uEDX & RT_BIT(15)) pHlp->pfnPrintf(pHlp, " CMOV");
2219 if (uEDX & RT_BIT(16)) pHlp->pfnPrintf(pHlp, " PAT");
2220 if (uEDX & RT_BIT(17)) pHlp->pfnPrintf(pHlp, " PSE36");
2221 if (uEDX & RT_BIT(18)) pHlp->pfnPrintf(pHlp, " 18");
2222 if (uEDX & RT_BIT(19)) pHlp->pfnPrintf(pHlp, " 19");
2223 if (uEDX & RT_BIT(20)) pHlp->pfnPrintf(pHlp, " NX");
2224 if (uEDX & RT_BIT(21)) pHlp->pfnPrintf(pHlp, " 21");
2225 if (uEDX & RT_BIT(22)) pHlp->pfnPrintf(pHlp, " ExtMMX");
2226 if (uEDX & RT_BIT(23)) pHlp->pfnPrintf(pHlp, " MMX");
2227 if (uEDX & RT_BIT(24)) pHlp->pfnPrintf(pHlp, " FXSR");
2228 if (uEDX & RT_BIT(25)) pHlp->pfnPrintf(pHlp, " FastFXSR");
2229 if (uEDX & RT_BIT(26)) pHlp->pfnPrintf(pHlp, " Page1GB");
2230 if (uEDX & RT_BIT(27)) pHlp->pfnPrintf(pHlp, " RDTSCP");
2231 if (uEDX & RT_BIT(28)) pHlp->pfnPrintf(pHlp, " 28");
2232 if (uEDX & RT_BIT(29)) pHlp->pfnPrintf(pHlp, " LongMode");
2233 if (uEDX & RT_BIT(30)) pHlp->pfnPrintf(pHlp, " Ext3DNow");
2234 if (uEDX & RT_BIT(31)) pHlp->pfnPrintf(pHlp, " 3DNow");
2235 pHlp->pfnPrintf(pHlp, "\n");
2236
2237 uint32_t uECX = Guest.ecx;
2238 pHlp->pfnPrintf(pHlp, "Features ECX: ");
2239 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " LAHF/SAHF");
2240 if (uECX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " CMPL");
2241 if (uECX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " SVM");
2242 if (uECX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " ExtAPIC");
2243 if (uECX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " CR8L");
2244 if (uECX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " ABM");
2245 if (uECX & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " SSE4A");
2246 if (uECX & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " MISALNSSE");
2247 if (uECX & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " 3DNOWPRF");
2248 if (uECX & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " OSVW");
2249 if (uECX & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " IBS");
2250 if (uECX & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " SSE5");
2251 if (uECX & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " SKINIT");
2252 if (uECX & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " WDT");
2253            for (unsigned iBit = 14; iBit < 32; iBit++)
2254 if (uECX & RT_BIT(iBit))
2255 pHlp->pfnPrintf(pHlp, " %d", iBit);
2256 pHlp->pfnPrintf(pHlp, "\n");
2257 }
2258 else
2259 {
2260 ASMCpuId(0x80000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2261
2262 uint32_t uEdxGst = Guest.edx;
2263 uint32_t uEdxHst = Host.edx;
2264 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
2265 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
2266 pHlp->pfnPrintf(pHlp, "VME - Virtual 8086 Mode Enhancements = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
2267 pHlp->pfnPrintf(pHlp, "DE - Debugging extensions = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
2268 pHlp->pfnPrintf(pHlp, "PSE - Page Size Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
2269 pHlp->pfnPrintf(pHlp, "TSC - Time Stamp Counter = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
2270 pHlp->pfnPrintf(pHlp, "MSR - K86 Model Specific Registers = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
2271 pHlp->pfnPrintf(pHlp, "PAE - Physical Address Extension = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
2272 pHlp->pfnPrintf(pHlp, "MCE - Machine Check Exception = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
2273 pHlp->pfnPrintf(pHlp, "CX8 - CMPXCHG8B instruction = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
2274 pHlp->pfnPrintf(pHlp, "APIC - APIC On-Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
2275 pHlp->pfnPrintf(pHlp, "10 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
2276 pHlp->pfnPrintf(pHlp, "SEP - SYSCALL and SYSRET = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
2277 pHlp->pfnPrintf(pHlp, "MTRR - Memory Type Range Registers = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
2278 pHlp->pfnPrintf(pHlp, "PGE - PTE Global Bit = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
2279 pHlp->pfnPrintf(pHlp, "MCA - Machine Check Architecture = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
2280 pHlp->pfnPrintf(pHlp, "CMOV - Conditional Move Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
2281 pHlp->pfnPrintf(pHlp, "PAT - Page Attribute Table = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
2282            pHlp->pfnPrintf(pHlp, "PSE-36 - 36-bit Page Size Extension    = %d (%d)\n", !!(uEdxGst & RT_BIT(17)),  !!(uEdxHst & RT_BIT(17)));
2283 pHlp->pfnPrintf(pHlp, "18 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
2284 pHlp->pfnPrintf(pHlp, "19 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
2285 pHlp->pfnPrintf(pHlp, "NX - No-Execute Page Protection = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
2286 pHlp->pfnPrintf(pHlp, "DS - Debug Store = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
2287 pHlp->pfnPrintf(pHlp, "AXMMX - AMD Extensions to MMX Instr. = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
2288 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
2289 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
2290 pHlp->pfnPrintf(pHlp, "25 - AMD fast FXSAVE and FXRSTOR Instr.= %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
2291 pHlp->pfnPrintf(pHlp, "26 - 1 GB large page support = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
2292 pHlp->pfnPrintf(pHlp, "27 - RDTSCP instruction = %d (%d)\n", !!(uEdxGst & RT_BIT(27)), !!(uEdxHst & RT_BIT(27)));
2293 pHlp->pfnPrintf(pHlp, "28 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(28)), !!(uEdxHst & RT_BIT(28)));
2294 pHlp->pfnPrintf(pHlp, "29 - AMD Long Mode = %d (%d)\n", !!(uEdxGst & RT_BIT(29)), !!(uEdxHst & RT_BIT(29)));
2295 pHlp->pfnPrintf(pHlp, "30 - AMD Extensions to 3DNow = %d (%d)\n", !!(uEdxGst & RT_BIT(30)), !!(uEdxHst & RT_BIT(30)));
2296 pHlp->pfnPrintf(pHlp, "31 - AMD 3DNow = %d (%d)\n", !!(uEdxGst & RT_BIT(31)), !!(uEdxHst & RT_BIT(31)));
2297
2298 uint32_t uEcxGst = Guest.ecx;
2299 uint32_t uEcxHst = Host.ecx;
2300 pHlp->pfnPrintf(pHlp, "LahfSahf - LAHF/SAHF in 64-bit mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 0)), !!(uEcxHst & RT_BIT( 0)));
2301 pHlp->pfnPrintf(pHlp, "CmpLegacy - Core MP legacy mode (depr) = %d (%d)\n", !!(uEcxGst & RT_BIT( 1)), !!(uEcxHst & RT_BIT( 1)));
2302 pHlp->pfnPrintf(pHlp, "SVM - AMD VM Extensions = %d (%d)\n", !!(uEcxGst & RT_BIT( 2)), !!(uEcxHst & RT_BIT( 2)));
2303 pHlp->pfnPrintf(pHlp, "APIC registers starting at 0x400 = %d (%d)\n", !!(uEcxGst & RT_BIT( 3)), !!(uEcxHst & RT_BIT( 3)));
2304 pHlp->pfnPrintf(pHlp, "AltMovCR8 - LOCK MOV CR0 means MOV CR8 = %d (%d)\n", !!(uEcxGst & RT_BIT( 4)), !!(uEcxHst & RT_BIT( 4)));
2305 pHlp->pfnPrintf(pHlp, "Advanced bit manipulation = %d (%d)\n", !!(uEcxGst & RT_BIT( 5)), !!(uEcxHst & RT_BIT( 5)));
2306 pHlp->pfnPrintf(pHlp, "SSE4A instruction support = %d (%d)\n", !!(uEcxGst & RT_BIT( 6)), !!(uEcxHst & RT_BIT( 6)));
2307 pHlp->pfnPrintf(pHlp, "Misaligned SSE mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 7)), !!(uEcxHst & RT_BIT( 7)));
2308 pHlp->pfnPrintf(pHlp, "PREFETCH and PREFETCHW instruction = %d (%d)\n", !!(uEcxGst & RT_BIT( 8)), !!(uEcxHst & RT_BIT( 8)));
2309 pHlp->pfnPrintf(pHlp, "OS visible workaround = %d (%d)\n", !!(uEcxGst & RT_BIT( 9)), !!(uEcxHst & RT_BIT( 9)));
2310 pHlp->pfnPrintf(pHlp, "Instruction based sampling = %d (%d)\n", !!(uEcxGst & RT_BIT(10)), !!(uEcxHst & RT_BIT(10)));
2311 pHlp->pfnPrintf(pHlp, "SSE5 support = %d (%d)\n", !!(uEcxGst & RT_BIT(11)), !!(uEcxHst & RT_BIT(11)));
2312 pHlp->pfnPrintf(pHlp, "SKINIT, STGI, and DEV support = %d (%d)\n", !!(uEcxGst & RT_BIT(12)), !!(uEcxHst & RT_BIT(12)));
2313 pHlp->pfnPrintf(pHlp, "Watchdog timer support. = %d (%d)\n", !!(uEcxGst & RT_BIT(13)), !!(uEcxHst & RT_BIT(13)));
2314 pHlp->pfnPrintf(pHlp, "31:14 - Reserved = %#x (%#x)\n", uEcxGst >> 14, uEcxHst >> 14);
2315 }
2316 }
2317
2318 if (iVerbosity && cExtMax >= 2)
2319 {
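        /* Leaves 0x80000002..0x80000004 return the processor brand string in
           EAX, EBX, ECX and EDX. */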
2320 char szString[4*4*3+1] = {0};
2321 uint32_t *pu32 = (uint32_t *)szString;
2322 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].eax;
2323 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ebx;
2324 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].ecx;
2325 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[2].edx;
2326 if (cExtMax >= 3)
2327 {
2328 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].eax;
2329 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ebx;
2330 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].ecx;
2331 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[3].edx;
2332 }
2333 if (cExtMax >= 4)
2334 {
2335 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].eax;
2336 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ebx;
2337 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].ecx;
2338 *pu32++ = pVM->cpum.s.aGuestCpuIdExt[4].edx;
2339 }
2340 pHlp->pfnPrintf(pHlp, "Full Name: %s\n", szString);
2341 }
2342
2343 if (iVerbosity && cExtMax >= 5)
2344 {
2345 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[5].eax;
2346 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[5].ebx;
2347 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[5].ecx;
2348 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[5].edx;
2349 char sz1[32];
2350 char sz2[32];
2351
2352 pHlp->pfnPrintf(pHlp,
2353 "TLB 2/4M Instr/Uni: %s %3d entries\n"
2354 "TLB 2/4M Data: %s %3d entries\n",
2355 getCacheAss((uEAX >> 8) & 0xff, sz1), (uEAX >> 0) & 0xff,
2356 getCacheAss((uEAX >> 24) & 0xff, sz2), (uEAX >> 16) & 0xff);
2357 pHlp->pfnPrintf(pHlp,
2358 "TLB 4K Instr/Uni: %s %3d entries\n"
2359 "TLB 4K Data: %s %3d entries\n",
2360 getCacheAss((uEBX >> 8) & 0xff, sz1), (uEBX >> 0) & 0xff,
2361 getCacheAss((uEBX >> 24) & 0xff, sz2), (uEBX >> 16) & 0xff);
2362 pHlp->pfnPrintf(pHlp, "L1 Instr Cache Line Size: %d bytes\n"
2363 "L1 Instr Cache Lines Per Tag: %d\n"
2364 "L1 Instr Cache Associativity: %s\n"
2365 "L1 Instr Cache Size: %d KB\n",
2366 (uEDX >> 0) & 0xff,
2367 (uEDX >> 8) & 0xff,
2368 getCacheAss((uEDX >> 16) & 0xff, sz1),
2369 (uEDX >> 24) & 0xff);
2370 pHlp->pfnPrintf(pHlp,
2371 "L1 Data Cache Line Size: %d bytes\n"
2372 "L1 Data Cache Lines Per Tag: %d\n"
2373 "L1 Data Cache Associativity: %s\n"
2374 "L1 Data Cache Size: %d KB\n",
2375 (uECX >> 0) & 0xff,
2376 (uECX >> 8) & 0xff,
2377 getCacheAss((uECX >> 16) & 0xff, sz1),
2378 (uECX >> 24) & 0xff);
2379 }
2380
2381 if (iVerbosity && cExtMax >= 6)
2382 {
2383 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[6].eax;
2384 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdExt[6].ebx;
2385 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[6].edx;
2386
2387 pHlp->pfnPrintf(pHlp,
2388 "L2 TLB 2/4M Instr/Uni: %s %4d entries\n"
2389 "L2 TLB 2/4M Data: %s %4d entries\n",
2390 getL2CacheAss((uEAX >> 12) & 0xf), (uEAX >> 0) & 0xfff,
2391 getL2CacheAss((uEAX >> 28) & 0xf), (uEAX >> 16) & 0xfff);
2392 pHlp->pfnPrintf(pHlp,
2393 "L2 TLB 4K Instr/Uni: %s %4d entries\n"
2394 "L2 TLB 4K Data: %s %4d entries\n",
2395 getL2CacheAss((uEBX >> 12) & 0xf), (uEBX >> 0) & 0xfff,
2396 getL2CacheAss((uEBX >> 28) & 0xf), (uEBX >> 16) & 0xfff);
2397 pHlp->pfnPrintf(pHlp,
2398 "L2 Cache Line Size: %d bytes\n"
2399 "L2 Cache Lines Per Tag: %d\n"
2400 "L2 Cache Associativity: %s\n"
2401 "L2 Cache Size: %d KB\n",
2402 (uEDX >> 0) & 0xff,
2403 (uEDX >> 8) & 0xf,
2404 getL2CacheAss((uEDX >> 12) & 0xf),
2405 (uEDX >> 16) & 0xffff);
2406 }
2407
2408 if (iVerbosity && cExtMax >= 7)
2409 {
2410 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdExt[7].edx;
2411
2412 pHlp->pfnPrintf(pHlp, "APM Features: ");
2413 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " TS");
2414 if (uEDX & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " FID");
2415 if (uEDX & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " VID");
2416 if (uEDX & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " TTP");
2417 if (uEDX & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " TM");
2418 if (uEDX & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " STC");
2419 for (unsigned iBit = 6; iBit < 32; iBit++)
2420 if (uEDX & RT_BIT(iBit))
2421 pHlp->pfnPrintf(pHlp, " %d", iBit);
2422 pHlp->pfnPrintf(pHlp, "\n");
2423 }
2424
2425 if (iVerbosity && cExtMax >= 8)
2426 {
2427 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdExt[8].eax;
2428 uint32_t uECX = pVM->cpum.s.aGuestCpuIdExt[8].ecx;
2429
2430 pHlp->pfnPrintf(pHlp,
2431 "Physical Address Width: %d bits\n"
2432 "Virtual Address Width: %d bits\n",
2433 (uEAX >> 0) & 0xff,
2434 (uEAX >> 8) & 0xff);
2435 pHlp->pfnPrintf(pHlp,
2436 "Physical Core Count: %d\n",
2437                        ((uECX >> 0) & 0xff) + 1);
2438 }
2439
2440
2441 /*
2442 * Centaur.
2443 */
2444 unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdCentaur[0].eax & 0xffff;
2445
2446 pHlp->pfnPrintf(pHlp,
2447 "\n"
2448 " RAW Centaur CPUIDs\n"
2449 " Function eax ebx ecx edx\n");
2450 for (unsigned i = 0; i < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur); i++)
2451 {
2452 Guest = pVM->cpum.s.aGuestCpuIdCentaur[i];
2453 ASMCpuId(0xc0000000 | i, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2454
2455 pHlp->pfnPrintf(pHlp,
2456 "Gst: %08x %08x %08x %08x %08x%s\n"
2457 "Hst: %08x %08x %08x %08x\n",
2458 0xc0000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
2459 i <= cCentaurMax ? "" : "*",
2460 Host.eax, Host.ebx, Host.ecx, Host.edx);
2461 }
2462
2463 /*
2464 * Understandable output
2465 */
2466 if (iVerbosity)
2467 {
2468 Guest = pVM->cpum.s.aGuestCpuIdCentaur[0];
2469 pHlp->pfnPrintf(pHlp,
2470 "Centaur Supports: 0xc0000000-%#010x\n",
2471 Guest.eax);
2472 }
2473
2474 if (iVerbosity && cCentaurMax >= 1)
2475 {
2476 ASMCpuId(0xc0000001, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
2477        uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdCentaur[1].edx;
2478 uint32_t uEdxHst = Host.edx;
2479
2480 if (iVerbosity == 1)
2481 {
2482 pHlp->pfnPrintf(pHlp, "Centaur Features EDX: ");
2483 if (uEdxGst & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " AIS");
2484 if (uEdxGst & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " AIS-E");
2485 if (uEdxGst & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " RNG");
2486 if (uEdxGst & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " RNG-E");
2487 if (uEdxGst & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " LH");
2488 if (uEdxGst & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " FEMMS");
2489 if (uEdxGst & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " ACE");
2490 if (uEdxGst & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " ACE-E");
2491 /* possibly indicating MM/HE and MM/HE-E on older chips... */
2492 if (uEdxGst & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " ACE2");
2493 if (uEdxGst & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " ACE2-E");
2494 if (uEdxGst & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " PHE");
2495 if (uEdxGst & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " PHE-E");
2496 if (uEdxGst & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " PMM");
2497 if (uEdxGst & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PMM-E");
2498 for (unsigned iBit = 14; iBit < 32; iBit++)
2499 if (uEdxGst & RT_BIT(iBit))
2500 pHlp->pfnPrintf(pHlp, " %d", iBit);
2501 pHlp->pfnPrintf(pHlp, "\n");
2502 }
2503 else
2504 {
2505 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
2506 pHlp->pfnPrintf(pHlp, "AIS - Alternate Instruction Set = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
2507 pHlp->pfnPrintf(pHlp, "AIS-E - AIS enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
2508 pHlp->pfnPrintf(pHlp, "RNG - Random Number Generator = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
2509 pHlp->pfnPrintf(pHlp, "RNG-E - RNG enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
2510 pHlp->pfnPrintf(pHlp, "LH - LongHaul MSR 0000_110Ah = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
2511 pHlp->pfnPrintf(pHlp, "FEMMS - FEMMS = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
2512 pHlp->pfnPrintf(pHlp, "ACE - Advanced Cryptography Engine = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
2513 pHlp->pfnPrintf(pHlp, "ACE-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
2514 /* possibly indicating MM/HE and MM/HE-E on older chips... */
2515 pHlp->pfnPrintf(pHlp, "ACE2 - Advanced Cryptography Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
2516 pHlp->pfnPrintf(pHlp, "ACE2-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
2517 pHlp->pfnPrintf(pHlp, "PHE - Hash Engine = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
2518 pHlp->pfnPrintf(pHlp, "PHE-E - PHE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
2519 pHlp->pfnPrintf(pHlp, "PMM - Montgomery Multiplier = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
2520 pHlp->pfnPrintf(pHlp, "PMM-E - PMM enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
2521 for (unsigned iBit = 14; iBit < 32; iBit++)
2522 if ((uEdxGst | uEdxHst) & RT_BIT(iBit))
2523                    pHlp->pfnPrintf(pHlp, "Bit %d = %d (%d)\n", iBit, !!(uEdxGst & RT_BIT(iBit)), !!(uEdxHst & RT_BIT(iBit)));
2524 pHlp->pfnPrintf(pHlp, "\n");
2525 }
2526 }
2527}
2528
2529
2530/**
2531 * Structure used when disassembling instructions in DBGF.
2532 * This is used so the reader function can get at the state it needs.
2533 */
2534typedef struct CPUMDISASSTATE
2535{
2536 /** Pointer to the CPU structure. */
2537 PDISCPUSTATE pCpu;
2538 /** The VM handle. */
2539 PVM pVM;
2540 /** The VMCPU handle. */
2541 PVMCPU pVCpu;
2542    /** Pointer to the first byte in the segment. */
2543 RTGCUINTPTR GCPtrSegBase;
2544 /** Pointer to the byte after the end of the segment. (might have wrapped!) */
2545 RTGCUINTPTR GCPtrSegEnd;
2546 /** The size of the segment minus 1. */
2547 RTGCUINTPTR cbSegLimit;
2548 /** Pointer to the current page - R3 Ptr. */
2549 void const *pvPageR3;
2550 /** Pointer to the current page - GC Ptr. */
2551 RTGCPTR pvPageGC;
2552 /** The lock information that PGMPhysReleasePageMappingLock needs. */
2553 PGMPAGEMAPLOCK PageMapLock;
2554 /** Whether the PageMapLock is valid or not. */
2555 bool fLocked;
2556 /** 64 bits mode or not. */
2557 bool f64Bits;
2558} CPUMDISASSTATE, *PCPUMDISASSTATE;
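/* CPUMR3DisasmInstrCPU below stores a pointer to an instance of this
   structure in DISCPUSTATE::apvUserData[0] so that the reader callback
   (cpumR3DisasInstrRead) can retrieve it. */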
2559
2560
2561/**
2562 * Instruction reader.
2563 *
2564 * @returns VBox status code.
2565 * @param PtrSrc Address to read from.
2566 * In our case this is relative to the selector pointed to by the 2nd user argument of uDisCpu.
2567 * @param pu8Dst Where to store the bytes.
2568 * @param cbRead Number of bytes to read.
2569 * @param uDisCpu Pointer to the disassembler cpu state.
2570 * In this context it is always a pointer to the DISCPUSTATE which holds our CPUMDISASSTATE in apvUserData[0].
2571 */
2572static DECLCALLBACK(int) cpumR3DisasInstrRead(RTUINTPTR PtrSrc, uint8_t *pu8Dst, unsigned cbRead, void *uDisCpu)
2573{
2574 PDISCPUSTATE pCpu = (PDISCPUSTATE)uDisCpu;
2575 PCPUMDISASSTATE pState = (PCPUMDISASSTATE)pCpu->apvUserData[0];
2576 Assert(cbRead > 0);
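    /* Copy page by page, re-translating the guest address whenever the read
       crosses a page boundary. */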
2577 for (;;)
2578 {
2579 RTGCUINTPTR GCPtr = PtrSrc + pState->GCPtrSegBase;
2580
2581 /* Need to update the page translation? */
2582 if ( !pState->pvPageR3
2583 || (GCPtr >> PAGE_SHIFT) != (pState->pvPageGC >> PAGE_SHIFT))
2584 {
2585 int rc = VINF_SUCCESS;
2586
2587 /* translate the address */
2588 pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK;
2589 if ( MMHyperIsInsideArea(pState->pVM, pState->pvPageGC)
2590 && !HWACCMIsEnabled(pState->pVM))
2591 {
2592 pState->pvPageR3 = MMHyperRCToR3(pState->pVM, (RTRCPTR)pState->pvPageGC);
2593 if (!pState->pvPageR3)
2594 rc = VERR_INVALID_POINTER;
2595 }
2596 else
2597 {
2598 /* Release mapping lock previously acquired. */
2599 if (pState->fLocked)
2600 PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
2601 rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVCpu, pState->pvPageGC, &pState->pvPageR3, &pState->PageMapLock);
2602 pState->fLocked = RT_SUCCESS_NP(rc);
2603 }
2604 if (RT_FAILURE(rc))
2605 {
2606 pState->pvPageR3 = NULL;
2607 return rc;
2608 }
2609 }
2610
2611        /* check the segment limit */
2612 if (!pState->f64Bits && PtrSrc > pState->cbSegLimit)
2613 return VERR_OUT_OF_SELECTOR_BOUNDS;
2614
2615 /* calc how much we can read */
2616 uint32_t cb = PAGE_SIZE - (GCPtr & PAGE_OFFSET_MASK);
2617 if (!pState->f64Bits)
2618 {
2619 RTGCUINTPTR cbSeg = pState->GCPtrSegEnd - GCPtr;
2620 if (cb > cbSeg && cbSeg)
2621 cb = cbSeg;
2622 }
2623 if (cb > cbRead)
2624 cb = cbRead;
2625
2626 /* read and advance */
2627 memcpy(pu8Dst, (char *)pState->pvPageR3 + (GCPtr & PAGE_OFFSET_MASK), cb);
2628 cbRead -= cb;
2629 if (!cbRead)
2630 return VINF_SUCCESS;
2631 pu8Dst += cb;
2632 PtrSrc += cb;
2633 }
2634}
2635
2636
2637/**
2638 * Disassemble an instruction and return the information in the provided structure.
2639 *
2640 * @returns VBox status code.
2641 * @param pVM VM Handle
2642 * @param pVCpu VMCPU Handle
2643 * @param pCtx CPU context
2644 * @param GCPtrPC Program counter (relative to CS) to disassemble from.
2645 * @param pCpu Disassembly state
2646 * @param pszPrefix String prefix for logging (debug only)
2647 *
2648 */
2649VMMR3DECL(int) CPUMR3DisasmInstrCPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPTR GCPtrPC, PDISCPUSTATE pCpu, const char *pszPrefix)
2650{
2651 CPUMDISASSTATE State;
2652 int rc;
2653
2654 const PGMMODE enmMode = PGMGetGuestMode(pVCpu);
2655 State.pCpu = pCpu;
2656 State.pvPageGC = 0;
2657 State.pvPageR3 = NULL;
2658 State.pVM = pVM;
2659 State.pVCpu = pVCpu;
2660 State.fLocked = false;
2661 State.f64Bits = false;
2662
2663 /*
2664 * Get selector information.
2665 */
2666 if ( (pCtx->cr0 & X86_CR0_PE)
2667 && pCtx->eflags.Bits.u1VM == 0)
2668 {
2669 if (CPUMAreHiddenSelRegsValid(pVM))
2670 {
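            /* CS.L set while in long mode means 64-bit code; otherwise CS.D
               picks the default operand size (32 vs 16 bit). */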
2671 State.f64Bits = enmMode >= PGMMODE_AMD64 && pCtx->csHid.Attr.n.u1Long;
2672 State.GCPtrSegBase = pCtx->csHid.u64Base;
2673 State.GCPtrSegEnd = pCtx->csHid.u32Limit + 1 + (RTGCUINTPTR)pCtx->csHid.u64Base;
2674 State.cbSegLimit = pCtx->csHid.u32Limit;
2675 pCpu->mode = (State.f64Bits)
2676 ? CPUMODE_64BIT
2677 : pCtx->csHid.Attr.n.u1DefBig
2678 ? CPUMODE_32BIT
2679 : CPUMODE_16BIT;
2680 }
2681 else
2682 {
2683 DBGFSELINFO SelInfo;
2684
2685 rc = SELMR3GetShadowSelectorInfo(pVM, pCtx->cs, &SelInfo);
2686 if (RT_FAILURE(rc))
2687 {
2688 AssertMsgFailed(("SELMR3GetShadowSelectorInfo failed for %04X:%RGv rc=%d\n", pCtx->cs, GCPtrPC, rc));
2689 return rc;
2690 }
2691
2692 /*
2693 * Validate the selector.
2694 */
2695 rc = DBGFR3SelInfoValidateCS(&SelInfo, pCtx->ss);
2696 if (RT_FAILURE(rc))
2697 {
2698                AssertMsgFailed(("DBGFR3SelInfoValidateCS failed for %04X:%RGv rc=%d\n", pCtx->cs, GCPtrPC, rc));
2699 return rc;
2700 }
2701 State.GCPtrSegBase = SelInfo.GCPtrBase;
2702 State.GCPtrSegEnd = SelInfo.cbLimit + 1 + (RTGCUINTPTR)SelInfo.GCPtrBase;
2703 State.cbSegLimit = SelInfo.cbLimit;
2704 pCpu->mode = SelInfo.u.Raw.Gen.u1DefBig ? CPUMODE_32BIT : CPUMODE_16BIT;
2705 }
2706 }
2707 else
2708 {
2709 /* real or V86 mode */
2710 pCpu->mode = CPUMODE_16BIT;
2711 State.GCPtrSegBase = pCtx->cs * 16;
2712 State.GCPtrSegEnd = 0xFFFFFFFF;
2713 State.cbSegLimit = 0xFFFFFFFF;
2714 }
2715
2716 /*
2717 * Disassemble the instruction.
2718 */
2719 pCpu->pfnReadBytes = cpumR3DisasInstrRead;
2720 pCpu->apvUserData[0] = &State;
2721
2722 uint32_t cbInstr;
2723#ifndef LOG_ENABLED
2724 rc = DISInstr(pCpu, GCPtrPC, 0, &cbInstr, NULL);
2725 if (RT_SUCCESS(rc))
2726 {
2727#else
2728 char szOutput[160];
2729 rc = DISInstr(pCpu, GCPtrPC, 0, &cbInstr, &szOutput[0]);
2730 if (RT_SUCCESS(rc))
2731 {
2732 /* log it */
2733 if (pszPrefix)
2734 Log(("%s-CPU%d: %s", pszPrefix, pVCpu->idCpu, szOutput));
2735 else
2736 Log(("%s", szOutput));
2737#endif
2738 rc = VINF_SUCCESS;
2739 }
2740 else
2741 Log(("CPUMR3DisasmInstrCPU: DISInstr failed for %04X:%RGv rc=%Rrc\n", pCtx->cs, GCPtrPC, rc));
2742
2743 /* Release mapping lock acquired in cpumR3DisasInstrRead. */
2744 if (State.fLocked)
2745 PGMPhysReleasePageMappingLock(pVM, &State.PageMapLock);
2746
2747 return rc;
2748}
2749
2750#ifdef DEBUG
2751
2752/**
2753 * Disassemble an instruction and dump it to the log
2754 *
2755 * @returns VBox status code.
2756 * @param pVM VM Handle
2757 * @param pVCpu VMCPU Handle
2758 * @param pCtx CPU context
2759 * @param pc GC instruction pointer
2760 * @param pszPrefix String prefix for logging
2761 *
2762 * @deprecated Use DBGFR3DisasInstrCurrentLog().
2763 */
2764VMMR3DECL(void) CPUMR3DisasmInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPTR pc, const char *pszPrefix)
2765{
2766 DISCPUSTATE Cpu;
2767 CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pc, &Cpu, pszPrefix);
2768}
2769
2770
2771/**
2772 * Debug helper - Saves guest context on raw mode entry (for fatal dump)
2773 *
2774 * @internal
2775 */
2776VMMR3DECL(void) CPUMR3SaveEntryCtx(PVM pVM)
2777{
2778    /** @todo SMP support!! */
2779 pVM->cpum.s.GuestEntry = *CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
2780}
2781
2782#endif /* DEBUG */
2783
2784/**
2785 * API for controlling a few of the CPU features found in CR4.
2786 *
2787 * Currently only X86_CR4_TSD is accepted as input.
2788 *
2789 * @returns VBox status code.
2790 *
2791 * @param pVM The VM handle.
2792 * @param fOr The CR4 OR mask.
2793 * @param fAnd The CR4 AND mask.
2794 */
2795VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd)
2796{
2797 AssertMsgReturn(!(fOr & ~(X86_CR4_TSD)), ("%#x\n", fOr), VERR_INVALID_PARAMETER);
2798 AssertMsgReturn((fAnd & ~(X86_CR4_TSD)) == ~(X86_CR4_TSD), ("%#x\n", fAnd), VERR_INVALID_PARAMETER);
2799
2800 pVM->cpum.s.CR4.OrMask &= fAnd;
2801 pVM->cpum.s.CR4.OrMask |= fOr;
2802
2803 return VINF_SUCCESS;
2804}
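/* Hypothetical usage sketch (not a call made in this file): force RDTSC to
   trap by setting CR4.TSD while keeping all previously set bits:
       CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~(RTHCUINTREG)0);
   An all-ones fAnd leaves the existing OR mask alone; fOr adds X86_CR4_TSD. */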
2805
2806
2807/**
2808 * Gets a pointer to the array of standard CPUID leaves.
2809 *
2810 * CPUMR3GetGuestCpuIdStdMax() gives the size of the array.
2811 *
2812 * @returns Pointer to the standard CPUID leaves (read-only).
2813 * @param pVM The VM handle.
2814 * @remark Intended for PATM.
2815 */
2816VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdStdRCPtr(PVM pVM)
2817{
2818    return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdStd[0]);
2819}
2820
2821
2822/**
2823 * Gets a pointer to the array of extended CPUID leaves.
2824 *
2825 * CPUMGetGuestCpuIdExtMax() gives the size of the array.
2826 *
2827 * @returns Pointer to the extended CPUID leaves (read-only).
2828 * @param pVM The VM handle.
2829 * @remark Intended for PATM.
2830 */
2831VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdExtRCPtr(PVM pVM)
2832{
2833 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdExt[0]);
2834}
2835
2836
2837/**
2838 * Gets a pointer to the array of centaur CPUID leaves.
2839 *
2840 * CPUMGetGuestCpuIdCentaurMax() gives the size of the array.
2841 *
2842 * @returns Pointer to the centaur CPUID leaves (read-only).
2843 * @param pVM The VM handle.
2844 * @remark Intended for PATM.
2845 */
2846VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdCentaurRCPtr(PVM pVM)
2847{
2848 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdCentaur[0]);
2849}
2850
2851
2852/**
2853 * Gets a pointer to the default CPUID leaf.
2854 *
2855 * @returns Pointer to the default CPUID leaf (read-only).
2856 * @param pVM The VM handle.
2857 * @remark Intended for PATM.
2858 */
2859VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdDefRCPtr(PVM pVM)
2860{
2861 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
2862}