VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IEMR3.cpp@ 107044

Last change on this file since 107044 was 106731, checked in by vboxsync, 4 weeks ago

VMM/IEM: Workaround for profile build issue on win.arm64. jiraref:VBP-1253

1/* $Id: IEMR3.cpp 106731 2024-10-27 22:12:34Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/dbgf.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/ssm.h>
39#if defined(VBOX_VMM_TARGET_ARMV8)
40# include "IEMInternal-armv8.h"
41#else
42# include "IEMInternal.h"
43#endif
44#include <VBox/vmm/vm.h>
45#include <VBox/vmm/vmapi.h>
46#include <VBox/err.h>
47#ifdef VBOX_WITH_DEBUGGER
48# include <VBox/dbg.h>
49#endif
50
51#include <iprt/assert.h>
52#include <iprt/getopt.h>
53#ifdef IEM_WITH_TLB_TRACE
54# include <iprt/mem.h>
55#endif
56#include <iprt/string.h>
57
58#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
59# include "IEMN8veRecompiler.h"
60# include "IEMThreadedFunctions.h"
61# include "IEMInline.h"
62#endif
63
64
65/*********************************************************************************************************************************
66* Internal Functions *
67*********************************************************************************************************************************/
68static FNDBGFINFOARGVINT iemR3InfoITlb;
69static FNDBGFINFOARGVINT iemR3InfoDTlb;
70#ifdef IEM_WITH_TLB_TRACE
71static FNDBGFINFOARGVINT iemR3InfoTlbTrace;
72#endif
73#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
74static FNDBGFINFOARGVINT iemR3InfoTb;
75static FNDBGFINFOARGVINT iemR3InfoTbTop;
76#endif
77#ifdef VBOX_WITH_DEBUGGER
78static void iemR3RegisterDebuggerCommands(void);
79#endif
80
81
82#if !defined(VBOX_VMM_TARGET_ARMV8)
83static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
84{
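    /* Note: CASE_RET_STR below returns the enum constant's name with the
       "IEMTARGETCPU_" prefix skipped, by advancing the stringized literal
       pointer past the prefix length. */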
85 switch (enmTargetCpu)
86 {
87#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("IEMTARGETCPU_") - 1)
88 CASE_RET_STR(IEMTARGETCPU_8086);
89 CASE_RET_STR(IEMTARGETCPU_V20);
90 CASE_RET_STR(IEMTARGETCPU_186);
91 CASE_RET_STR(IEMTARGETCPU_286);
92 CASE_RET_STR(IEMTARGETCPU_386);
93 CASE_RET_STR(IEMTARGETCPU_486);
94 CASE_RET_STR(IEMTARGETCPU_PENTIUM);
95 CASE_RET_STR(IEMTARGETCPU_PPRO);
96 CASE_RET_STR(IEMTARGETCPU_CURRENT);
97#undef CASE_RET_STR
98 default: return "Unknown";
99 }
100}
101#endif
102
103
104#if defined(RT_ARCH_ARM64) && defined(_MSC_VER)
105# pragma warning(disable:4883) /* profile build: IEMR3.cpp(114) : warning C4883: 'IEMR3Init': function size suppresses optimizations*/
106#endif
107
108/**
109 * Initializes the interpreted execution manager.
110 *
111 * This must be called after CPUM as we're querying information from CPUM about
112 * the guest and host CPUs.
113 *
114 * @returns VBox status code.
115 * @param pVM The cross context VM structure.
116 */
117VMMR3DECL(int) IEMR3Init(PVM pVM)
118{
119 /*
120 * Read configuration.
121 */
122#if (!defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)) || defined(VBOX_WITH_IEM_RECOMPILER)
123 PCFGMNODE const pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
124 int rc;
125#endif
126
127#if !defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
128 /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
129 * Controls whether the custom VBox-specific CPUID host call interface is
130 * enabled or not. */
131# ifdef DEBUG_bird
132 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
133# else
134 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
135# endif
136 AssertLogRelRCReturn(rc, rc);
137#endif
138
139#ifdef VBOX_WITH_IEM_RECOMPILER
140 /** @cfgm{/IEM/MaxTbCount, uint32_t, 524288}
141 * Max number of TBs per EMT. */
142 uint32_t cMaxTbs = 0;
143 rc = CFGMR3QueryU32Def(pIem, "MaxTbCount", &cMaxTbs, _512K);
144 AssertLogRelRCReturn(rc, rc);
145 if (cMaxTbs < _16K || cMaxTbs > _8M)
146 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
147 "MaxTbCount value %u (%#x) is out of range (min %u, max %u)", cMaxTbs, cMaxTbs, _16K, _8M);
148
149 /** @cfgm{/IEM/InitialTbCount, uint32_t, 32768}
150 * Initial (minimum) number of TBs per EMT in ring-3. */
151 uint32_t cInitialTbs = 0;
152 rc = CFGMR3QueryU32Def(pIem, "InitialTbCount", &cInitialTbs, RT_MIN(cMaxTbs, _32K));
153 AssertLogRelRCReturn(rc, rc);
154 if (cInitialTbs < _16K || cInitialTbs > _8M)
155 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
156 "InitialTbCount value %u (%#x) is out of range (min %u, max %u)", cInitialTbs, cInitialTbs, _16K, _8M);
157
158 /* Check that the two values make sense together. Expect the user/API to do
159 the right thing or get lost. */
160 if (cInitialTbs > cMaxTbs)
161 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
162 "InitialTbCount value %u (%#x) is higher than the MaxTbCount value %u (%#x)",
163 cInitialTbs, cInitialTbs, cMaxTbs, cMaxTbs);
164
165 /** @cfgm{/IEM/MaxExecMem, uint64_t, 512 MiB}
166 * Max executable memory for recompiled code per EMT. */
167 uint64_t cbMaxExec = 0;
168 rc = CFGMR3QueryU64Def(pIem, "MaxExecMem", &cbMaxExec, _512M);
169 AssertLogRelRCReturn(rc, rc);
170 if (cbMaxExec < _1M || cbMaxExec > 16*_1G64)
171 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
172 "MaxExecMem value %'RU64 (%#RX64) is out of range (min %'RU64, max %'RU64)",
173 cbMaxExec, cbMaxExec, (uint64_t)_1M, 16*_1G64);
174
175 /** @cfgm{/IEM/ExecChunkSize, uint32_t, 0 (auto)}
176 * The executable memory allocator chunk size. */
177 uint32_t cbChunkExec = 0;
178 rc = CFGMR3QueryU32Def(pIem, "ExecChunkSize", &cbChunkExec, 0);
179 AssertLogRelRCReturn(rc, rc);
180 if (cbChunkExec != 0 && cbChunkExec != UINT32_MAX && (cbChunkExec < _1M || cbChunkExec > _256M))
181 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
182 "ExecChunkSize value %'RU32 (%#RX32) is out of range (min %'RU32, max %'RU32)",
183 cbChunkExec, cbChunkExec, _1M, _256M);
184
185 /** @cfgm{/IEM/InitialExecMemSize, uint64_t, 1}
186 * The initial executable memory allocator size (per EMT). The value is
187 * rounded up to the nearest chunk size, so 1 byte means one chunk. */
188 uint64_t cbInitialExec = 0;
189 rc = CFGMR3QueryU64Def(pIem, "InitialExecMemSize", &cbInitialExec, 0);
190 AssertLogRelRCReturn(rc, rc);
191 if (cbInitialExec > cbMaxExec)
192 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
193 "InitialExecMemSize value %'RU64 (%#RX64) is out of range (max %'RU64)",
194 cbInitialExec, cbInitialExec, cbMaxExec);
195
196 /** @cfgm{/IEM/NativeRecompileAtUsedCount, uint32_t, 16}
197 * The translation block use count value to do native recompilation at.
198 * Set to zero to disable native recompilation. */
199 uint32_t uTbNativeRecompileAtUsedCount = 16;
200 rc = CFGMR3QueryU32Def(pIem, "NativeRecompileAtUsedCount", &uTbNativeRecompileAtUsedCount, 16);
201 AssertLogRelRCReturn(rc, rc);
202
203 /** @cfgm{/IEM/HostICacheInvalidationViaHostAPI, bool, false}
204 * Whether to use any available host OS API for flushing the instruction cache
205 * after completing a translation block. */
206 bool fFlag = false;
207 rc = CFGMR3QueryBoolDef(pIem, "HostICacheInvalidationViaHostAPI", &fFlag, false);
208 AssertLogRelRCReturn(rc, rc);
209 uint8_t fHostICacheInvalidation = fFlag ? IEMNATIVE_ICACHE_F_USE_HOST_API : 0;
210
211 /** @cfgm{/IEM/HostICacheInvalidationEndWithIsb, bool, false}
212 * Whether to include an ISB in the instruction cache invalidation sequence
213 * after completing a translation block. */
214 fFlag = false;
215 rc = CFGMR3QueryBoolDef(pIem, "HostICacheInvalidationEndWithIsb", &fFlag, false);
216 AssertLogRelRCReturn(rc, rc);
217 if (fFlag)
218 fHostICacheInvalidation |= IEMNATIVE_ICACHE_F_END_WITH_ISH;
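    /* Note: the ISB referred to by the config key above is the ARM instruction
       synchronization barrier instruction. */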
219
220#endif /* VBOX_WITH_IEM_RECOMPILER */
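    /*
     * Illustrative note (not from the original source): the /IEM/ keys read above
     * live in the CFGM tree, so on a typical installation they can usually be
     * overridden per VM through the VBoxInternal extradata mapping, for instance
     * something along these lines (VM name and values are made-up examples):
     *      VBoxManage setextradata "MyVM" "VBoxInternal/IEM/MaxTbCount" 262144
     *      VBoxManage setextradata "MyVM" "VBoxInternal/IEM/NativeRecompileAtUsedCount" 0
     * Whether a given key is consulted also depends on the build-time #ifdefs above.
     */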
221
222 /*
223 * Initialize per-CPU data and register statistics.
224 */
225#if 1
226 uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
227 uint64_t const uInitialTlbPhysRev = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
228#else
229 uint64_t const uInitialTlbRevision = UINT64_C(0) + (IEMTLB_REVISION_INCR * 4U);
230 uint64_t const uInitialTlbPhysRev = UINT64_C(0) + (IEMTLB_PHYS_REV_INCR * 4U);
231#endif
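    /* Illustrative note: with the '#if 1' branch the initial revision values sit a
       couple hundred increments below the 64-bit wrap-around point, presumably so
       the revision rollover handling gets exercised early in the VM's lifetime
       rather than only after a very large number of TLB flushes. */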
232
233 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
234 {
235 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
236 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
237
238 pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
239#ifndef VBOX_VMM_TARGET_ARMV8
240 pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal = pVCpu->iem.s.DataTlb.uTlbRevisionGlobal = uInitialTlbRevision;
241#endif
242 pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev;
243#ifndef VBOX_VMM_TARGET_ARMV8
244 pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
245 pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
246 pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
247 pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
248#endif
249
250#ifndef VBOX_VMM_TARGET_ARMV8
251 pVCpu->iem.s.cTbsTillNextTimerPoll = 128;
252 pVCpu->iem.s.cTbsTillNextTimerPollPrev = 128;
253#endif
254
255 /*
256 * Host and guest CPU information.
257 */
258 if (idCpu == 0)
259 {
260 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
261 pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
262#if !defined(VBOX_VMM_TARGET_ARMV8)
263 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
264 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
265 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
266# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
267 if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor)
268 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
269 else
270# endif
271 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
272#else
273 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
274 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
275#endif
276
277#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
278 switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
279 {
280 case kCpumMicroarch_Intel_8086: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
281 case kCpumMicroarch_Intel_80186: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
282 case kCpumMicroarch_Intel_80286: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
283 case kCpumMicroarch_Intel_80386: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
284 case kCpumMicroarch_Intel_80486: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
285 case kCpumMicroarch_Intel_P5: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
286 case kCpumMicroarch_Intel_P6: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
287 case kCpumMicroarch_NEC_V20: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
288 case kCpumMicroarch_NEC_V30: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
289 default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
290 }
291 LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
292 iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
293 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
294#else
295 LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
296 CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
297 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
298#endif
299 }
300 else
301 {
302 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor;
303 pVCpu->iem.s.enmHostCpuVendor = pVM->apCpusR3[0]->iem.s.enmHostCpuVendor;
304 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0];
305 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1];
306#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
307 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;
308#endif
309 }
310
311 /*
312 * Mark all buffers free.
313 */
314 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
315 while (iMemMap-- > 0)
316 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
317
318#ifdef VBOX_WITH_IEM_RECOMPILER
319 /*
320 * Recompiler state and configuration distribution.
321 */
322 pVCpu->iem.s.uRegFpCtrl = IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED;
323 pVCpu->iem.s.uTbNativeRecompileAtUsedCount = uTbNativeRecompileAtUsedCount;
324 pVCpu->iem.s.fHostICacheInvalidation = fHostICacheInvalidation;
325#endif
326
327#ifdef IEM_WITH_TLB_TRACE
328 /*
329 * Allocate trace buffer.
330 */
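        /* Illustrative note: the buffer below holds RT_BIT_Z(cTlbTraceEntriesShift)
           entries, i.e. 64K entries for the shift value of 16 set here;
           idxTlbTraceEntry presumably wraps around within that power-of-two ring. */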
331 pVCpu->iem.s.idxTlbTraceEntry = 0;
332 pVCpu->iem.s.cTlbTraceEntriesShift = 16;
333 pVCpu->iem.s.paTlbTraceEntries = (PIEMTLBTRACEENTRY)RTMemPageAlloc( RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift)
334 * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
335 AssertLogRelReturn(pVCpu->iem.s.paTlbTraceEntries, VERR_NO_PAGE_MEMORY);
336#endif
337 }
338
339
340#ifdef VBOX_WITH_IEM_RECOMPILER
341 /*
342 * Initialize the TB allocator and cache (/ hash table).
343 *
344 * This is done by each EMT to try to get better thread/NUMA locality of
345 * the allocations.
346 */
347 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL, (PFNRT)iemTbInit, 6,
348 pVM, cInitialTbs, cMaxTbs, cbInitialExec, cbMaxExec, cbChunkExec);
349 AssertLogRelRCReturn(rc, rc);
350#endif
351
352 /*
353 * Register statistics.
354 */
355 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
356 {
357#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) /* quick fix for stupid structure duplication non-sense */
358 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
359 char szPat[128];
360 RT_NOREF_PV(szPat); /* lazy bird */
361 char szVal[128];
362 RT_NOREF_PV(szVal); /* lazy bird */
363
364 STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
365 "Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
366 STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
367 "Number of longjmp calls", "/IEM/CPU%u/cLongJumps", idCpu);
368 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
369 "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
370 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
371 "VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
372 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
373 "VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
374 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
375 "Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
376 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
377 "Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
378 STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
379 "Approx bytes written", "/IEM/CPU%u/cbWritten", idCpu);
380 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
381 "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);
382 STAMR3RegisterF(pVM, &pVCpu->iem.s.cMisalignedAtomics, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
383 "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu);
384
385 /* Code TLB: */
386 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
387 "Code TLB non-global revision", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobal", idCpu);
388 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
389 "Code TLB global revision", "/IEM/CPU%u/Tlb/Code/RevisionGlobal", idCpu);
390 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
391 "Code TLB non-global flushes", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobalFlushes", idCpu);
392 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
393 "Code TLB global flushes", "/IEM/CPU%u/Tlb/Code/RevisionGlobalFlushes", idCpu);
394 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
395 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/RevisionRollovers", idCpu);
396
397 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
398 "Code TLB physical revision", "/IEM/CPU%u/Tlb/Code/PhysicalRevision", idCpu);
399 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
400 "Code TLB revision flushes", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionFlushes", idCpu);
401 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
402 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionRollovers", idCpu);
403
404 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
405 "Code TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageGlobalCurLoads", idCpu);
406 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
407 "Code TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalFirstTag", idCpu);
408 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
409 "Code TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalLastTag", idCpu);
410
411 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
412 "Code TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalCurLoads", idCpu);
413 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
414 "Code TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalFirstTag", idCpu);
415 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
416 "Code TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalLastTag", idCpu);
417
418 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
419 "Code TLB page invalidation requests", "/IEM/CPU%u/Tlb/Code/InvlPg", idCpu);
420 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
421 "Code TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeGlobal", idCpu);
422 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
423 "Code TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeNonGlobal", idCpu);
424
425 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
426 "Code TLB misses", "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
427 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
428 "Code TLB global loads", "/IEM/CPU%u/Tlb/Code/Misses/GlobalLoads", idCpu);
429 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
430 "Code TLB slow read path", "/IEM/CPU%u/Tlb/Code/SlowReads", idCpu);
431# ifdef IEM_WITH_TLB_STATISTICS
432 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
433 "Code TLB hits (non-native)", "/IEM/CPU%u/Tlb/Code/Hits/Other", idCpu);
434# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
435 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
436 "Code TLB native hits on new page", "/IEM/CPU%u/Tlb/Code/Hits/New-Page", idCpu);
437 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
438 "Code TLB native hits on new page /w offset", "/IEM/CPU%u/Tlb/Code/Hits/New-Page-With-Offset", idCpu);
439# endif
440
441 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits/*", idCpu);
442 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB hits",
443 "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
444
445 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Hits|/IEM/CPU%u/Tlb/Code/Misses", idCpu, idCpu);
446 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB lookups (sum of hits and misses)",
447 "/IEM/CPU%u/Tlb/Code/AllLookups", idCpu);
448
449 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
450 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
451 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
452 "Code TLB actual miss rate", "/IEM/CPU%u/Tlb/Code/RateMisses", idCpu);
453
454# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
455 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
456 "Code TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
457 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Tag", idCpu);
458 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
459 "Code TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
460 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
461 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
462 "Code TLB misses in native code: Alignment [not directly included grand parent sum]",
463 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Alignment", idCpu);
464 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
465 "Code TLB misses in native code: Cross page [not directly included grand parent sum]",
466 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/CrossPage", idCpu);
467 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
468 "Code TLB misses in native code: Non-canonical [not directly included grand parent sum]",
469 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/NonCanonical", idCpu);
470
471 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
472 "Code TLB native misses on new page",
473 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page", idCpu);
474 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
475 "Code TLB native misses on new page w/ offset",
476 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page-With-Offset", idCpu);
477# endif
478# endif /* IEM_WITH_TLB_STATISTICS */
479
480 /* Data TLB organized as best we can... */
481 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
482 "Data TLB non-global revision", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobal", idCpu);
483 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
484 "Data TLB global revision", "/IEM/CPU%u/Tlb/Data/RevisionGlobal", idCpu);
485 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
486 "Data TLB non-global flushes", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobalFlushes", idCpu);
487 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
488 "Data TLB global flushes", "/IEM/CPU%u/Tlb/Data/RevisionGlobalFlushes", idCpu);
489 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
490 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/RevisionRollovers", idCpu);
491
492 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
493 "Data TLB physical revision", "/IEM/CPU%u/Tlb/Data/PhysicalRevision", idCpu);
494 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
495 "Data TLB revision flushes", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionFlushes", idCpu);
496 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
497 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionRollovers", idCpu);
498
499 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
500 "Data TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageGlobalCurLoads", idCpu);
501 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
502 "Data TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalFirstTag", idCpu);
503 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
504 "Data TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalLastTag", idCpu);
505
506 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
507 "Data TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalCurLoads", idCpu);
508 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
509 "Data TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalFirstTag", idCpu);
510 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
511 "Data TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalLastTag", idCpu);
512
513 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
514 "Data TLB page invalidation requests", "/IEM/CPU%u/Tlb/Data/InvlPg", idCpu);
515 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
516 "Data TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeGlobal", idCpu);
517 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
518 "Data TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeNonGlobal", idCpu);
519
520 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
521 "Data TLB core misses (iemMemMap, direct iemMemMapJmp (not safe path))",
522 "/IEM/CPU%u/Tlb/Data/Misses/Core", idCpu);
523 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
524 "Data TLB global loads",
525 "/IEM/CPU%u/Tlb/Data/Misses/Core/GlobalLoads", idCpu);
526 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
527 "Data TLB safe read path (inline/native misses going to iemMemMapJmp)",
528 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Reads", idCpu);
529 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
530 "Data TLB safe write path (inline/native misses going to iemMemMapJmp)",
531 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Writes", idCpu);
532 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/*", idCpu);
533 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB misses",
534 "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
535
536 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/Safe/*", idCpu);
537 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB actual safe path calls (read + write)",
538 "/IEM/CPU%u/Tlb/Data/Misses/Safe", idCpu);
539 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
540 "Data TLB hits in iemMemMapJmp - not part of safe-path total",
541 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartHits", idCpu);
542 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
543 "Data TLB misses in iemMemMapJmp - not part of safe-path total",
544 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses", idCpu);
545 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
546 "Data TLB global loads",
547 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses/GlobalLoads", idCpu);
548
549# ifdef IEM_WITH_TLB_STATISTICS
550# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
551 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
552 "Data TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
553 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Tag", idCpu);
554 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
555 "Data TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
556 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
557 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
558 "Data TLB misses in native code: Alignment [not directly included grand parent sum]",
559 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Alignment", idCpu);
560 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
561 "Data TLB misses in native code: Cross page [not directly included grand parent sum]",
562 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/CrossPage", idCpu);
563 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
564 "Data TLB misses in native code: Non-canonical [not directly included grand parent sum]",
565 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/NonCanonical", idCpu);
566# endif
567# endif
568
569# ifdef IEM_WITH_TLB_STATISTICS
570 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
571 "Data TLB core hits (iemMemMap, direct iemMemMapJmp (not safe path))",
572 "/IEM/CPU%u/Tlb/Data/Hits/Core", idCpu);
573 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInlineCodeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
574 "Data TLB hits in IEMAllMemRWTmplInline.cpp.h",
575 "/IEM/CPU%u/Tlb/Data/Hits/Inline", idCpu);
576# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
577 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
578 "Data TLB native stack access hits",
579 "/IEM/CPU%u/Tlb/Data/Hits/Native/Stack", idCpu);
580 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
581 "Data TLB native data fetch hits",
582 "/IEM/CPU%u/Tlb/Data/Hits/Native/Fetch", idCpu);
583 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
584 "Data TLB native data store hits",
585 "/IEM/CPU%u/Tlb/Data/Hits/Native/Store", idCpu);
586 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
587 "Data TLB native mapped data hits",
588 "/IEM/CPU%u/Tlb/Data/Hits/Native/Mapped", idCpu);
589# endif
590 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/*", idCpu);
591 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits",
592 "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
593
594# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
595 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/Native/*", idCpu);
596 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits from native code",
597 "/IEM/CPU%u/Tlb/Data/Hits/Native", idCpu);
598# endif
599
600 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Hits|/IEM/CPU%u/Tlb/Data/Misses", idCpu, idCpu);
601 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB lookups (sum of hits and misses)",
602 "/IEM/CPU%u/Tlb/Data/AllLookups", idCpu);
603
604 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
605 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
606 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
607 "Data TLB actual miss rate", "/IEM/CPU%u/Tlb/Data/RateMisses", idCpu);
608
609# endif /* IEM_WITH_TLB_STATISTICS */
610
611
612#ifdef VBOX_WITH_IEM_RECOMPILER
613 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecNative, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
614 "Executed native translation block", "/IEM/CPU%u/re/cTbExecNative", idCpu);
615 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecThreaded, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
616 "Executed threaded translation block", "/IEM/CPU%u/re/cTbExecThreaded", idCpu);
617 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
618 "Times threaded TB execution was interrupted/broken off", "/IEM/CPU%u/re/cTbExecThreadedBreaks", idCpu);
619# ifdef VBOX_WITH_STATISTICS
620 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
621 "Times threaded TB execution was interrupted/broken off on a call with lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithLookup", idCpu);
622 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithoutLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
623 "Times threaded TB execution was interrupted/broken off on a call without lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithoutLookup", idCpu);
624# endif
625
626# ifdef VBOX_WITH_STATISTICS
627 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPoll, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
628 "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll", idCpu);
629 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollRun, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
630 "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll/Running", idCpu);
631 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollUnchanged, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
632 "Timer polling interval unchanged", "/IEM/CPU%u/re/TimerPoll/Unchanged", idCpu);
633 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollTiny, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
634 "Timer polling interval tiny", "/IEM/CPU%u/re/TimerPoll/Tiny", idCpu);
635 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollDefaultCalc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
636 "Timer polling interval calculated using defaults", "/IEM/CPU%u/re/TimerPoll/DefaultCalc", idCpu);
637 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollMax, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
638 "Timer polling interval maxed out", "/IEM/CPU%u/re/TimerPoll/Max", idCpu);
639 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorDivision, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_OCCURENCE,
640 "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorDivision", idCpu);
641 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorMultiplication, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
642 "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorMultiplication", idCpu);
643# endif
644 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbsTillNextTimerPollPrev, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
645 "Timer polling interval (in TBs)", "/IEM/CPU%u/re/TimerPollInterval", idCpu);
646
647 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
648 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
649 "Translation block allocations", "/IEM/CPU%u/re/cTbAllocCalls", idCpu);
650 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatFrees, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
651 "Translation block frees", "/IEM/CPU%u/re/cTbFreeCalls", idCpu);
652# ifdef VBOX_WITH_STATISTICS
653 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
654 "Time spent freeing up TBs when full at alloc", "/IEM/CPU%u/re/TbPruningAlloc", idCpu);
655# endif
656 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPruneNative, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
657 "Time spent freeing up native TBs when out of executable memory", "/IEM/CPU%u/re/ExecMem/TbPruningNative", idCpu);
658 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cAllocatedChunks, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
659 "Populated TB chunks", "/IEM/CPU%u/re/cTbChunks", idCpu);
660 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxChunks, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
661 "Max number of TB chunks", "/IEM/CPU%u/re/cTbChunksMax", idCpu);
662 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cTotalTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
663 "Total number of TBs in the allocator", "/IEM/CPU%u/re/cTbTotal", idCpu);
664 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
665 "Max total number of TBs allowed", "/IEM/CPU%u/re/cTbTotalMax", idCpu);
666 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cInUseTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
667 "Number of currently allocated TBs", "/IEM/CPU%u/re/cTbAllocated", idCpu);
668 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cNativeTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
669 "Number of currently allocated native TBs", "/IEM/CPU%u/re/cTbAllocatedNative", idCpu);
670 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cThreadedTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
671 "Number of currently allocated threaded TBs", "/IEM/CPU%u/re/cTbAllocatedThreaded", idCpu);
672
673 PIEMTBCACHE const pTbCache = pVCpu->iem.s.pTbCacheR3;
674 STAMR3RegisterF(pVM, (void *)&pTbCache->cHash, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
675 "Translation block lookup table size", "/IEM/CPU%u/re/cTbHashTab", idCpu);
676
677 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHits, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
678 "Translation block lookup hits", "/IEM/CPU%u/re/cTbLookupHits", idCpu);
679 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHitsViaTbLookupTable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
680 "Translation block lookup hits via TB lookup table associated with the previous TB", "/IEM/CPU%u/re/cTbLookupHitsViaTbLookupTable", idCpu);
681 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
682 "Translation block lookup misses", "/IEM/CPU%u/re/cTbLookupMisses", idCpu);
683 STAMR3RegisterF(pVM, (void *)&pTbCache->cCollisions, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
684 "Translation block hash table collisions", "/IEM/CPU%u/re/cTbCollisions", idCpu);
685# ifdef VBOX_WITH_STATISTICS
686 STAMR3RegisterF(pVM, (void *)&pTbCache->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
687 "Time spent shortening collision lists", "/IEM/CPU%u/re/TbPruningCollisions", idCpu);
688# endif
689
690 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedCalls, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
691 "Calls per threaded translation block", "/IEM/CPU%u/re/ThrdCallsPerTb", idCpu);
692 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbInstr, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
693 "Instruction per threaded translation block", "/IEM/CPU%u/re/ThrdInstrPerTb", idCpu);
694 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLookupEntries, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
695 "TB lookup table entries per threaded translation block", "/IEM/CPU%u/re/ThrdLookupEntriesPerTb", idCpu);
696
697 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckIrqBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
698 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckIrqBreaks", idCpu);
699 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckTimersBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
700 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckTimersBreaks", idCpu);
701 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckModeBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
702 "TB breaks by CheckMode", "/IEM/CPU%u/re/CheckModeBreaks", idCpu);
703 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
704 "Branch target misses", "/IEM/CPU%u/re/CheckTbJmpMisses", idCpu);
705 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
706 "Needing CS.LIM checking TB after branch or on page crossing", "/IEM/CPU%u/re/CheckTbNeedCsLimChecking", idCpu);
707
708 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopFullTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
709 "Detected loop full TB", "/IEM/CPU%u/re/LoopFullTbDetected", idCpu);
710 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopFullTbDetected2, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
711 "Detected loop full TB but looping back to before the first TB instruction",
712 "/IEM/CPU%u/re/LoopFullTbDetected2", idCpu);
713 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopInTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
714 "Detected loop within TB", "/IEM/CPU%u/re/LoopInTbDetected", idCpu);
715
716 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeExecMemInstrBufAllocFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
717 "Number of times the exec memory allocator failed to allocate a large enough buffer",
718 "/IEM/CPU%u/re/NativeExecMemInstrBufAllocFailed", idCpu);
719
720 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsRecompiled, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
721 "Number of threaded calls per TB that have been properly recompiled to native code",
722 "/IEM/CPU%u/re/NativeCallsRecompiledPerTb", idCpu);
723 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsThreaded, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
724 "Number of threaded calls per TB that could not be recompiler to native code",
725 "/IEM/CPU%u/re/NativeCallsThreadedPerTb", idCpu);
726 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeFullyRecompiledTbs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
727 "Number of threaded calls that could not be recompiler to native code",
728 "/IEM/CPU%u/re/NativeFullyRecompiledTbs", idCpu);
729
730 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbNativeCode, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES_PER_TB,
731 "Size of native code per TB", "/IEM/CPU%u/re/NativeCodeSizePerTb", idCpu);
732 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeRecompilation, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
733 "Profiling iemNativeRecompile()", "/IEM/CPU%u/re/NativeRecompilation", idCpu);
734
735# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
736# ifdef VBOX_WITH_STATISTICS
737 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
738 "Number of calls to iemNativeRegAllocFindFree.",
739 "/IEM/CPU%u/re/NativeRegFindFree", idCpu);
740# endif
741 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
742 "Number of times iemNativeRegAllocFindFree needed to free a variable.",
743 "/IEM/CPU%u/re/NativeRegFindFreeVar", idCpu);
744# ifdef VBOX_WITH_STATISTICS
745 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
746 "Number of times iemNativeRegAllocFindFree did not needed to free any variables.",
747 "/IEM/CPU%u/re/NativeRegFindFreeNoVar", idCpu);
748 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
749 "Times liveness info freeed up shadowed guest registers in iemNativeRegAllocFindFree.",
750 "/IEM/CPU%u/re/NativeRegFindFreeLivenessUnshadowed", idCpu);
751 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
752 "Times liveness info helped finding the return register in iemNativeRegAllocFindFree.",
753 "/IEM/CPU%u/re/NativeRegFindFreeLivenessHelped", idCpu);
754
755# define REG_NATIVE_EFL_GROUP(a_Lower, a_Camel) do { \
756 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflPostponed ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
757 "Postponed all status flag updating, " #a_Lower " instructions", \
758 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Postponed", idCpu); \
759 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkipped ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
760 "Skipped all status flag updating, " #a_Lower " instructions", \
761 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Skipped", idCpu); \
762 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflTotal ## a_Camel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, \
763 "Total number of " #a_Lower " intructions with status flag updating", \
764 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Total", idCpu); \
765 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Total", idCpu); \
766 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Postponed", idCpu); \
767 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat, \
768 "Postponed all status flag updating, " #a_Lower " instructions, percentage", \
769 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "PostponedPct", idCpu); \
770 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "Skipped", idCpu); \
771 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat, \
772 "Skipped all status flag updating, " #a_Lower " instructions, percentage", \
773 "/IEM/CPU%u/re/NativeEFlags/" #a_Camel "SkippedPct", idCpu); \
774 } while (0)
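        /* Registers postponed/skipped/total EFLAGS-update counters plus the derived
           percentages for one instruction group per invocation below. */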
775 REG_NATIVE_EFL_GROUP(arithmetic, Arithmetic);
776 REG_NATIVE_EFL_GROUP(logical, Logical);
777 REG_NATIVE_EFL_GROUP(shift, Shift);
778# undef REG_NATIVE_EFL_GROUP
779
780 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflPostponedEmits, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
781 "Postponed EFLAGS calculation emits", "/IEM/CPU%u/re/NativeEFlags/ZZEmits", idCpu);
782
783 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippable", idCpu);
784 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfSkippable", idCpu);
785 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfSkippable", idCpu);
786 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfSkippable", idCpu);
787 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfSkippable", idCpu);
788 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfSkippable", idCpu);
789
790 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfRequired", idCpu);
791 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfRequired", idCpu);
792 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfRequired", idCpu);
793 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfRequired", idCpu);
794 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfRequired", idCpu);
795 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfRequired", idCpu);
796
797# ifdef IEMLIVENESS_EXTENDED_LAYOUT
798 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/CfDelayable", idCpu);
799 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/PfDelayable", idCpu);
800 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/AfDelayable", idCpu);
801 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfDelayable", idCpu);
802 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/SfDelayable", idCpu);
803 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlags/OfDelayable", idCpu);
804# endif
805
806 /* Sum up all status bits ('_' is a sorting hack). */
807 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fSkippable*", idCpu);
808 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total skippable EFLAGS status bit updating",
809 "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippable", idCpu);
810
811 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fRequired*", idCpu);
812 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total required STATUS status bit updating",
813 "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequired", idCpu);
814
815# ifdef IEMLIVENESS_EXTENDED_LAYOUT
816 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?fDelayable*", idCpu);
817 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total potentially delayable STATUS status bit updating",
818 "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayable", idCpu);
819# endif
820
821 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/?f*", idCpu);
822 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total STATUS status bit events of any kind",
823 "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
824
825 /* Corresponding ratios / percentages of the totals. */
826 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
827 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippable", idCpu);
828 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
829 "Total skippable EFLAGS status bit updating percentage",
830 "/IEM/CPU%u/re/NativeLivenessEFlags/totalSkippablePct", idCpu);
831
832 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/totalTotal", idCpu);
833 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequired", idCpu);
834 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
835 "Total required EFLAGS status bit updating percentage",
836 "/IEM/CPU%u/re/NativeLivenessEFlags/totalRequiredPct", idCpu);
837
838# ifdef IEMLIVENESS_EXTENDED_LAYOUT
839 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayable", idCpu);
840 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
841 "Total potentially delayable EFLAGS status bit updating percentage",
842 "/IEM/CPU%u/re/NativeLivenessEFlags/totalDelayablePct", idCpu);
843# endif
844
845 /* Ratios of individual bits. */
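        /* Illustrative note: offFlagChar below is the offset of the flag letter
           ('C' in "Cf...") within both strings; each subsequent line patches that
           single character to P/A/Z/S/O so the same pattern and value buffers are
           reused for all six per-flag percentage registrations. */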
846 size_t const offFlagChar = RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags/Cf*", idCpu) - 3;
847 Assert(szPat[offFlagChar] == 'C');
848 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippable", idCpu);
849 Assert(szVal[offFlagChar] == 'C');
850 szPat[offFlagChar] = szVal[offFlagChar] = 'C'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.CF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/CfSkippablePct", idCpu);
851 szPat[offFlagChar] = szVal[offFlagChar] = 'P'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.PF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/PfSkippablePct", idCpu);
852 szPat[offFlagChar] = szVal[offFlagChar] = 'A'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.AF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/AfSkippablePct", idCpu);
853 szPat[offFlagChar] = szVal[offFlagChar] = 'Z'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.ZF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/ZfSkippablePct", idCpu);
854 szPat[offFlagChar] = szVal[offFlagChar] = 'S'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.SF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/SfSkippablePct", idCpu);
855 szPat[offFlagChar] = szVal[offFlagChar] = 'O'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.OF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlags/OfSkippablePct", idCpu);
856
857 /* PC updates total and skipped, with PCT ratio. */
858 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Total RIP updates", "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
859 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateDelayed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Delayed RIP updates", "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
860 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
861 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
862 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
863 "Delayed RIP updating percentage",
864 "/IEM/CPU%u/re/NativePcUpdateDelayed_StatusDelayedPct", idCpu);
865
866# endif /* VBOX_WITH_STATISTICS */
867# ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
868 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEndIfOtherBranchDirty, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
869 "IEM_MC_ENDIF flushing dirty shadow registers for other branch (not good).",
870 "/IEM/CPU%u/re/NativeEndIfOtherBranchDirty", idCpu);
871# endif
872# ifdef VBOX_WITH_STATISTICS
873 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
874 "Number of calls to iemNativeSimdRegAllocFindFree.",
875 "/IEM/CPU%u/re/NativeSimdRegFindFree", idCpu);
876 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
877 "Number of times iemNativeSimdRegAllocFindFree needed to free a variable.",
878 "/IEM/CPU%u/re/NativeSimdRegFindFreeVar", idCpu);
879 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
880 "Number of times iemNativeSimdRegAllocFindFree did not needed to free any variables.",
881 "/IEM/CPU%u/re/NativeSimdRegFindFreeNoVar", idCpu);
882 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
883 "Times liveness info freeed up shadowed guest registers in iemNativeSimdRegAllocFindFree.",
884 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessUnshadowed", idCpu);
885 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
886 "Times liveness info helped finding the return register in iemNativeSimdRegAllocFindFree.",
887 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessHelped", idCpu);
888
889 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks",
890 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckPotential", idCpu);
891 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks",
892 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckPotential", idCpu);
893 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks",
894 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckPotential", idCpu);
895 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks",
896 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckPotential", idCpu);
897
898 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted",
899 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckOmitted", idCpu);
900 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks omitted",
901 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckOmitted", idCpu);
902 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted",
903 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckOmitted", idCpu);
904 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted",
905 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckOmitted", idCpu);
906
907 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbFinished, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
908 "Number of times the TB finishes execution completely",
909 "/IEM/CPU%u/re/NativeTbFinished", idCpu);
910# endif /* VBOX_WITH_STATISTICS */
911 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreak, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
912 "Number of times the TB finished through the ReturnBreak label",
913 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak", idCpu);
914 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreakFF, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
915 "Number of times the TB finished through the ReturnBreak label",
916 "/IEM/CPU%u/re/NativeTbExit/ReturnBreakFF", idCpu);
917 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnWithFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
918 "Number of times the TB finished through the ReturnWithFlags label",
919 "/IEM/CPU%u/re/NativeTbExit/ReturnWithFlags", idCpu);
920 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnOtherStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
921 "Number of times the TB finished with some other status value",
922 "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus", idCpu);
923 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLongJump, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
924 "Number of times the TB finished via long jump / throw",
925 "/IEM/CPU%u/re/NativeTbExit/LongJumps", idCpu);
926 /* These end up returning VINF_IEM_REEXEC_BREAK and are thus already counted under NativeTbExit/ReturnBreak: */
927 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitObsoleteTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
928 "Number of times the TB finished through the ObsoleteTb label",
929 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/ObsoleteTb", idCpu);
930 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
931 "Number of times the TB finished through the NeedCsLimChecking label",
932 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/NeedCsLimChecking", idCpu);
933 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
934 "Number of times the TB finished through the CheckBranchMiss label",
935 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/CheckBranchMiss", idCpu);
936 /* Raising stuff will either increment NativeTbExit/LongJumps or NativeTbExit/ReturnOtherStatus
937 depending on whether VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is defined: */
938# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
939# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus/"
940# else
941# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/LongJumps/"
942# endif
943 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseDe, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
944 "Number of times the TB finished raising a #DE exception",
945 RAISE_PREFIX "RaiseDe", idCpu);
946 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
947 "Number of times the TB finished raising a #UD exception",
948 RAISE_PREFIX "RaiseUd", idCpu);
949 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
950 "Number of times the TB finished raising a SSE related exception",
951 RAISE_PREFIX "RaiseSseRelated", idCpu);
952 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseAvxRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
953 "Number of times the TB finished raising a AVX related exception",
954 RAISE_PREFIX "RaiseAvxRelated", idCpu);
955 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseAvxFpRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
956 "Number of times the TB finished raising a SSE/AVX floating point related exception",
957 RAISE_PREFIX "RaiseSseAvxFpRelated", idCpu);
958 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseNm, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
959 "Number of times the TB finished raising a #NM exception",
960 RAISE_PREFIX "RaiseNm", idCpu);
961 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseGp0, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
962 "Number of times the TB finished raising a #GP(0) exception",
963 RAISE_PREFIX "RaiseGp0", idCpu);
964 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseMf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
965 "Number of times the TB finished raising a #MF exception",
966 RAISE_PREFIX "RaiseMf", idCpu);
967 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseXf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
968 "Number of times the TB finished raising a #XF exception",
969 RAISE_PREFIX "RaiseXf", idCpu);
970
971# ifdef VBOX_WITH_STATISTICS
972 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLoopFullTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
973 "Number of full TB loops.",
974 "/IEM/CPU%u/re/NativeTbExit/LoopFullTb", idCpu);
975# endif
976
977 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
978 "Direct linking #1 with IRQ check succeeded",
979 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1Irq", idCpu);
980 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
981 "Direct linking #1 w/o IRQ check succeeded",
982 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1NoIrq", idCpu);
983# ifdef VBOX_WITH_STATISTICS
984 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
985 "Direct linking #1 failed: No TB in lookup table",
986 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1NoTb", idCpu);
987 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
988 "Direct linking #1 failed: GCPhysPc mismatch",
989 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchGCPhysPc", idCpu);
990 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
991 "Direct linking #1 failed: TB flags mismatch",
992 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchFlags", idCpu);
993 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
994 "Direct linking #1 failed: IRQ or FF pending",
995 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1PendingIrq", idCpu);
996# endif
997
998 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
999 "Direct linking #2 with IRQ check succeeded",
1000 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2Irq", idCpu);
1001 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1002 "Direct linking #2 w/o IRQ check succeeded",
1003 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2NoIrq", idCpu);
1004# ifdef VBOX_WITH_STATISTICS
1005 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1006 "Direct linking #2 failed: No TB in lookup table",
1007 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2NoTb", idCpu);
1008 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1009 "Direct linking #2 failed: GCPhysPc mismatch",
1010 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchGCPhysPc", idCpu);
1011 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1012 "Direct linking #2 failed: TB flags mismatch",
1013 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchFlags", idCpu);
1014 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1015 "Direct linking #2 failed: IRQ or FF pending",
1016 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2PendingIrq", idCpu);
1017# endif
1018
1019 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeTbExit/*", idCpu); /* only immediate children, no sub folders */
1020 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
1021 "Number of times native TB execution finished before the end (not counting thrown memory++ exceptions)",
1022 "/IEM/CPU%u/re/NativeTbExit", idCpu);
1023
1024
1025# endif /* VBOX_WITH_IEM_NATIVE_RECOMPILER */
1026
1027
1028# ifdef VBOX_WITH_STATISTICS
1029 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1030 "iemMemMapJmp calls", "/IEM/CPU%u/iemMemMapJmp", idCpu);
1031 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapNoJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1032 "iemMemMap calls", "/IEM/CPU%u/iemMemMapNoJmp", idCpu);
1033 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferCrossPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1034 "iemMemBounceBufferMapCrossPage calls", "/IEM/CPU%u/iemMemMapBounceBufferCrossPage", idCpu);
1035 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferMapPhys, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1036 "iemMemBounceBufferMapPhys calls", "/IEM/CPU%u/iemMemMapBounceBufferMapPhys", idCpu);
1037# endif
1038
1039
1040#endif /* VBOX_WITH_IEM_RECOMPILER */
1041
1042 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
1043 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatXcpts[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
1044 "", "/IEM/CPU%u/Exceptions/%02x", idCpu, i);
1045 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatInts); i++)
1046 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatInts[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
1047 "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
1048
1049# if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
1050 /* Instruction statistics: */
1051# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
1052 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsRZ.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
1053 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
1054 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsR3.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
1055 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
1056# include "IEMInstructionStatisticsTmpl.h"
1057# undef IEM_DO_INSTR_STAT
1058# endif
1059
1060# if defined(VBOX_WITH_STATISTICS) && defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1061 /* Threaded function statistics: */
1062 for (unsigned i = 1; i < (unsigned)kIemThreadedFunc_End; i++)
1063 STAMR3RegisterF(pVM, &pVCpu->iem.s.acThreadedFuncStats[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED,
1064 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/ThrdFuncs/%s", idCpu, g_apszIemThreadedFunctionStats[i]);
1065# endif
1066
1067
1068 for (unsigned i = 1; i < RT_ELEMENTS(pVCpu->iem.s.aStatAdHoc); i++)
1069 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatAdHoc[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
1070 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/AdHoc/%02u", idCpu, i);
1071
1072#endif /* !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) - quick fix for stupid structure duplication non-sense */
1073 }
1074
1075#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1076 /*
1077 * Register the per-VM VMX APIC-access page handler type.
1078 */
1079 if (pVM->cpum.ro.GuestFeatures.fVmx)
1080 {
1081 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, PGMPHYSHANDLER_F_NOT_IN_HM,
1082 iemVmxApicAccessPageHandler,
1083 "VMX APIC-access page", &pVM->iem.s.hVmxApicAccessPage);
1084 AssertLogRelRCReturn(rc, rc);
1085 }
1086#endif
1087
1088 DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1089 DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM instruction TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1090#ifdef IEM_WITH_TLB_TRACE
1091 DBGFR3InfoRegisterInternalArgv(pVM, "tlbtrace", "IEM TLB trace log", iemR3InfoTlbTrace, DBGFINFO_FLAGS_RUN_ON_EMT);
1092#endif
1093#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1094 DBGFR3InfoRegisterInternalArgv(pVM, "tb", "IEM translation block", iemR3InfoTb, DBGFINFO_FLAGS_RUN_ON_EMT);
1095 DBGFR3InfoRegisterInternalArgv(pVM, "tbtop", "IEM translation blocks most used or most recently used",
1096 iemR3InfoTbTop, DBGFINFO_FLAGS_RUN_ON_EMT);
1097#endif
1098#ifdef VBOX_WITH_DEBUGGER
1099 iemR3RegisterDebuggerCommands();
1100#endif
1101
1102 return VINF_SUCCESS;
1103}
1104
1105
1106VMMR3DECL(int) IEMR3Term(PVM pVM)
1107{
1108 NOREF(pVM);
1109#ifdef IEM_WITH_TLB_TRACE
1110 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1111 {
1112 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
1113 RTMemPageFree(pVCpu->iem.s.paTlbTraceEntries,
1114 RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift) * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
1115 }
1116#endif
1117#if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER) && defined(VBOX_WITH_SAVE_THREADED_TBS_FOR_PROFILING)
1118 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1119 iemThreadedSaveTbForProfilingCleanup(pVM->apCpusR3[idCpu]);
1120#endif
1121 return VINF_SUCCESS;
1122}
1123
1124
1125VMMR3DECL(void) IEMR3Relocate(PVM pVM)
1126{
1127 RT_NOREF(pVM);
1128}
1129
1130
1131/**
1132 * Gets the name of a generic IEM exit code.
1133 *
1134 * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
1135 * @param uExit The IEM exit to name.
1136 */
1137VMMR3DECL(const char *) IEMR3GetExitName(uint32_t uExit)
1138{
1139 static const char * const s_apszNames[] =
1140 {
1141 /* external interrupts */
1142 "ExtInt 00h", "ExtInt 01h", "ExtInt 02h", "ExtInt 03h", "ExtInt 04h", "ExtInt 05h", "ExtInt 06h", "ExtInt 07h",
1143 "ExtInt 08h", "ExtInt 09h", "ExtInt 0ah", "ExtInt 0bh", "ExtInt 0ch", "ExtInt 0dh", "ExtInt 0eh", "ExtInt 0fh",
1144 "ExtInt 10h", "ExtInt 11h", "ExtInt 12h", "ExtInt 13h", "ExtInt 14h", "ExtInt 15h", "ExtInt 16h", "ExtInt 17h",
1145 "ExtInt 18h", "ExtInt 19h", "ExtInt 1ah", "ExtInt 1bh", "ExtInt 1ch", "ExtInt 1dh", "ExtInt 1eh", "ExtInt 1fh",
1146 "ExtInt 20h", "ExtInt 21h", "ExtInt 22h", "ExtInt 23h", "ExtInt 24h", "ExtInt 25h", "ExtInt 26h", "ExtInt 27h",
1147 "ExtInt 28h", "ExtInt 29h", "ExtInt 2ah", "ExtInt 2bh", "ExtInt 2ch", "ExtInt 2dh", "ExtInt 2eh", "ExtInt 2fh",
1148 "ExtInt 30h", "ExtInt 31h", "ExtInt 32h", "ExtInt 33h", "ExtInt 34h", "ExtInt 35h", "ExtInt 36h", "ExtInt 37h",
1149 "ExtInt 38h", "ExtInt 39h", "ExtInt 3ah", "ExtInt 3bh", "ExtInt 3ch", "ExtInt 3dh", "ExtInt 3eh", "ExtInt 3fh",
1150 "ExtInt 40h", "ExtInt 41h", "ExtInt 42h", "ExtInt 43h", "ExtInt 44h", "ExtInt 45h", "ExtInt 46h", "ExtInt 47h",
1151 "ExtInt 48h", "ExtInt 49h", "ExtInt 4ah", "ExtInt 4bh", "ExtInt 4ch", "ExtInt 4dh", "ExtInt 4eh", "ExtInt 4fh",
1152 "ExtInt 50h", "ExtInt 51h", "ExtInt 52h", "ExtInt 53h", "ExtInt 54h", "ExtInt 55h", "ExtInt 56h", "ExtInt 57h",
1153 "ExtInt 58h", "ExtInt 59h", "ExtInt 5ah", "ExtInt 5bh", "ExtInt 5ch", "ExtInt 5dh", "ExtInt 5eh", "ExtInt 5fh",
1154 "ExtInt 60h", "ExtInt 61h", "ExtInt 62h", "ExtInt 63h", "ExtInt 64h", "ExtInt 65h", "ExtInt 66h", "ExtInt 67h",
1155 "ExtInt 68h", "ExtInt 69h", "ExtInt 6ah", "ExtInt 6bh", "ExtInt 6ch", "ExtInt 6dh", "ExtInt 6eh", "ExtInt 6fh",
1156 "ExtInt 70h", "ExtInt 71h", "ExtInt 72h", "ExtInt 73h", "ExtInt 74h", "ExtInt 75h", "ExtInt 76h", "ExtInt 77h",
1157 "ExtInt 78h", "ExtInt 79h", "ExtInt 7ah", "ExtInt 7bh", "ExtInt 7ch", "ExtInt 7dh", "ExtInt 7eh", "ExtInt 7fh",
1158 "ExtInt 80h", "ExtInt 81h", "ExtInt 82h", "ExtInt 83h", "ExtInt 84h", "ExtInt 85h", "ExtInt 86h", "ExtInt 87h",
1159 "ExtInt 88h", "ExtInt 89h", "ExtInt 8ah", "ExtInt 8bh", "ExtInt 8ch", "ExtInt 8dh", "ExtInt 8eh", "ExtInt 8fh",
1160 "ExtInt 90h", "ExtInt 91h", "ExtInt 92h", "ExtInt 93h", "ExtInt 94h", "ExtInt 95h", "ExtInt 96h", "ExtInt 97h",
1161 "ExtInt 98h", "ExtInt 99h", "ExtInt 9ah", "ExtInt 9bh", "ExtInt 9ch", "ExtInt 9dh", "ExtInt 9eh", "ExtInt 9fh",
1162 "ExtInt a0h", "ExtInt a1h", "ExtInt a2h", "ExtInt a3h", "ExtInt a4h", "ExtInt a5h", "ExtInt a6h", "ExtInt a7h",
1163 "ExtInt a8h", "ExtInt a9h", "ExtInt aah", "ExtInt abh", "ExtInt ach", "ExtInt adh", "ExtInt aeh", "ExtInt afh",
1164 "ExtInt b0h", "ExtInt b1h", "ExtInt b2h", "ExtInt b3h", "ExtInt b4h", "ExtInt b5h", "ExtInt b6h", "ExtInt b7h",
1165 "ExtInt b8h", "ExtInt b9h", "ExtInt bah", "ExtInt bbh", "ExtInt bch", "ExtInt bdh", "ExtInt beh", "ExtInt bfh",
1166 "ExtInt c0h", "ExtInt c1h", "ExtInt c2h", "ExtInt c3h", "ExtInt c4h", "ExtInt c5h", "ExtInt c6h", "ExtInt c7h",
1167 "ExtInt c8h", "ExtInt c9h", "ExtInt cah", "ExtInt cbh", "ExtInt cch", "ExtInt cdh", "ExtInt ceh", "ExtInt cfh",
1168 "ExtInt d0h", "ExtInt d1h", "ExtInt d2h", "ExtInt d3h", "ExtInt d4h", "ExtInt d5h", "ExtInt d6h", "ExtInt d7h",
1169 "ExtInt d8h", "ExtInt d9h", "ExtInt dah", "ExtInt dbh", "ExtInt dch", "ExtInt ddh", "ExtInt deh", "ExtInt dfh",
1170 "ExtInt e0h", "ExtInt e1h", "ExtInt e2h", "ExtInt e3h", "ExtInt e4h", "ExtInt e5h", "ExtInt e6h", "ExtInt e7h",
1171 "ExtInt e8h", "ExtInt e9h", "ExtInt eah", "ExtInt ebh", "ExtInt ech", "ExtInt edh", "ExtInt eeh", "ExtInt efh",
1172 "ExtInt f0h", "ExtInt f1h", "ExtInt f2h", "ExtInt f3h", "ExtInt f4h", "ExtInt f5h", "ExtInt f6h", "ExtInt f7h",
1173 "ExtInt f8h", "ExtInt f9h", "ExtInt fah", "ExtInt fbh", "ExtInt fch", "ExtInt fdh", "ExtInt feh", "ExtInt ffh",
1174 /* software interrupts */
1175 "SoftInt 00h", "SoftInt 01h", "SoftInt 02h", "SoftInt 03h", "SoftInt 04h", "SoftInt 05h", "SoftInt 06h", "SoftInt 07h",
1176 "SoftInt 08h", "SoftInt 09h", "SoftInt 0ah", "SoftInt 0bh", "SoftInt 0ch", "SoftInt 0dh", "SoftInt 0eh", "SoftInt 0fh",
1177 "SoftInt 10h", "SoftInt 11h", "SoftInt 12h", "SoftInt 13h", "SoftInt 14h", "SoftInt 15h", "SoftInt 16h", "SoftInt 17h",
1178 "SoftInt 18h", "SoftInt 19h", "SoftInt 1ah", "SoftInt 1bh", "SoftInt 1ch", "SoftInt 1dh", "SoftInt 1eh", "SoftInt 1fh",
1179 "SoftInt 20h", "SoftInt 21h", "SoftInt 22h", "SoftInt 23h", "SoftInt 24h", "SoftInt 25h", "SoftInt 26h", "SoftInt 27h",
1180 "SoftInt 28h", "SoftInt 29h", "SoftInt 2ah", "SoftInt 2bh", "SoftInt 2ch", "SoftInt 2dh", "SoftInt 2eh", "SoftInt 2fh",
1181 "SoftInt 30h", "SoftInt 31h", "SoftInt 32h", "SoftInt 33h", "SoftInt 34h", "SoftInt 35h", "SoftInt 36h", "SoftInt 37h",
1182 "SoftInt 38h", "SoftInt 39h", "SoftInt 3ah", "SoftInt 3bh", "SoftInt 3ch", "SoftInt 3dh", "SoftInt 3eh", "SoftInt 3fh",
1183 "SoftInt 40h", "SoftInt 41h", "SoftInt 42h", "SoftInt 43h", "SoftInt 44h", "SoftInt 45h", "SoftInt 46h", "SoftInt 47h",
1184 "SoftInt 48h", "SoftInt 49h", "SoftInt 4ah", "SoftInt 4bh", "SoftInt 4ch", "SoftInt 4dh", "SoftInt 4eh", "SoftInt 4fh",
1185 "SoftInt 50h", "SoftInt 51h", "SoftInt 52h", "SoftInt 53h", "SoftInt 54h", "SoftInt 55h", "SoftInt 56h", "SoftInt 57h",
1186 "SoftInt 58h", "SoftInt 59h", "SoftInt 5ah", "SoftInt 5bh", "SoftInt 5ch", "SoftInt 5dh", "SoftInt 5eh", "SoftInt 5fh",
1187 "SoftInt 60h", "SoftInt 61h", "SoftInt 62h", "SoftInt 63h", "SoftInt 64h", "SoftInt 65h", "SoftInt 66h", "SoftInt 67h",
1188 "SoftInt 68h", "SoftInt 69h", "SoftInt 6ah", "SoftInt 6bh", "SoftInt 6ch", "SoftInt 6dh", "SoftInt 6eh", "SoftInt 6fh",
1189 "SoftInt 70h", "SoftInt 71h", "SoftInt 72h", "SoftInt 73h", "SoftInt 74h", "SoftInt 75h", "SoftInt 76h", "SoftInt 77h",
1190 "SoftInt 78h", "SoftInt 79h", "SoftInt 7ah", "SoftInt 7bh", "SoftInt 7ch", "SoftInt 7dh", "SoftInt 7eh", "SoftInt 7fh",
1191 "SoftInt 80h", "SoftInt 81h", "SoftInt 82h", "SoftInt 83h", "SoftInt 84h", "SoftInt 85h", "SoftInt 86h", "SoftInt 87h",
1192 "SoftInt 88h", "SoftInt 89h", "SoftInt 8ah", "SoftInt 8bh", "SoftInt 8ch", "SoftInt 8dh", "SoftInt 8eh", "SoftInt 8fh",
1193 "SoftInt 90h", "SoftInt 91h", "SoftInt 92h", "SoftInt 93h", "SoftInt 94h", "SoftInt 95h", "SoftInt 96h", "SoftInt 97h",
1194 "SoftInt 98h", "SoftInt 99h", "SoftInt 9ah", "SoftInt 9bh", "SoftInt 9ch", "SoftInt 9dh", "SoftInt 9eh", "SoftInt 9fh",
1195 "SoftInt a0h", "SoftInt a1h", "SoftInt a2h", "SoftInt a3h", "SoftInt a4h", "SoftInt a5h", "SoftInt a6h", "SoftInt a7h",
1196 "SoftInt a8h", "SoftInt a9h", "SoftInt aah", "SoftInt abh", "SoftInt ach", "SoftInt adh", "SoftInt aeh", "SoftInt afh",
1197 "SoftInt b0h", "SoftInt b1h", "SoftInt b2h", "SoftInt b3h", "SoftInt b4h", "SoftInt b5h", "SoftInt b6h", "SoftInt b7h",
1198 "SoftInt b8h", "SoftInt b9h", "SoftInt bah", "SoftInt bbh", "SoftInt bch", "SoftInt bdh", "SoftInt beh", "SoftInt bfh",
1199 "SoftInt c0h", "SoftInt c1h", "SoftInt c2h", "SoftInt c3h", "SoftInt c4h", "SoftInt c5h", "SoftInt c6h", "SoftInt c7h",
1200 "SoftInt c8h", "SoftInt c9h", "SoftInt cah", "SoftInt cbh", "SoftInt cch", "SoftInt cdh", "SoftInt ceh", "SoftInt cfh",
1201 "SoftInt d0h", "SoftInt d1h", "SoftInt d2h", "SoftInt d3h", "SoftInt d4h", "SoftInt d5h", "SoftInt d6h", "SoftInt d7h",
1202 "SoftInt d8h", "SoftInt d9h", "SoftInt dah", "SoftInt dbh", "SoftInt dch", "SoftInt ddh", "SoftInt deh", "SoftInt dfh",
1203 "SoftInt e0h", "SoftInt e1h", "SoftInt e2h", "SoftInt e3h", "SoftInt e4h", "SoftInt e5h", "SoftInt e6h", "SoftInt e7h",
1204 "SoftInt e8h", "SoftInt e9h", "SoftInt eah", "SoftInt ebh", "SoftInt ech", "SoftInt edh", "SoftInt eeh", "SoftInt efh",
1205 "SoftInt f0h", "SoftInt f1h", "SoftInt f2h", "SoftInt f3h", "SoftInt f4h", "SoftInt f5h", "SoftInt f6h", "SoftInt f7h",
1206 "SoftInt f8h", "SoftInt f9h", "SoftInt fah", "SoftInt fbh", "SoftInt fch", "SoftInt fdh", "SoftInt feh", "SoftInt ffh",
1207 };
1208 if (uExit < RT_ELEMENTS(s_apszNames))
1209 return s_apszNames[uExit];
1210 return NULL;
1211}
1212
1213
1214/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1215static void iemR3InfoTlbPrintHeader(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, bool *pfHeader)
1216{
1217 if (*pfHeader)
1218 return;
1219 pHlp->pfnPrintf(pHlp, "%cTLB for CPU %u:\n", &pVCpu->iem.s.CodeTlb == pTlb ? 'I' : 'D', pVCpu->idCpu);
1220 *pfHeader = true;
1221}
1222
1223
1224#define IEMR3INFOTLB_F_ONLY_VALID RT_BIT_32(0)
1225#define IEMR3INFOTLB_F_CHECK RT_BIT_32(1)
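/* IEMR3INFOTLB_F_ONLY_VALID: only print entries whose tag matches the current TLB revision; IEMR3INFOTLB_F_CHECK: cross-check entries against the guest page tables (same EMT only). */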
1226
1227/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1228static void iemR3InfoTlbPrintSlot(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe,
1229 uint32_t uSlot, uint32_t fFlags)
1230{
1231#ifndef VBOX_VMM_TARGET_ARMV8
1232 uint64_t const uTlbRevision = !(uSlot & 1) ? pTlb->uTlbRevision : pTlb->uTlbRevisionGlobal;
1233#else
1234 uint64_t const uTlbRevision = pTlb->uTlbRevision;
1235#endif
1236 if ((fFlags & IEMR3INFOTLB_F_ONLY_VALID) && (pTlbe->uTag & IEMTLB_REVISION_MASK) != uTlbRevision)
1237 return;
1238
1239 /* The address needs to be sign extended, thus the shifting fun here. */
1240 RTGCPTR const GCPtr = (RTGCINTPTR)((pTlbe->uTag & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1241 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT);
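/* I.e. the tag keeps address bits [GUEST_PAGE_SHIFT + IEMTLB_TAG_ADDR_WIDTH - 1 : GUEST_PAGE_SHIFT]; shifting the top tag bit up to bit 63 and arithmetically back down sign-extends it into a canonical guest address. */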
1242 const char *pszValid = "";
1243#ifndef VBOX_VMM_TARGET_ARMV8
1244 char szTmp[128];
1245 if (fFlags & IEMR3INFOTLB_F_CHECK)
1246 {
1247 uint32_t const fInvSlotG = (uint32_t)!(uSlot & 1) << X86_PTE_BIT_G;
1248 PGMPTWALKFAST WalkFast;
1249 int rc = PGMGstQueryPageFast(pVCpu, GCPtr, 0 /*fFlags - don't check or modify anything */, &WalkFast);
1250 pszValid = szTmp;
1251 if (RT_FAILURE(rc))
1252 switch (rc)
1253 {
1254 case VERR_PAGE_TABLE_NOT_PRESENT:
1255 switch ((WalkFast.fFailed & PGM_WALKFAIL_LEVEL_MASK) >> PGM_WALKFAIL_LEVEL_SHIFT)
1256 {
1257 case 1: pszValid = " stale(page-not-present)"; break;
1258 case 2: pszValid = " stale(pd-entry-not-present)"; break;
1259 case 3: pszValid = " stale(pdptr-entry-not-present)"; break;
1260 case 4: pszValid = " stale(pml4-entry-not-present)"; break;
1261 case 5: pszValid = " stale(pml5-entry-not-present)"; break;
1262 default: pszValid = " stale(VERR_PAGE_TABLE_NOT_PRESENT)"; break;
1263 }
1264 break;
1265 default: RTStrPrintf(szTmp, sizeof(szTmp), " stale(rc=%d)", rc); break;
1266 }
1267 else if (WalkFast.GCPhys != pTlbe->GCPhys)
1268 RTStrPrintf(szTmp, sizeof(szTmp), " stale(GCPhys=%RGp)", WalkFast.GCPhys);
1269 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G | X86_PTE_A | X86_PTE_D))
1270 == ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1271 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED))
1272 | fInvSlotG ) )
1273 pszValid = " still-valid";
1274 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G))
1275 == ((pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER)) | fInvSlotG) )
1276 switch ( (~WalkFast.fEffective & (X86_PTE_A | X86_PTE_D))
1277 ^ (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED)) )
1278 {
1279 case X86_PTE_A:
1280 pszValid = WalkFast.fEffective & X86_PTE_A ? " still-valid(accessed-now)" : " still-valid(accessed-no-more)";
1281 break;
1282 case X86_PTE_D:
1283 pszValid = WalkFast.fEffective & X86_PTE_D ? " still-valid(dirty-now)" : " still-valid(dirty-no-more)";
1284 break;
1285 case X86_PTE_D | X86_PTE_A:
1286 RTStrPrintf(szTmp, sizeof(szTmp), " still-valid(%s%s)",
1287 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1288 : WalkFast.fEffective & X86_PTE_D ? "dirty-now" : "dirty-no-more",
1289 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1290 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1291 break;
1292 default: AssertFailed(); break;
1293 }
1294 else
1295 RTStrPrintf(szTmp, sizeof(szTmp), " stale(%s%s%s%s%s)",
1296 (~WalkFast.fEffective & X86_PTE_RW) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) ? ""
1297 : WalkFast.fEffective & X86_PTE_RW ? "writable-now" : "writable-no-more",
1298 (~WalkFast.fEffective & X86_PTE_US) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) ? ""
1299 : WalkFast.fEffective & X86_PTE_US ? " user-now" : " user-no-more",
1300 (~WalkFast.fEffective & X86_PTE_G) == fInvSlotG ? ""
1301 : WalkFast.fEffective & X86_PTE_G ? " global-now" : " global-no-more",
1302 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1303 : WalkFast.fEffective & X86_PTE_D ? " dirty-now" : " dirty-no-more",
1304 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1305 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1306 }
1307#else
1308 RT_NOREF(pVCpu);
1309#endif
1310
1311 pHlp->pfnPrintf(pHlp, IEMTLB_SLOT_FMT ": %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s%s%s%s/%s%s%s%s/%s %s%s\n",
1312 uSlot,
1313 (pTlbe->uTag & IEMTLB_REVISION_MASK) == uTlbRevision ? "valid "
1314 : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
1315 : "expired",
1316 GCPtr, /* -> */
1317 pTlbe->GCPhys, /* / */ pTlbe->pbMappingR3,
1318 /* / */
1319 (uint32_t)(pTlbe->fFlagsAndPhysRev & ~IEMTLBE_F_PHYS_REV),
1320 /* */
1321 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE ? "R-" : "RW",
1322 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC ? "-" : "X",
1323 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
1324 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY ? "-" : "D",
1325 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER ? "U" : "S",
1326 !(uSlot & 1) ? "-" : "G",
1327 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE ? "2M" : "4K",
1328 /* / */
1329 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? "-" : "w",
1330 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? "-" : "r",
1331 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? "u" : "-",
1332 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_CODE_PAGE ? "c" : "-",
1333 /* / */
1334 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? "N" : "M",
1335 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
1336 : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired",
1337 pszValid);
1338}
1339
1340
1341/** Displays one or more TLB slots. */
1342static void iemR3InfoTlbPrintSlots(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1343 uint32_t uSlot, uint32_t cSlots, uint32_t fFlags, bool *pfHeader)
1344{
1345 if (uSlot < RT_ELEMENTS(pTlb->aEntries))
1346 {
1347 if (cSlots > RT_ELEMENTS(pTlb->aEntries))
1348 {
1349 pHlp->pfnPrintf(pHlp, "error: Too many slots given: %u, adjusting it down to the max (%u)\n",
1350 cSlots, RT_ELEMENTS(pTlb->aEntries));
1351 cSlots = RT_ELEMENTS(pTlb->aEntries);
1352 }
1353
1354 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1355 while (cSlots-- > 0)
1356 {
1357 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
1358 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &Tlbe, uSlot, fFlags);
1359 uSlot = (uSlot + 1) % RT_ELEMENTS(pTlb->aEntries);
1360 }
1361 }
1362 else
1363 pHlp->pfnPrintf(pHlp, "error: TLB slot is out of range: %u (%#x), max %u (%#x)\n",
1364 uSlot, uSlot, RT_ELEMENTS(pTlb->aEntries) - 1, RT_ELEMENTS(pTlb->aEntries) - 1);
1365}
1366
1367
1368/** Displays the TLB slot for the given address. */
1369static void iemR3InfoTlbPrintAddress(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1370 uint64_t uAddress, uint32_t fFlags, bool *pfHeader)
1371{
1372 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1373
1374 uint64_t const uTag = IEMTLB_CALC_TAG_NO_REV(uAddress);
1375#ifdef IEMTLB_TAG_TO_EVEN_INDEX
1376 uint32_t const uSlot = IEMTLB_TAG_TO_EVEN_INDEX(uTag);
1377#else
1378 uint32_t const uSlot = IEMTLB_TAG_TO_INDEX(uTag);
1379#endif
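/* On x86 targets each tag maps to an even/odd slot pair: the even slot holds the non-global entry (uTlbRevision) and the odd one the global entry (uTlbRevisionGlobal). */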
1380 IEMTLBENTRY const TlbeL = pTlb->aEntries[uSlot];
1381#ifndef VBOX_VMM_TARGET_ARMV8
1382 IEMTLBENTRY const TlbeG = pTlb->aEntries[uSlot + 1];
1383#endif
1384 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot,
1385 TlbeL.uTag == (uTag | pTlb->uTlbRevision) ? "match"
1386 : (TlbeL.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1387 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeL, uSlot, fFlags);
1388
1389#ifndef VBOX_VMM_TARGET_ARMV8
1390 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot + 1,
1391 TlbeG.uTag == (uTag | pTlb->uTlbRevisionGlobal) ? "match"
1392 : (TlbeG.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1393 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeG, uSlot + 1, fFlags);
1394#endif
1395}
1396
1397
1398/** Common worker for iemR3InfoDTlb and iemR3InfoITlb. */
1399static void iemR3InfoTlbCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fITlb)
1400{
1401 /*
1402 * This is entirely argument driven.
1403 */
1404 static RTGETOPTDEF const s_aOptions[] =
1405 {
1406 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1407 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1408 { "--check", 'C', RTGETOPT_REQ_NOTHING },
1409 { "all", 'A', RTGETOPT_REQ_NOTHING },
1410 { "--all", 'A', RTGETOPT_REQ_NOTHING },
1411 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1412 { "--range", 'r', RTGETOPT_REQ_UINT32_PAIR | RTGETOPT_FLAG_HEX },
1413 { "--slot", 's', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1414 { "--only-valid", 'v', RTGETOPT_REQ_NOTHING },
1415 };
1416
1417 RTGETOPTSTATE State;
1418 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1419 AssertRCReturnVoid(rc);
1420
1421 uint32_t cActionArgs = 0;
1422 bool fNeedHeader = true;
1423 bool fAddressMode = true;
1424 uint32_t fFlags = 0;
1425 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1426 PVMCPU pVCpu = pVCpuCall;
1427 if (!pVCpu)
1428 pVCpu = VMMGetCpuById(pVM, 0);
1429
1430 RTGETOPTUNION ValueUnion;
1431 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1432 {
1433 switch (rc)
1434 {
1435 case 'c':
1436 if (ValueUnion.u32 >= pVM->cCpus)
1437 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1438 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1439 {
1440 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1441 fNeedHeader = true;
1442 if (!pVCpuCall || pVCpuCall->idCpu != ValueUnion.u32)
1443 {
1444 pHlp->pfnPrintf(pHlp, "info: Can't check guest PTs when switching to a different VCpu! Targetting %u, on %u.\n",
1445 ValueUnion.u32, pVCpuCall->idCpu);
1446 fFlags &= ~IEMR3INFOTLB_F_CHECK;
1447 }
1448 }
1449 break;
1450
1451 case 'C':
1452 if (!pVCpuCall)
1453 pHlp->pfnPrintf(pHlp, "error: Can't check guest PT when not running on an EMT!\n");
1454 else if (pVCpu != pVCpuCall)
1455 pHlp->pfnPrintf(pHlp, "error: Can't check guest PTs when on a different EMT! Targetting %u, on %u.\n",
1456 pVCpu->idCpu, pVCpuCall->idCpu);
1457 else
1458 fFlags |= IEMR3INFOTLB_F_CHECK;
1459 break;
1460
1461 case 'a':
1462 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1463 ValueUnion.u64, fFlags, &fNeedHeader);
1464 fAddressMode = true;
1465 cActionArgs++;
1466 break;
1467
1468 case 'A':
1469 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1470 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1471 cActionArgs++;
1472 break;
1473
1474 case 'r':
1475 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1476 ValueUnion.PairU32.uFirst, ValueUnion.PairU32.uSecond, fFlags, &fNeedHeader);
1477 fAddressMode = false;
1478 cActionArgs++;
1479 break;
1480
1481 case 's':
1482 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1483 ValueUnion.u32, 1, fFlags, &fNeedHeader);
1484 fAddressMode = false;
1485 cActionArgs++;
1486 break;
1487
1488 case 'v':
1489 fFlags |= IEMR3INFOTLB_F_ONLY_VALID;
1490 break;
1491
1492 case VINF_GETOPT_NOT_OPTION:
1493 if (fAddressMode)
1494 {
1495 uint64_t uAddr;
1496 rc = RTStrToUInt64Full(ValueUnion.psz, 16, &uAddr);
1497 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1498 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1499 uAddr, fFlags, &fNeedHeader);
1500 else
1501 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed guest address '%s': %Rrc\n", ValueUnion.psz, rc);
1502 }
1503 else
1504 {
1505 uint32_t uSlot;
1506 rc = RTStrToUInt32Full(ValueUnion.psz, 16, &uSlot);
1507 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1508 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1509 uSlot, 1, fFlags, &fNeedHeader);
1510 else
1511 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed TLB slot number '%s': %Rrc\n", ValueUnion.psz, rc);
1512 }
1513 cActionArgs++;
1514 break;
1515
1516 case 'h':
1517 pHlp->pfnPrintf(pHlp,
1518 "Usage: info %ctlb [options]\n"
1519 "\n"
1520 "Options:\n"
1521 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1522 " Selects the CPU which TLBs we're looking at. Default: Caller / 0\n"
1523 " -C,--check\n"
1524 " Check valid entries against guest PTs.\n"
1525 " -A, --all, all\n"
1526 " Display all the TLB entries (default if no other args).\n"
1527 " -a<virt>, --address=<virt>\n"
1528 " Shows the TLB entry for the specified guest virtual address.\n"
1529 " -r<slot:count>, --range=<slot:count>\n"
1530 " Shows the TLB entries for the specified slot range.\n"
1531 " -s<slot>,--slot=<slot>\n"
1532 " Shows the given TLB slot.\n"
1533 " -v,--only-valid\n"
1534 " Only show valid TLB entries (TAG, not phys)\n"
1535 "\n"
1536 "Non-options are interpreted according to the last -a, -r or -s option,\n"
1537 "defaulting to addresses if not preceeded by any of those options.\n"
1538 , fITlb ? 'i' : 'd');
1539 return;
1540
1541 default:
1542 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1543 return;
1544 }
1545 }
1546
1547 /*
1548 * If no action taken, we display all (-A) by default.
1549 */
1550 if (!cActionArgs)
1551 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1552 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1553}
1554
1555
1556/**
1557 * @callback_method_impl{FNDBGFINFOARGVINT, itlb}
1558 */
1559static DECLCALLBACK(void) iemR3InfoITlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1560{
1561 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, true /*fITlb*/);
1562}
1563
1564
1565/**
1566 * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
1567 */
1568static DECLCALLBACK(void) iemR3InfoDTlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1569{
1570 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, false /*fITlb*/);
1571}
1572
1573
1574#ifdef IEM_WITH_TLB_TRACE
1575/**
1576 * @callback_method_impl{FNDBGFINFOARGVINT, tlbtrace}
1577 */
1578static DECLCALLBACK(void) iemR3InfoTlbTrace(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1579{
1580 /*
1581 * Parse arguments.
1582 */
1583 static RTGETOPTDEF const s_aOptions[] =
1584 {
1585 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1586 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1587 { "--last", 'l', RTGETOPT_REQ_UINT32 },
1588 { "--limit", 'l', RTGETOPT_REQ_UINT32 },
1589 { "--stop-at-global-flush", 'g', RTGETOPT_REQ_NOTHING },
1590 { "--resolve-rip", 'r', RTGETOPT_REQ_NOTHING },
1591 };
1592
1593 RTGETOPTSTATE State;
1594 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1595 AssertRCReturnVoid(rc);
1596
1597 uint32_t cLimit = UINT32_MAX;
1598 bool fStopAtGlobalFlush = false;
1599 bool fResolveRip = false;
1600 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1601 PVMCPU pVCpu = pVCpuCall;
1602 if (!pVCpu)
1603 pVCpu = VMMGetCpuById(pVM, 0);
1604
1605 RTGETOPTUNION ValueUnion;
1606 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1607 {
1608 switch (rc)
1609 {
1610 case 'c':
1611 if (ValueUnion.u32 >= pVM->cCpus)
1612 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1613 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1614 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1615 break;
1616
1617 case 'l':
1618 cLimit = ValueUnion.u32;
1619 break;
1620
1621 case 'g':
1622 fStopAtGlobalFlush = true;
1623 break;
1624
1625 case 'r':
1626 fResolveRip = true;
1627 break;
1628
1629 case 'h':
1630 pHlp->pfnPrintf(pHlp,
1631 "Usage: info tlbtrace [options] [n]\n"
1632 "\n"
1633 "Options:\n"
1634 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1635 " Selects the CPU which TLB trace we're looking at. Default: Caller / 0\n"
1636 " [n], -l<n>, --last=<n>\n"
1637 " Limit display to the last N entries. Default: all\n"
1638 " -g, --stop-at-global-flush\n"
1639 " Stop after the first global flush entry.\n"
1640 " -r, --resolve-rip\n"
1641 " Resolve symbols for the flattened RIP addresses.\n"
1642 );
1643 return;
1644
1645 case VINF_GETOPT_NOT_OPTION:
1646 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cLimit);
1647 if (RT_SUCCESS(rc))
1648 break;
1649 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
1650 return;
1651
1652 default:
1653 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1654 return;
1655 }
1656 }
1657
1658 /*
1659 * Get the details.
1660 */
1661 AssertReturnVoid(pVCpu);
1662 Assert(pVCpu->iem.s.cTlbTraceEntriesShift <= 28);
1663 uint32_t idx = pVCpu->iem.s.idxTlbTraceEntry;
1664 uint32_t const cShift = RT_MIN(pVCpu->iem.s.cTlbTraceEntriesShift, 28);
1665 uint32_t const fMask = RT_BIT_32(cShift) - 1;
1666 uint32_t cLeft = RT_MIN(RT_MIN(idx, RT_BIT_32(cShift)), cLimit);
1667 PCIEMTLBTRACEENTRY paEntries = pVCpu->iem.s.paTlbTraceEntries;
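/* The trace log is a power-of-two sized ring buffer; idxTlbTraceEntry is the next write position, so the entries are walked backwards from the newest one using (--idx & fMask). */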
1668 if (cLeft && paEntries)
1669 {
1670 /*
1671 * Display the entries.
1672 */
1673 pHlp->pfnPrintf(pHlp, "TLB Trace for CPU %u:\n", pVCpu->idCpu);
1674 while (cLeft-- > 0)
1675 {
1676 PCIEMTLBTRACEENTRY const pCur = &paEntries[--idx & fMask];
1677 const char *pszSymbol = "";
1678 union
1679 {
1680 RTDBGSYMBOL Symbol;
1681 char ach[sizeof(RTDBGSYMBOL) + 32];
1682 } uBuf;
1683 if (fResolveRip)
1684 {
1685 RTGCINTPTR offDisp = 0;
1686 DBGFADDRESS Addr;
1687 rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, pCur->rip),
1688 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL
1689 | RTDBGSYMADDR_FLAGS_SKIP_ABS
1690 | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
1691 &offDisp, &uBuf.Symbol, NULL);
1692 if (RT_SUCCESS(rc))
1693 {
1694 /* Add displacement. */
1695 if (offDisp)
1696 {
1697 size_t const cchName = strlen(uBuf.Symbol.szName);
1698 char * const pszEndName = &uBuf.Symbol.szName[cchName];
1699 size_t const cbLeft = sizeof(uBuf) - sizeof(uBuf.Symbol) + sizeof(uBuf.Symbol.szName) - cchName;
1700 if (offDisp > 0)
1701 RTStrPrintf(pszEndName, cbLeft, "+%#1RGv", offDisp);
1702 else
1703 RTStrPrintf(pszEndName, cbLeft, "-%#1RGv", -offDisp);
1704 }
1705
1706 /* Put a space before it. */
1707 AssertCompile(RTASSERT_OFFSET_OF(RTDBGSYMBOL, szName) > 0);
1708 char *pszName = uBuf.Symbol.szName;
1709 *--pszName = ' ';
1710 pszSymbol = pszName;
1711 }
1712 }
1713 static const char *s_apszTlbType[2] = { "code", "data" };
1714 static const char *s_apszScanType[4] = { "skipped", "global", "non-global", "both" };
1715 switch (pCur->enmType)
1716 {
1717 case kIemTlbTraceType_InvlPg:
1718 pHlp->pfnPrintf(pHlp, "%u: %016RX64 invlpg %RGv slot=" IEMTLB_SLOT_FMT "%s\n", idx, pCur->rip,
1719 pCur->u64Param, (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param), pszSymbol);
1720 break;
1721 case kIemTlbTraceType_EvictSlot:
1722 pHlp->pfnPrintf(pHlp, "%u: %016RX64 evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1723 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1724 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1725 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1726 pCur->u64Param2, pszSymbol);
1727 break;
1728 case kIemTlbTraceType_LargeEvictSlot:
1729 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1730 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1731 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1732 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1733 pCur->u64Param2, pszSymbol);
1734 break;
1735 case kIemTlbTraceType_LargeScan:
1736 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large scan %s %s%s\n", idx, pCur->rip, s_apszTlbType[pCur->bParam & 1],
1737 s_apszScanType[pCur->u32Param & 3], pszSymbol);
1738 break;
1739
1740 case kIemTlbTraceType_Flush:
1741 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64%s\n", idx, pCur->rip,
1742 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pszSymbol);
1743 break;
1744 case kIemTlbTraceType_FlushGlobal:
1745 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64 grev=%#RX64%s\n", idx, pCur->rip,
1746 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pCur->u64Param2, pszSymbol);
1747 if (fStopAtGlobalFlush)
1748 return;
1749 break;
1750 case kIemTlbTraceType_Load:
1751 case kIemTlbTraceType_LoadGlobal:
1752 pHlp->pfnPrintf(pHlp, "%u: %016RX64 %cload %s %RGv slot=" IEMTLB_SLOT_FMT " gcphys=%RGp fTlb=%#RX32%s\n",
1753 idx, pCur->rip,
1754 pCur->enmType == kIemTlbTraceType_LoadGlobal ? 'g' : 'l', s_apszTlbType[pCur->bParam & 1],
1755 pCur->u64Param,
1756 (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param)
1757 | (pCur->enmType == kIemTlbTraceType_LoadGlobal),
1758 (RTGCPTR)pCur->u64Param2, pCur->u32Param, pszSymbol);
1759 break;
1760
1761 case kIemTlbTraceType_Load_Cr0:
1762 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr0 %08RX64 (was %08RX64)%s\n",
1763 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1764 break;
1765 case kIemTlbTraceType_Load_Cr3:
1766 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr3 %016RX64 (was %016RX64)%s\n",
1767 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1768 break;
1769 case kIemTlbTraceType_Load_Cr4:
1770 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr4 %08RX64 (was %08RX64)%s\n",
1771 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1772 break;
1773 case kIemTlbTraceType_Load_Efer:
1774 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load efer %016RX64 (was %016RX64)%s\n",
1775 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1776 break;
1777
1778 case kIemTlbTraceType_Irq:
1779 pHlp->pfnPrintf(pHlp, "%u: %016RX64 irq %#04x flags=%#x eflboth=%#RX64%s\n",
1780 idx, pCur->rip, pCur->bParam, pCur->u32Param,
1781 pCur->u64Param & ((RT_BIT_64(CPUMX86EFLAGS_HW_BITS) - 1) | CPUMX86EFLAGS_INT_MASK_64),
1782 pszSymbol);
1783 break;
1784 case kIemTlbTraceType_Xcpt:
1785 if (pCur->u32Param & IEM_XCPT_FLAGS_CR2)
1786 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x cr2=%RX64%s\n",
1787 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pCur->u64Param2, pszSymbol);
1788 else if (pCur->u32Param & IEM_XCPT_FLAGS_ERR)
1789 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x%s\n",
1790 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pszSymbol);
1791 else
1792 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x%s\n",
1793 idx, pCur->rip, pCur->bParam, pCur->u32Param, pszSymbol);
1794 break;
1795 case kIemTlbTraceType_IRet:
1796 pHlp->pfnPrintf(pHlp, "%u: %016RX64 iret cs:rip=%04x:%016RX64 efl=%08RX32%s\n",
1797 idx, pCur->rip, pCur->u32Param, pCur->u64Param, (uint32_t)pCur->u64Param2, pszSymbol);
1798 break;
1799
1800 case kIemTlbTraceType_Tb_Compile:
1801 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb comp GCPhysPc=%012RX64%s\n",
1802 idx, pCur->rip, pCur->u64Param, pszSymbol);
1803 break;
1804 case kIemTlbTraceType_Tb_Exec_Threaded:
1805 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb thrd GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1806 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1807 break;
1808 case kIemTlbTraceType_Tb_Exec_Native:
1809 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb n8ve GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1810 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1811 break;
1812
1813 case kIemTlbTraceType_User0:
1814 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user0 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1815 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1816 break;
1817 case kIemTlbTraceType_User1:
1818 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user1 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1819 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1820 break;
1821 case kIemTlbTraceType_User2:
1822 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user2 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1823 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1824 break;
1825 case kIemTlbTraceType_User3:
1826 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user3 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1827 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1828 break;
1829
1830 case kIemTlbTraceType_Invalid:
1831 pHlp->pfnPrintf(pHlp, "%u: Invalid!\n");
1832 break;
1833 }
1834 }
1835 }
1836 else
1837 pHlp->pfnPrintf(pHlp, "No trace entries to display\n");
1838}
1839#endif /* IEM_WITH_TLB_TRACE */
1840
1841#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1842
1843/**
1844 * Gets the compile time flat PC for the TB.
1845 */
1846DECL_FORCE_INLINE(RTGCPTR) iemR3GetTbFlatPc(PCIEMTB pTb)
1847{
1848#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
1849 if (pTb->fFlags & IEMTB_F_TYPE_NATIVE)
1850 {
1851 PCIEMTBDBG const pDbgInfo = pTb->pDbgInfo;
1852 return pDbgInfo ? pDbgInfo->FlatPc : RTGCPTR_MAX;
1853 }
1854#endif
1855 return pTb->FlatPc;
1856}
1857
1858
1859/**
1860 * @callback_method_impl{FNDBGFINFOARGVINT, tb}
1861 */
1862static DECLCALLBACK(void) iemR3InfoTb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1863{
1864 /*
1865 * Parse arguments.
1866 */
1867 static RTGETOPTDEF const s_aOptions[] =
1868 {
1869 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1870 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1871 { "--addr", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1872 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1873 { "--phys", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1874 { "--physical", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1875 { "--phys-addr", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1876 { "--phys-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1877 { "--physical-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1878 { "--flags", 'f', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1879 { "--tb", 't', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1880 { "--tb-id", 't', RTGETOPT_REQ_UINT32 },
1881 };
1882
1883 RTGETOPTSTATE State;
1884 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1885 AssertRCReturnVoid(rc);
1886
1887 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
1888 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
1889 RTGCPHYS GCPhysPc = NIL_RTGCPHYS;
1890 RTGCPHYS GCVirt = NIL_RTGCPTR;
1891 uint32_t fFlags = UINT32_MAX;
1892 uint32_t idTb = UINT32_MAX;
1893
1894 RTGETOPTUNION ValueUnion;
1895 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1896 {
1897 switch (rc)
1898 {
1899 case 'c':
1900 if (ValueUnion.u32 >= pVM->cCpus)
1901 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1902 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1903 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1904 break;
1905
1906 case 'a':
1907 GCVirt = ValueUnion.u64;
1908 GCPhysPc = NIL_RTGCPHYS;
1909 idTb = UINT32_MAX;
1910 break;
1911
1912 case 'p':
1913 GCVirt = NIL_RTGCPHYS;
1914 GCPhysPc = ValueUnion.u64;
1915 idTb = UINT32_MAX;
1916 break;
1917
1918 case 'f':
1919 fFlags = ValueUnion.u32;
1920 break;
1921
1922 case 't':
1923 GCVirt = NIL_RTGCPHYS;
1924 GCPhysPc = NIL_RTGCPHYS;
1925 idTb = ValueUnion.u32;
1926 break;
1927
1928 case VINF_GETOPT_NOT_OPTION:
1929 {
1930 if ( (ValueUnion.psz[0] == 'T' || ValueUnion.psz[0] == 't')
1931 && (ValueUnion.psz[1] == 'B' || ValueUnion.psz[1] == 'b')
1932 && ValueUnion.psz[2] == '#')
1933 {
1934 rc = RTStrToUInt32Full(&ValueUnion.psz[3], 0, &idTb);
1935 if (RT_SUCCESS(rc))
1936 {
1937 GCVirt = NIL_RTGCPHYS;
1938 GCPhysPc = NIL_RTGCPHYS;
1939 break;
1940 }
1941 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to TD ID: %Rrc\n", ValueUnion.psz, rc);
1942 }
1943 else
1944 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1945 return;
1946 }
1947
1948 case 'h':
1949 pHlp->pfnPrintf(pHlp,
1950 "Usage: info tb [options]\n"
1951 "\n"
1952 "Options:\n"
1953 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1954 " Selects the CPU which TBs we're looking at. Default: Caller / 0\n"
1955 " -a<virt>, --address=<virt>\n"
1956 " Shows the TB for the specified guest virtual address.\n"
1957 " -p<phys>, --phys=<phys>, --phys-addr=<phys>\n"
1958 " Shows the TB for the specified guest physical address.\n"
1959 " -t<id>, --tb=<id>, --tb-id=<id>, TB#<id>\n"
1960 " Shows the TB specified by the identifier/number (from tbtop).\n"
1961 " -f<flags>, --flags=<flags>\n"
1962 " The TB flags value (hex) to use when looking up the TB.\n"
1963 "\n"
1964 "The default is to use CS:RIP and derive flags from the CPU mode.\n");
1965 return;
1966
1967 default:
1968 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1969 return;
1970 }
1971 }
1972
1973 /* Currently we only do work on the same EMT. */
1974 if (pVCpu != pVCpuThis)
1975 {
1976 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
1977 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
1978 return;
1979 }
1980
1981 /*
1982 * Defaults.
1983 */
1984 if (GCPhysPc == NIL_RTGCPHYS && idTb == UINT32_MAX)
1985 {
1986 if (GCVirt == NIL_RTGCPTR)
1987 GCVirt = CPUMGetGuestFlatPC(pVCpu);
1988 rc = PGMPhysGCPtr2GCPhys(pVCpu, GCVirt, &GCPhysPc);
1989 if (RT_FAILURE(rc))
1990 {
1991 pHlp->pfnPrintf(pHlp, "Failed to convert %%%RGv to a guest physical address: %Rrc\n", GCVirt, rc);
1992 return;
1993 }
1994 }
1995 if (fFlags == UINT32_MAX && idTb == UINT32_MAX)
1996 {
1997 /* Note! This is duplicating code in IEMAllThrdRecompiler. */
1998 fFlags = iemCalcExecFlags(pVCpu);
1999 if (pVM->cCpus == 1)
2000 fFlags |= IEM_F_X86_DISREGARD_LOCK;
2001 if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
2002 fFlags |= IEMTB_F_INHIBIT_SHADOW;
2003 if (CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
2004 fFlags |= IEMTB_F_INHIBIT_NMI;
2005 if ((IEM_F_MODE_CPUMODE_MASK & fFlags) != IEMMODE_64BIT)
2006 {
2007 int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
2008 if (offFromLim < X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
2009 fFlags |= IEMTB_F_CS_LIM_CHECKS;
2010 }
2011 }
2012
2013 PCIEMTB pTb;
2014 if (idTb == UINT32_MAX)
2015 {
2016 /*
2017 * Do the lookup...
2018 *
2019 * Note! This is also duplicating code in IEMAllThrdRecompiler. We don't
2020 * have much choice since we don't want to increase use counters and
2021 * trigger native recompilation.
2022 */
2023 fFlags &= IEMTB_F_KEY_MASK;
2024 IEMTBCACHE const * const pTbCache = pVCpu->iem.s.pTbCacheR3;
2025 uint32_t const idxHash = IEMTBCACHE_HASH(pTbCache, fFlags, GCPhysPc);
2026 pTb = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
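/* Walk the collision chain of the hashed slot, matching on the physical PC and the key-relevant flags. */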
2027 while (pTb)
2028 {
2029 if (pTb->GCPhysPc == GCPhysPc)
2030 {
2031 if ((pTb->fFlags & IEMTB_F_KEY_MASK) == fFlags)
2032 {
2033 /// @todo if (pTb->x86.fAttr == (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u)
2034 break;
2035 }
2036 }
2037 pTb = pTb->pNext;
2038 }
2039 if (!pTb)
2040 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x - no TB found on #%u\n", GCPhysPc, fFlags, pVCpu->idCpu);
2041 }
2042 else
2043 {
2044 /*
2045 * Use the TB ID for indexing.
2046 */
2047 pTb = NULL;
2048 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
2049 if (pTbAllocator)
2050 {
2051 size_t const idxTbChunk = idTb / pTbAllocator->cTbsPerChunk;
2052 size_t const idxTbInChunk = idTb % pTbAllocator->cTbsPerChunk;
2053 if (idxTbChunk < pTbAllocator->cAllocatedChunks)
2054 pTb = &pTbAllocator->aChunks[idxTbChunk].paTbs[idxTbInChunk];
2055 else
2056 pHlp->pfnPrintf(pHlp, "Invalid TB ID: %u (%#x)\n", idTb, idTb);
2057 }
2058 }
2059
2060 if (pTb)
2061 {
2062 /*
2063 * Disassemble according to type.
2064 */
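/* Recover the allocator-wide TB number from the chunk index and the TB's offset within that chunk (the inverse of the ID-to-chunk mapping used for lookups by ID above). */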
2065 size_t const idxTbChunk = pTb->idxAllocChunk;
2066 size_t const idxTbNo = (pTb - &pVCpu->iem.s.pTbAllocatorR3->aChunks[idxTbChunk].paTbs[0])
2067 + idxTbChunk * pVCpu->iem.s.pTbAllocatorR3->cTbsPerChunk;
2068 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
2069 {
2070# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
2071 case IEMTB_F_TYPE_NATIVE:
2072 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - native\n",
2073 GCPhysPc, iemR3GetTbFlatPc(pTb), fFlags, pVCpu->idCpu, idxTbNo, pTb);
2074 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
2075 break;
2076# endif
2077
2078 case IEMTB_F_TYPE_THREADED:
2079 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - threaded\n",
2080 GCPhysPc, pTb->FlatPc, fFlags, pVCpu->idCpu, idxTbNo, pTb);
2081 iemThreadedDisassembleTb(pTb, pHlp);
2082 break;
2083
2084 default:
2085 pHlp->pfnPrintf(pHlp, "PC=%RGp (%%%RGv) fFlags=%#x on #%u: TB#%#zx/%p - ??? %#x\n",
2086 GCPhysPc, pTb->FlatPc, fFlags, pVCpu->idCpu, idxTbNo, pTb, pTb->fFlags);
2087 break;
2088 }
2089 }
2090}
2091
2092
2093/**
2094 * @callback_method_impl{FNDBGFINFOARGVINT, tbtop}
2095 */
2096static DECLCALLBACK(void) iemR3InfoTbTop(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
2097{
2098 /*
2099 * Parse arguments.
2100 */
2101 static RTGETOPTDEF const s_aOptions[] =
2102 {
2103 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
2104 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
2105 { "--dis", 'd', RTGETOPT_REQ_NOTHING },
2106 { "--disas", 'd', RTGETOPT_REQ_NOTHING },
2107 { "--disasm", 'd', RTGETOPT_REQ_NOTHING },
2108 { "--disassemble", 'd', RTGETOPT_REQ_NOTHING },
2109 { "--no-dis", 'D', RTGETOPT_REQ_NOTHING },
2110 { "--no-disas", 'D', RTGETOPT_REQ_NOTHING },
2111 { "--no-disasm", 'D', RTGETOPT_REQ_NOTHING },
2112 { "--no-disassemble", 'D', RTGETOPT_REQ_NOTHING },
2113 { "--most-freq", 'f', RTGETOPT_REQ_NOTHING },
2114 { "--most-frequent", 'f', RTGETOPT_REQ_NOTHING },
2115 { "--most-frequently", 'f', RTGETOPT_REQ_NOTHING },
2116 { "--most-frequently-used", 'f', RTGETOPT_REQ_NOTHING },
2117 { "--most-recent", 'r', RTGETOPT_REQ_NOTHING },
2118 { "--most-recently", 'r', RTGETOPT_REQ_NOTHING },
2119 { "--most-recently-used", 'r', RTGETOPT_REQ_NOTHING },
2120 { "--count", 'n', RTGETOPT_REQ_UINT32 },
2121 };
2122
2123 RTGETOPTSTATE State;
2124 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
2125 AssertRCReturnVoid(rc);
2126
2127 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
2128 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
2129 enum { kTbTop_MostFrequentlyUsed, kTbTop_MostRecentlyUsed }
2130 enmTop = kTbTop_MostFrequentlyUsed;
2131 bool fDisassemble = false;
2132 uint32_t const cTopDefault = 64;
2133 uint32_t const cTopMin = 1;
2134 uint32_t const cTopMax = 1024;
2135 uint32_t cTop = cTopDefault;
2136
2137 RTGETOPTUNION ValueUnion;
2138 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
2139 {
2140 switch (rc)
2141 {
2142 case 'c':
2143 if (ValueUnion.u32 >= pVM->cCpus)
2144 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
2145 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
2146 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
2147 break;
2148
2149 case 'd':
2150 fDisassemble = true;
2151 break;
2152
2153 case 'D':
2154 fDisassemble = false;
2155 break;
2156
2157 case 'f':
2158 enmTop = kTbTop_MostFrequentlyUsed;
2159 break;
2160
2161 case 'r':
2162 enmTop = kTbTop_MostRecentlyUsed;
2163 break;
2164
2165 case VINF_GETOPT_NOT_OPTION:
2166 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cTop);
2167 if (RT_FAILURE(rc))
2168 {
2169 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
2170 return;
2171 }
2172 ValueUnion.u32 = cTop;
2173 RT_FALL_THROUGH();
2174 case 'n':
2175 if (!ValueUnion.u32)
2176 cTop = cTopDefault;
2177 else
2178 {
2179 cTop = RT_MAX(RT_MIN(ValueUnion.u32, cTopMax), cTopMin);
2180 if (cTop != ValueUnion.u32)
2181 pHlp->pfnPrintf(pHlp, "warning: adjusted %u to %u (valid range: [%u..%u], 0 for default (%d))\n",
2182 ValueUnion.u32, cTop, cTopMin, cTopMax, cTopDefault);
2183 }
2184 break;
2185
2186 case 'h':
2187 pHlp->pfnPrintf(pHlp,
2188 "Usage: info tbtop [options]\n"
2189 "\n"
2190 "Options:\n"
2191 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
2192 " Selects the CPU whose TBs we're looking at. Default: Caller / 0\n"
2193 " -d, --dis[as[m]], --disassemble\n"
2194 " Show full TB disassembly.\n"
2195 " -D, --no-dis[as[m]], --no-disassemble\n"
2196 " Do not show TB disassembly. The default.\n"
2197 " -f, --most-freq[ent[ly[-used]]]\n"
2198 " Shows the most frequently used TBs (IEMTB::cUsed). The default.\n"
2199 " -r, --most-recent[ly[-used]]\n"
2200 " Shows the most recently used TBs (IEMTB::msLastUsed).\n"
2201 " -n<num>, --count=<num>\n"
2202 " The number of TBs to display. Default: %u\n"
2203 " Non-option arguments are also taken as this count.\n"
2204 , cTopDefault);
2205 return;
2206
2207 default:
2208 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
2209 return;
2210 }
2211 }
2212
2213 /* Currently we only do work on the same EMT. */
2214 if (pVCpu != pVCpuThis)
2215 {
2216 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
2217 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
2218 return;
2219 }
2220
2221 /*
2222 * Collect the data by scanning the TB allocation map.
2223 */
2224 struct IEMTBTOPENTRY
2225 {
2226 /** Pointer to the translation block. */
2227 PCIEMTB pTb;
2228 /** The sorting key. */
2229 uint64_t uSortKey;
2230 } aTop[cTopMax] = { { NULL, 0 }, };
2231 uint32_t cValid = 0;
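/* Note: aTop[] is kept sorted by uSortKey in descending order; cValid is the number of entries currently in use. */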
2232 PIEMTBALLOCATOR pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
2233 if (pTbAllocator)
2234 {
2235 uint32_t const cTbsPerChunk = pTbAllocator->cTbsPerChunk;
2236 for (uint32_t iChunk = 0; iChunk < pTbAllocator->cAllocatedChunks; iChunk++)
2237 {
2238 for (uint32_t iTb = 0; iTb < cTbsPerChunk; iTb++)
2239 {
2240 PCIEMTB const pTb = &pTbAllocator->aChunks[iChunk].paTbs[iTb];
2241 AssertContinue(pTb);
2242 if (pTb->fFlags & IEMTB_F_TYPE_MASK)
2243 {
2244 /* Extract and compose the sort key. */
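/* The primary criterion is placed in the high 32 bits so a single 64-bit compare orders by it first, with the other value acting as tie-breaker. */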
2245 uint64_t const uSortKey = enmTop == kTbTop_MostFrequentlyUsed
2246 ? RT_MAKE_U64(pTb->msLastUsed, pTb->cUsed)
2247 : RT_MAKE_U64(pTb->cUsed, pTb->msLastUsed);
2248
2249 /*
2250 * Discard the key when the table is full and it doesn't beat the smallest entry.
2251 */
2252 if ( cValid >= cTop
2253 && uSortKey <= aTop[cTop - 1].uSortKey)
2254 { /* discard it */ }
2255 else
2256 {
2257 /*
2258 * Do binary search to find the insert location
2259 */
2260 uint32_t idx;
2261 if (cValid > 0)
2262 {
2263 uint32_t idxEnd = cValid;
2264 uint32_t idxStart = 0;
2265 idx = cValid / 2;
2266 for (;;)
2267 {
2268 if (uSortKey > aTop[idx].uSortKey)
2269 {
2270 if (idx > idxStart)
2271 idxEnd = idx;
2272 else
2273 break;
2274 }
2275 else if (uSortKey < aTop[idx].uSortKey)
2276 {
2277 idx += 1;
2278 if (idx < idxEnd)
2279 idxStart = idx;
2280 else
2281 break;
2282 }
2283 else
2284 {
2285 do
2286 idx++;
2287 while (idx < cValid && uSortKey == aTop[idx].uSortKey);
2288 break;
2289 }
2290 idx = idxStart + (idxEnd - idxStart) / 2;
2291 }
2292 AssertContinue(idx < RT_ELEMENTS(aTop));
2293
2294 /*
2295 * Shift entries as needed.
2296 */
2297 if (cValid >= cTop)
2298 {
2299 if (idx != cTop - 1U)
2300 memmove(&aTop[idx + 1], &aTop[idx], (cTop - idx - 1) * sizeof(aTop[0]));
2301 }
2302 else
2303 {
2304 if (idx != cValid)
2305 memmove(&aTop[idx + 1], &aTop[idx], (cValid - idx) * sizeof(aTop[0]));
2306 cValid++;
2307 }
2308 }
2309 else
2310 {
2311 /* Special case: The first insertion. */
2312 cValid = 1;
2313 idx = 0;
2314 }
2315
2316 /*
2317 * Fill in the new entry.
2318 */
2319 aTop[idx].uSortKey = uSortKey;
2320 aTop[idx].pTb = pTb;
2321 }
2322 }
2323 }
2324 }
2325 }
2326
2327 /*
2328 * Display the result.
2329 */
2330 if (cTop > cValid)
2331 cTop = cValid;
2332 pHlp->pfnPrintf(pHlp, "Displaying the top %u TBs for CPU #%u ordered by %s:\n",
2333 cTop, pVCpu->idCpu, enmTop == kTbTop_MostFrequentlyUsed ? "cUsed" : "msLastUsed");
2334 if (fDisassemble)
2335 pHlp->pfnPrintf(pHlp, "================================================================================\n");
2336
2337 for (uint32_t idx = 0; idx < cTop; idx++)
2338 {
2339 if (fDisassemble && idx)
2340 pHlp->pfnPrintf(pHlp, "\n------------------------------- %u -------------------------------\n", idx);
2341
2342 PCIEMTB const pTb = aTop[idx].pTb;
2343 size_t const idxTbChunk = pTb->idxAllocChunk;
2344 Assert(idxTbChunk < pTbAllocator->cAllocatedChunks);
2345 size_t const idxTbNo = (pTb - &pTbAllocator->aChunks[idxTbChunk].paTbs[0])
2346 + idxTbChunk * pTbAllocator->cTbsPerChunk;
2347 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
2348 {
2349# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
2350 case IEMTB_F_TYPE_NATIVE:
2351 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - native\n",
2352 idxTbNo, pTb->GCPhysPc, iemR3GetTbFlatPc(pTb), pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2353 if (fDisassemble)
2354 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
2355 break;
2356# endif
2357
2358 case IEMTB_F_TYPE_THREADED:
2359 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - threaded\n",
2360 idxTbNo, pTb->GCPhysPc, pTb->FlatPc, pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2361 if (fDisassemble)
2362 iemThreadedDisassembleTb(pTb, pHlp);
2363 break;
2364
2365 default:
2366 pHlp->pfnPrintf(pHlp, "TB#%#zx: PC=%RGp (%%%RGv) cUsed=%u msLastUsed=%u fFlags=%#010x - ???\n",
2367 idxTbNo, pTb->GCPhysPc, pTb->FlatPc, pTb->cUsed, pTb->msLastUsed, pTb->fFlags);
2368 break;
2369 }
2370 }
2371}
2372
2373#endif /* VBOX_WITH_IEM_RECOMPILER && !VBOX_VMM_TARGET_ARMV8 */
2374
2375
2376#ifdef VBOX_WITH_DEBUGGER
2377
2378/** @callback_method_impl{FNDBGCCMD,
2379 * Implements the 'iemflushtlb' command. }
2380 */
2381static DECLCALLBACK(int) iemR3DbgFlushTlbs(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2382{
2383 VMCPUID idCpu = DBGCCmdHlpGetCurrentCpu(pCmdHlp);
2384 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
2385 if (pVCpu)
2386 {
2387 VMR3ReqPriorityCallVoidWaitU(pUVM, idCpu, (PFNRT)IEMTlbInvalidateAllGlobal, 1, pVCpu);
2388 return VINF_SUCCESS;
2389 }
2390 RT_NOREF(paArgs, cArgs);
2391 return DBGCCmdHlpFail(pCmdHlp, pCmd, "failed to get the PVMCPU for the current CPU");
2392}
2393
2394
2395/**
2396 * Called by IEMR3Init to register debugger commands.
2397 */
2398static void iemR3RegisterDebuggerCommands(void)
2399{
2400 /*
2401 * Register debugger commands.
2402 */
2403 static DBGCCMD const s_aCmds[] =
2404 {
2405 {
2406 /* .pszCmd = */ "iemflushtlb",
2407 /* .cArgsMin = */ 0,
2408 /* .cArgsMax = */ 0,
2409 /* .paArgDescs = */ NULL,
2410 /* .cArgDescs = */ 0,
2411 /* .fFlags = */ 0,
2412 /* .pfnHandler = */ iemR3DbgFlushTlbs,
2413 /* .pszSyntax = */ "",
2414 /* .pszDescription = */ "Flushes the code and data TLBs"
2415 },
2416 };
2417
2418 int rc = DBGCRegisterCommands(&s_aCmds[0], RT_ELEMENTS(s_aCmds));
2419 AssertLogRelRC(rc);
2420}
2421
2422#endif /* VBOX_WITH_DEBUGGER */
2423