VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IEMR3.cpp

Last change on this file was 104412, checked in by vboxsync, 3 weeks ago

VMM/IEM: Adjusted the TB exit statistics a bit more, adding a few new ones, making more of the release stats that don't go into the TB, and organizing them to try to avoid counting the same exit more than once. [build fix] bugref:10376 bugref:10653

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 80.7 KB
1/* $Id: IEMR3.cpp 104412 2024-04-24 10:59:22Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/dbgf.h>
37#include <VBox/vmm/mm.h>
38#if defined(VBOX_VMM_TARGET_ARMV8)
39# include "IEMInternal-armv8.h"
40#else
41# include "IEMInternal.h"
42#endif
43#include <VBox/vmm/vm.h>
44#include <VBox/vmm/vmapi.h>
45#include <VBox/err.h>
46#ifdef VBOX_WITH_DEBUGGER
47# include <VBox/dbg.h>
48#endif
49
50#include <iprt/assert.h>
51#include <iprt/getopt.h>
52#include <iprt/string.h>
53
54#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
55# include "IEMN8veRecompiler.h"
56# include "IEMThreadedFunctions.h"
57# include "IEMInline.h"
58#endif
59
60
61/*********************************************************************************************************************************
62* Internal Functions *
63*********************************************************************************************************************************/
64static FNDBGFINFOARGVINT iemR3InfoITlb;
65static FNDBGFINFOARGVINT iemR3InfoDTlb;
66#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
67static FNDBGFINFOARGVINT iemR3InfoTb;
68#endif
69#ifdef VBOX_WITH_DEBUGGER
70static void iemR3RegisterDebuggerCommands(void);
71#endif
72
73
74#if !defined(VBOX_VMM_TARGET_ARMV8)
75static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
76{
77 switch (enmTargetCpu)
78 {
79#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("IEMTARGETCPU_") - 1)
80 CASE_RET_STR(IEMTARGETCPU_8086);
81 CASE_RET_STR(IEMTARGETCPU_V20);
82 CASE_RET_STR(IEMTARGETCPU_186);
83 CASE_RET_STR(IEMTARGETCPU_286);
84 CASE_RET_STR(IEMTARGETCPU_386);
85 CASE_RET_STR(IEMTARGETCPU_486);
86 CASE_RET_STR(IEMTARGETCPU_PENTIUM);
87 CASE_RET_STR(IEMTARGETCPU_PPRO);
88 CASE_RET_STR(IEMTARGETCPU_CURRENT);
89#undef CASE_RET_STR
90 default: return "Unknown";
91 }
92}
93#endif
94
95
96/**
97 * Initializes the interpreted execution manager.
98 *
99 * This must be called after CPUM as we're querying information from CPUM about
100 * the guest and host CPUs.
101 *
102 * @returns VBox status code.
103 * @param pVM The cross context VM structure.
104 */
105VMMR3DECL(int) IEMR3Init(PVM pVM)
106{
107 /*
108 * Read configuration.
109 */
110#if (!defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)) || defined(VBOX_WITH_IEM_RECOMPILER)
111 PCFGMNODE const pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
112 int rc;
113#endif
114
115#if !defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
116 /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
117 * Controls whether the custom VBox specific CPUID host call interface is
118 * enabled or not. */
119# ifdef DEBUG_bird
120 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
121# else
122 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
123# endif
124 AssertLogRelRCReturn(rc, rc);
125#endif
126
127#ifdef VBOX_WITH_IEM_RECOMPILER
128 /** @cfgm{/IEM/MaxTbCount, uint32_t, 524288}
129 * Max number of TBs per EMT. */
130 uint32_t cMaxTbs = 0;
131 rc = CFGMR3QueryU32Def(pIem, "MaxTbCount", &cMaxTbs, _512K);
132 AssertLogRelRCReturn(rc, rc);
133 if (cMaxTbs < _16K || cMaxTbs > _8M)
134 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
135 "MaxTbCount value %u (%#x) is out of range (min %u, max %u)", cMaxTbs, cMaxTbs, _16K, _8M);
136
137 /** @cfgm{/IEM/InitialTbCount, uint32_t, 32768}
138 * Initial (minimum) number of TBs per EMT in ring-3. */
139 uint32_t cInitialTbs = 0;
140 rc = CFGMR3QueryU32Def(pIem, "InitialTbCount", &cInitialTbs, RT_MIN(cMaxTbs, _32K));
141 AssertLogRelRCReturn(rc, rc);
142 if (cInitialTbs < _16K || cInitialTbs > _8M)
143 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
144 "InitialTbCount value %u (%#x) is out of range (min %u, max %u)", cInitialTbs, cInitialTbs, _16K, _8M);
145
146 /* Check that the two values make sense together. Expect user/api to do
147 the right thing or get lost. */
148 if (cInitialTbs > cMaxTbs)
149 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
150 "InitialTbCount value %u (%#x) is higher than the MaxTbCount value %u (%#x)",
151 cInitialTbs, cInitialTbs, cMaxTbs, cMaxTbs);
152
153 /** @cfgm{/IEM/MaxExecMem, uint64_t, 512 MiB}
154 * Max executable memory for recompiled code per EMT. */
155 uint64_t cbMaxExec = 0;
156 rc = CFGMR3QueryU64Def(pIem, "MaxExecMem", &cbMaxExec, _512M);
157 AssertLogRelRCReturn(rc, rc);
158 if (cbMaxExec < _1M || cbMaxExec > 16*_1G64)
159 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
160 "MaxExecMem value %'RU64 (%#RX64) is out of range (min %'RU64, max %'RU64)",
161 cbMaxExec, cbMaxExec, (uint64_t)_1M, 16*_1G64);
162
163 /** @cfgm{/IEM/ExecChunkSize, uint32_t, 0 (auto)}
164 * The executable memory allocator chunk size. */
165 uint32_t cbChunkExec = 0;
166 rc = CFGMR3QueryU32Def(pIem, "ExecChunkSize", &cbChunkExec, 0);
167 AssertLogRelRCReturn(rc, rc);
168 if (cbChunkExec != 0 && cbChunkExec != UINT32_MAX && (cbChunkExec < _1M || cbChunkExec > _256M))
169 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
170 "ExecChunkSize value %'RU32 (%#RX32) is out of range (min %'RU32, max %'RU32)",
171 cbChunkExec, cbChunkExec, _1M, _256M);
172
173 /** @cfgm{/IEM/InitialExecMemSize, uint64_t, 1}
174 * The initial executable memory allocator size (per EMT). The value is
175 * rounded up to the nearest chunk size, so 1 byte means one chunk. */
176 uint64_t cbInitialExec = 0;
177 rc = CFGMR3QueryU64Def(pIem, "InitialExecMemSize", &cbInitialExec, 0);
178 AssertLogRelRCReturn(rc, rc);
179 if (cbInitialExec > cbMaxExec)
180 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
181 "InitialExecMemSize value %'RU64 (%#RX64) is out of range (max %'RU64)",
182 cbInitialExec, cbInitialExec, cbMaxExec);
183
184 /** @cfgm{/IEM/NativeRecompileAtUsedCount, uint32_t, 16}
185 * The translation block use count value to do native recompilation at. */
186 uint32_t uTbNativeRecompileAtUsedCount = 16;
187 rc = CFGMR3QueryU32Def(pIem, "NativeRecompileAtUsedCount", &uTbNativeRecompileAtUsedCount, 16);
188 AssertLogRelRCReturn(rc, rc);
189
190#endif /* VBOX_WITH_IEM_RECOMPILER*/
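/* Example (assumption: the standard VBoxInternal extradata-to-CFGM mapping): the
   /IEM/* keys documented in the @cfgm blocks above are read from the VM's CFGM
   root, so they can normally be overridden per VM along the lines of
       VBoxManage setextradata "MyVM" "VBoxInternal/IEM/MaxTbCount" 262144
   where "MyVM" is a placeholder VM name. */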
191
192 /*
193 * Initialize per-CPU data and register statistics.
194 */
195 uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
196 uint64_t const uInitialTlbPhysRev = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
197
198 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
199 {
200 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
201 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
202
203 pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
204 pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev;
205
206 /*
207 * Host and guest CPU information.
208 */
209 if (idCpu == 0)
210 {
211 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
212 pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
213#if !defined(VBOX_VMM_TARGET_ARMV8)
214 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
215 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
216 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
217# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
218 if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor)
219 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
220 else
221# endif
222 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
223#else
224 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
225 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
226#endif
227
228#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
229 switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
230 {
231 case kCpumMicroarch_Intel_8086: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
232 case kCpumMicroarch_Intel_80186: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
233 case kCpumMicroarch_Intel_80286: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
234 case kCpumMicroarch_Intel_80386: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
235 case kCpumMicroarch_Intel_80486: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
236 case kCpumMicroarch_Intel_P5: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
237 case kCpumMicroarch_Intel_P6: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
238 case kCpumMicroarch_NEC_V20: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
239 case kCpumMicroarch_NEC_V30: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
240 default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
241 }
242 LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
243 iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
244 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
245#else
246 LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
247 CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
248 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
249#endif
250 }
251 else
252 {
253 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor;
254 pVCpu->iem.s.enmHostCpuVendor = pVM->apCpusR3[0]->iem.s.enmHostCpuVendor;
255 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0];
256 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1];
257#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
258 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;
259#endif
260 }
261
262 /*
263 * Mark all buffers free.
264 */
265 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
266 while (iMemMap-- > 0)
267 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
268
269#ifdef VBOX_WITH_IEM_RECOMPILER
270 /*
271 * Distribute recompiler configuration.
272 */
273 pVCpu->iem.s.uTbNativeRecompileAtUsedCount = uTbNativeRecompileAtUsedCount;
274#endif
275 }
276
277
278#ifdef VBOX_WITH_IEM_RECOMPILER
279 /*
280 * Initialize the TB allocator and cache (/ hash table).
281 *
282 * This is done by each EMT to try to get more optimal thread/numa locality of
283 * the allocations.
284 */
285 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL, (PFNRT)iemTbInit, 6,
286 pVM, cInitialTbs, cMaxTbs, cbInitialExec, cbMaxExec, cbChunkExec);
287 AssertLogRelRCReturn(rc, rc);
288#endif
289
290 /*
291 * Register statistics.
292 */
293 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
294 {
295#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) /* quick fix for stupid structure duplication non-sense */
296 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
297 char szPat[128];
298 RT_NOREF_PV(szPat); /* lazy bird */
299
300 STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
301 "Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
302 STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
303 "Number of longjmp calls", "/IEM/CPU%u/cLongJumps", idCpu);
304 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
305 "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
306 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
307 "VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
308 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
309 "VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
310 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
311 "Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
312 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
313 "Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
314 STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
315 "Approx bytes written", "/IEM/CPU%u/cbWritten", idCpu);
316 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
317 "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);
318 STAMR3RegisterF(pVM, &pVCpu->iem.s.cMisalignedAtomics, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
319 "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu);
320
321 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbMisses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
322 "Code TLB misses", "/IEM/CPU%u/CodeTlb-Misses", idCpu);
323 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
324 "Code TLB revision", "/IEM/CPU%u/CodeTlb-Revision", idCpu);
325 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
326 "Code TLB physical revision", "/IEM/CPU%u/CodeTlb-PhysRev", idCpu);
327 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
328 "Code TLB slow read path", "/IEM/CPU%u/CodeTlb-SlowReads", idCpu);
329
330 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbMisses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
331 "Data TLB misses", "/IEM/CPU%u/DataTlb-Misses", idCpu);
332 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
333 "Data TLB safe read path", "/IEM/CPU%u/DataTlb-SafeReads", idCpu);
334 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
335 "Data TLB safe write path", "/IEM/CPU%u/DataTlb-SafeWrites", idCpu);
336 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
337 "Data TLB revision", "/IEM/CPU%u/DataTlb-Revision", idCpu);
338 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
339 "Data TLB physical revision", "/IEM/CPU%u/DataTlb-PhysRev", idCpu);
340
341# ifdef VBOX_WITH_STATISTICS
342 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
343 "Code TLB hits", "/IEM/CPU%u/CodeTlb-Hits", idCpu);
344 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
345 "Data TLB hits", "/IEM/CPU%u/DataTlb-Hits-Other", idCpu);
346# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
347 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
348 "Data TLB native stack access hits", "/IEM/CPU%u/DataTlb-Hits-Native-Stack", idCpu);
349 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
350 "Data TLB native data fetch hits", "/IEM/CPU%u/DataTlb-Hits-Native-Fetch", idCpu);
351 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
352 "Data TLB native data store hits", "/IEM/CPU%u/DataTlb-Hits-Native-Store", idCpu);
353 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
354 "Data TLB native mapped data hits", "/IEM/CPU%u/DataTlb-Hits-Native-Mapped", idCpu);
355# endif
356 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Hits-*", idCpu);
357 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
358 "Data TLB hits total", "/IEM/CPU%u/DataTlb-Hits", idCpu);
359
360 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Safe*", idCpu);
361 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
362 "Data TLB actual misses", "/IEM/CPU%u/DataTlb-SafeTotal", idCpu);
363 char szVal[128];
364 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/DataTlb-SafeTotal", idCpu);
365 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/DataTlb-Hits-*", idCpu);
366 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
367 "Data TLB actual miss rate", "/IEM/CPU%u/DataTlb-SafeRate", idCpu);
368
369# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
370 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
371 "Code TLB native misses on new page", "/IEM/CPU%u/CodeTlb-Misses-New-Page", idCpu);
372 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
373 "Code TLB native misses on new page w/ offset", "/IEM/CPU%u/CodeTlb-Misses-New-Page-With-Offset", idCpu);
374 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
375 "Code TLB native hits on new page", "/IEM/CPU%u/CodeTlb-Hits-New-Page", idCpu);
376 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
377 "Code TLB native hits on new page /w offset", "/IEM/CPU%u/CodeTlb-Hits-New-Page-With-Offset", idCpu);
378# endif
379# endif
380
381#ifdef VBOX_WITH_IEM_RECOMPILER
382 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecNative, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
383 "Executed native translation block", "/IEM/CPU%u/re/cTbExecNative", idCpu);
384 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecThreaded, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
385 "Executed threaded translation block", "/IEM/CPU%u/re/cTbExecThreaded", idCpu);
386 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
387 "Times threaded TB execution was interrupted/broken off", "/IEM/CPU%u/re/cTbExecThreadedBreaks", idCpu);
388# ifdef VBOX_WITH_STATISTICS
389 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
390 "Times threaded TB execution was interrupted/broken off on a call with lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithLookup", idCpu);
391 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithoutLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
392 "Times threaded TB execution was interrupted/broken off on a call without lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithoutLookup", idCpu);
393# endif
394
395 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
396 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
397 "Translation block allocations", "/IEM/CPU%u/re/cTbAllocCalls", idCpu);
398 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatFrees, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
399 "Translation block frees", "/IEM/CPU%u/re/cTbFreeCalls", idCpu);
400# ifdef VBOX_WITH_STATISTICS
401 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
402 "Time spent freeing up TBs when full at alloc", "/IEM/CPU%u/re/TbPruningAlloc", idCpu);
403# endif
404 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPruneNative, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
405 "Time spent freeing up native TBs when out of executable memory", "/IEM/CPU%u/re/ExecMem/TbPruningNative", idCpu);
406 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cAllocatedChunks, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
407 "Populated TB chunks", "/IEM/CPU%u/re/cTbChunks", idCpu);
408 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxChunks, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
409 "Max number of TB chunks", "/IEM/CPU%u/re/cTbChunksMax", idCpu);
410 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cTotalTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
411 "Total number of TBs in the allocator", "/IEM/CPU%u/re/cTbTotal", idCpu);
412 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
413 "Max total number of TBs allowed", "/IEM/CPU%u/re/cTbTotalMax", idCpu);
414 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cInUseTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
415 "Number of currently allocated TBs", "/IEM/CPU%u/re/cTbAllocated", idCpu);
416 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cNativeTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
417 "Number of currently allocated native TBs", "/IEM/CPU%u/re/cTbAllocatedNative", idCpu);
418 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cThreadedTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
419 "Number of currently allocated threaded TBs", "/IEM/CPU%u/re/cTbAllocatedThreaded", idCpu);
420
421 PIEMTBCACHE const pTbCache = pVCpu->iem.s.pTbCacheR3;
422 STAMR3RegisterF(pVM, (void *)&pTbCache->cHash, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
423 "Translation block lookup table size", "/IEM/CPU%u/re/cTbHashTab", idCpu);
424
425 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHits, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
426 "Translation block lookup hits", "/IEM/CPU%u/re/cTbLookupHits", idCpu);
427 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHitsViaTbLookupTable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
428 "Translation block lookup hits via TB lookup table associated with the previous TB", "/IEM/CPU%u/re/cTbLookupHitsViaTbLookupTable", idCpu);
429 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
430 "Translation block lookup misses", "/IEM/CPU%u/re/cTbLookupMisses", idCpu);
431 STAMR3RegisterF(pVM, (void *)&pTbCache->cCollisions, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
432 "Translation block hash table collisions", "/IEM/CPU%u/re/cTbCollisions", idCpu);
433# ifdef VBOX_WITH_STATISTICS
434 STAMR3RegisterF(pVM, (void *)&pTbCache->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
435 "Time spent shortening collision lists", "/IEM/CPU%u/re/TbPruningCollisions", idCpu);
436# endif
437
438 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedCalls, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
439 "Calls per threaded translation block", "/IEM/CPU%u/re/ThrdCallsPerTb", idCpu);
440 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbInstr, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
441 "Instruction per threaded translation block", "/IEM/CPU%u/re/ThrdInstrPerTb", idCpu);
442 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLookupEntries, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
443 "TB lookup table entries per threaded translation block", "/IEM/CPU%u/re/ThrdLookupEntriesPerTb", idCpu);
444
445 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckIrqBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
446 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckIrqBreaks", idCpu);
447 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckModeBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
448 "TB breaks by CheckMode", "/IEM/CPU%u/re/CheckModeBreaks", idCpu);
449 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
450 "Branch target misses", "/IEM/CPU%u/re/CheckTbJmpMisses", idCpu);
451 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
452 "Needing CS.LIM checking TB after branch or on page crossing", "/IEM/CPU%u/re/CheckTbNeedCsLimChecking", idCpu);
453# ifdef VBOX_WITH_STATISTICS
454 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopInTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
455 "Detected loop within TB", "/IEM/CPU%u/re/LoopInTbDetected", idCpu);
456#endif
457
458 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeExecMemInstrBufAllocFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
459 "Number of times the exec memory allocator failed to allocate a large enough buffer",
460 "/IEM/CPU%u/re/NativeExecMemInstrBufAllocFailed", idCpu);
461
462 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsRecompiled, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
463 "Number of threaded calls per TB that have been properly recompiled to native code",
464 "/IEM/CPU%u/re/NativeCallsRecompiledPerTb", idCpu);
465 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsThreaded, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
466 "Number of threaded calls per TB that could not be recompiler to native code",
467 "/IEM/CPU%u/re/NativeCallsThreadedPerTb", idCpu);
468 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeFullyRecompiledTbs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
469 "Number of threaded calls that could not be recompiler to native code",
470 "/IEM/CPU%u/re/NativeFullyRecompiledTbs", idCpu);
471
472 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbNativeCode, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES_PER_TB,
473 "Size of native code per TB", "/IEM/CPU%u/re/NativeCodeSizePerTb", idCpu);
474 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeRecompilation, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
475 "Profiling iemNativeRecompile()", "/IEM/CPU%u/re/NativeRecompilation", idCpu);
476
477# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
478# ifdef VBOX_WITH_STATISTICS
479 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
480 "Number of calls to iemNativeRegAllocFindFree.",
481 "/IEM/CPU%u/re/NativeRegFindFree", idCpu);
482# endif
483 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
484 "Number of times iemNativeRegAllocFindFree needed to free a variable.",
485 "/IEM/CPU%u/re/NativeRegFindFreeVar", idCpu);
486# ifdef VBOX_WITH_STATISTICS
487 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
488 "Number of times iemNativeRegAllocFindFree did not needed to free any variables.",
489 "/IEM/CPU%u/re/NativeRegFindFreeNoVar", idCpu);
490 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
491 "Times liveness info freeed up shadowed guest registers in iemNativeRegAllocFindFree.",
492 "/IEM/CPU%u/re/NativeRegFindFreeLivenessUnshadowed", idCpu);
493 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
494 "Times liveness info helped finding the return register in iemNativeRegAllocFindFree.",
495 "/IEM/CPU%u/re/NativeRegFindFreeLivenessHelped", idCpu);
496
497 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkippedArithmetic, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
498 "Skipped all status flag updating, arithmetic instructions",
499 "/IEM/CPU%u/re/NativeEFlagsSkippedArithmetic", idCpu);
500 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkippedLogical, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
501 "Skipped all status flag updating, logical instructions",
502 "/IEM/CPU%u/re/NativeEFlagsSkippedLogical", idCpu);
503
504 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippable", idCpu);
505 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfSkippable", idCpu);
506 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfSkippable", idCpu);
507 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfSkippable", idCpu);
508 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfSkippable", idCpu);
509 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfSkippable", idCpu);
510
511 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfRequired", idCpu);
512 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfRequired", idCpu);
513 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfRequired", idCpu);
514 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfRequired", idCpu);
515 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfRequired", idCpu);
516 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfRequired", idCpu);
517
518# ifdef IEMLIVENESS_EXTENDED_LAYOUT
519 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfDelayable", idCpu);
520 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfDelayable", idCpu);
521 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfDelayable", idCpu);
522 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfDelayable", idCpu);
523 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfDelayable", idCpu);
524 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfDelayable", idCpu);
525# endif
526
527 /* Sum up all status bits ('_' is a sorting hack). */
528 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fSkippable*", idCpu);
529 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total skippable EFLAGS status bit updating",
530 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippable", idCpu);
531
532 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fRequired*", idCpu);
533 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total required EFLAGS status bit updating",
534 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusRequired", idCpu);
535
536# ifdef IEMLIVENESS_EXTENDED_LAYOUT
537 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fDelayable*", idCpu);
538 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total potentially delayable EFLAGS status bit updating",
539 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayable", idCpu);
540# endif
541
542 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?f*", idCpu);
543 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total EFLAGS status bit events of any kind",
544 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusTotal", idCpu);
545
546 /* Ratio of the status bit skippables. */
547 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusTotal", idCpu);
548 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippable", idCpu);
549 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
550 "Total skippable EFLAGS status bit updating percentage",
551 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippablePct", idCpu);
552
553# ifdef IEMLIVENESS_EXTENDED_LAYOUT
554 /* Ratio of the status bit delayables. */
555 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayable", idCpu);
556 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
557 "Total potentially delayable EFLAGS status bit updating percentage",
558 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayablePct", idCpu);
559# endif
560
561 /* Ratios of individual bits. */
562 size_t const offFlagChar = RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlagsCf*", idCpu) - 3;
563 Assert(szPat[offFlagChar] == 'C');
564 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippable", idCpu);
565 Assert(szVal[offFlagChar] == 'C');
566 szPat[offFlagChar] = szVal[offFlagChar] = 'C'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.CF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippablePct", idCpu);
567 szPat[offFlagChar] = szVal[offFlagChar] = 'P'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.PF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsPfSkippablePct", idCpu);
568 szPat[offFlagChar] = szVal[offFlagChar] = 'A'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.AF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsAfSkippablePct", idCpu);
569 szPat[offFlagChar] = szVal[offFlagChar] = 'Z'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.ZF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsZfSkippablePct", idCpu);
570 szPat[offFlagChar] = szVal[offFlagChar] = 'S'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.SF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsSfSkippablePct", idCpu);
571 szPat[offFlagChar] = szVal[offFlagChar] = 'O'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.OF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsOfSkippablePct", idCpu);
572
573 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Total RIP updates", "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
574 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateDelayed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Delayed RIP updates", "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
575
576# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
577 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
578 "Number of calls to iemNativeSimdRegAllocFindFree.",
579 "/IEM/CPU%u/re/NativeSimdRegFindFree", idCpu);
580 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
581 "Number of times iemNativeSimdRegAllocFindFree needed to free a variable.",
582 "/IEM/CPU%u/re/NativeSimdRegFindFreeVar", idCpu);
583 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
584 "Number of times iemNativeSimdRegAllocFindFree did not needed to free any variables.",
585 "/IEM/CPU%u/re/NativeSimdRegFindFreeNoVar", idCpu);
586 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
587 "Times liveness info freeed up shadowed guest registers in iemNativeSimdRegAllocFindFree.",
588 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessUnshadowed", idCpu);
589 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
590 "Times liveness info helped finding the return register in iemNativeSimdRegAllocFindFree.",
591 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessHelped", idCpu);
592
593 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks",
594 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckPotential", idCpu);
595 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks",
596 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckPotential", idCpu);
597 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks",
598 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckPotential", idCpu);
599 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks",
600 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckPotential", idCpu);
601
602 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted",
603 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckOmitted", idCpu);
604 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks omitted",
605 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckOmitted", idCpu);
606 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted",
607 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckOmitted", idCpu);
608 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted",
609 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckOmitted", idCpu);
610# endif
611
612 /* Ratio of delayed RIP updates. */
613 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
614 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
615 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
616 "Delayed RIP updating percentage",
617 "/IEM/CPU%u/re/NativePcUpdateDelayed_StatusDelayedPct", idCpu);
618
619 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbFinished, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
620 "Number of times the TB finishes execution completely",
621 "/IEM/CPU%u/re/NativeTbFinished", idCpu);
622# endif /* VBOX_WITH_STATISTICS */
623 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreak, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
624 "Number of times the TB finished through the ReturnBreak label",
625 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak", idCpu);
626 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreakFF, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
627 "Number of times the TB finished through the ReturnBreak label",
628 "/IEM/CPU%u/re/NativeTbExit/ReturnBreakFF", idCpu);
629 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnWithFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
630 "Number of times the TB finished through the ReturnWithFlags label",
631 "/IEM/CPU%u/re/NativeTbExit/ReturnWithFlags", idCpu);
632 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnOtherStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
633 "Number of times the TB finished with some other status value",
634 "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus", idCpu);
635 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLongJump, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
636 "Number of times the TB finished via long jump / throw",
637 "/IEM/CPU%u/re/NativeTbExit/LongJumps", idCpu);
638 /* These end up returning VINF_IEM_REEXEC_BREAK and are thus already counted under NativeTbExit/ReturnBreak: */
639 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitObsoleteTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
640 "Number of times the TB finished through the ObsoleteTb label",
641 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/ObsoleteTb", idCpu);
642 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
643 "Number of times the TB finished through the NeedCsLimChecking label",
644 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/NeedCsLimChecking", idCpu);
645 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
646 "Number of times the TB finished through the CheckBranchMiss label",
647 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/CheckBranchMiss", idCpu);
648 /* Raising stuff will either increment NativeTbExit/LongJumps or NativeTbExit/ReturnOtherStatus
649 depending on whether VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is defined: */
650# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
651# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus/"
652# else
653# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/LongJumps/"
654# endif
655 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseDe, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
656 "Number of times the TB finished raising a #DE exception",
657 RAISE_PREFIX "RaiseDe", idCpu);
658 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
659 "Number of times the TB finished raising a #UD exception",
660 RAISE_PREFIX "RaiseUd", idCpu);
661 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
662 "Number of times the TB finished raising a SSE related exception",
663 RAISE_PREFIX "RaiseSseRelated", idCpu);
664 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseAvxRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
665 "Number of times the TB finished raising a AVX related exception",
666 RAISE_PREFIX "RaiseAvxRelated", idCpu);
667 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseAvxFpRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
668 "Number of times the TB finished raising a SSE/AVX floating point related exception",
669 RAISE_PREFIX "RaiseSseAvxFpRelated", idCpu);
670 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseNm, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
671 "Number of times the TB finished raising a #NM exception",
672 RAISE_PREFIX "RaiseNm", idCpu);
673 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseGp0, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
674 "Number of times the TB finished raising a #GP(0) exception",
675 RAISE_PREFIX "RaiseGp0", idCpu);
676 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseMf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
677 "Number of times the TB finished raising a #MF exception",
678 RAISE_PREFIX "RaiseMf", idCpu);
679 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseXf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
680 "Number of times the TB finished raising a #XF exception",
681 RAISE_PREFIX "RaiseXf", idCpu);
682
683 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeTbExit/*", idCpu); /* only immediate children, no sub folders */
684 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
685 "Number of times native TB execution finished before the end (not counting thrown memory++ exceptions)",
686 "/IEM/CPU%u/re/NativeTbExit", idCpu);
687
688
689# endif /* VBOX_WITH_IEM_NATIVE_RECOMPILER */
690
691#endif /* VBOX_WITH_IEM_RECOMPILER */
692
693 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
694 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatXcpts[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
695 "", "/IEM/CPU%u/Exceptions/%02x", idCpu, i);
696 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatInts); i++)
697 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatInts[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
698 "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
699
700# if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
701 /* Instruction statistics: */
702# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
703 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsRZ.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
704 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
705 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsR3.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
706 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
707# include "IEMInstructionStatisticsTmpl.h"
708# undef IEM_DO_INSTR_STAT
709# endif
710
711# if defined(VBOX_WITH_STATISTICS) && defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
712 /* Threaded function statistics: */
713 for (unsigned i = 1; i < (unsigned)kIemThreadedFunc_End; i++)
714 STAMR3RegisterF(pVM, &pVCpu->iem.s.acThreadedFuncStats[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED,
715 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/ThrdFuncs/%s", idCpu, g_apszIemThreadedFunctionStats[i]);
716# endif
717
718#endif /* !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) - quick fix for stupid structure duplication non-sense */
719 }
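/* Example: the counters registered in the loop above are ordinary STAM items, so
   on a running VM they can typically be listed with the debugvm facility, e.g.
       VBoxManage debugvm "MyVM" statistics --pattern "/IEM/CPU0/re/NativeTbExit*"
   ("MyVM" and the pattern are placeholders); that pattern covers the TB exit
   statistics reorganized by this revision. */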
720
721#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
722 /*
723 * Register the per-VM VMX APIC-access page handler type.
724 */
725 if (pVM->cpum.ro.GuestFeatures.fVmx)
726 {
727 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, PGMPHYSHANDLER_F_NOT_IN_HM,
728 iemVmxApicAccessPageHandler,
729 "VMX APIC-access page", &pVM->iem.s.hVmxApicAccessPage);
730 AssertLogRelRCReturn(rc, rc);
731 }
732#endif
733
734 DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
735 DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM instruction TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
736#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
737 DBGFR3InfoRegisterInternalArgv(pVM, "tb", "IEM translation block", iemR3InfoTb, DBGFINFO_FLAGS_RUN_ON_EMT);
738#endif
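/* Example: "itlb", "dtlb" and (with the recompiler) "tb" are plain DBGF info
   handlers, so they should be reachable through the usual debug interfaces, e.g.
       VBoxManage debugvm "MyVM" info itlb
   where "MyVM" is a placeholder VM name. */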
739#ifdef VBOX_WITH_DEBUGGER
740 iemR3RegisterDebuggerCommands();
741#endif
742
743 return VINF_SUCCESS;
744}
745
746
747VMMR3DECL(int) IEMR3Term(PVM pVM)
748{
749 NOREF(pVM);
750 return VINF_SUCCESS;
751}
752
753
754VMMR3DECL(void) IEMR3Relocate(PVM pVM)
755{
756 RT_NOREF(pVM);
757}
758
759
760/**
761 * Gets the name of a generic IEM exit code.
762 *
763 * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
764 * @param uExit The IEM exit to name.
765 */
766VMMR3DECL(const char *) IEMR3GetExitName(uint32_t uExit)
767{
768 static const char * const s_apszNames[] =
769 {
770 /* external interrupts */
771 "ExtInt 00h", "ExtInt 01h", "ExtInt 02h", "ExtInt 03h", "ExtInt 04h", "ExtInt 05h", "ExtInt 06h", "ExtInt 07h",
772 "ExtInt 08h", "ExtInt 09h", "ExtInt 0ah", "ExtInt 0bh", "ExtInt 0ch", "ExtInt 0dh", "ExtInt 0eh", "ExtInt 0fh",
773 "ExtInt 10h", "ExtInt 11h", "ExtInt 12h", "ExtInt 13h", "ExtInt 14h", "ExtInt 15h", "ExtInt 16h", "ExtInt 17h",
774 "ExtInt 18h", "ExtInt 19h", "ExtInt 1ah", "ExtInt 1bh", "ExtInt 1ch", "ExtInt 1dh", "ExtInt 1eh", "ExtInt 1fh",
775 "ExtInt 20h", "ExtInt 21h", "ExtInt 22h", "ExtInt 23h", "ExtInt 24h", "ExtInt 25h", "ExtInt 26h", "ExtInt 27h",
776 "ExtInt 28h", "ExtInt 29h", "ExtInt 2ah", "ExtInt 2bh", "ExtInt 2ch", "ExtInt 2dh", "ExtInt 2eh", "ExtInt 2fh",
777 "ExtInt 30h", "ExtInt 31h", "ExtInt 32h", "ExtInt 33h", "ExtInt 34h", "ExtInt 35h", "ExtInt 36h", "ExtInt 37h",
778 "ExtInt 38h", "ExtInt 39h", "ExtInt 3ah", "ExtInt 3bh", "ExtInt 3ch", "ExtInt 3dh", "ExtInt 3eh", "ExtInt 3fh",
779 "ExtInt 40h", "ExtInt 41h", "ExtInt 42h", "ExtInt 43h", "ExtInt 44h", "ExtInt 45h", "ExtInt 46h", "ExtInt 47h",
780 "ExtInt 48h", "ExtInt 49h", "ExtInt 4ah", "ExtInt 4bh", "ExtInt 4ch", "ExtInt 4dh", "ExtInt 4eh", "ExtInt 4fh",
781 "ExtInt 50h", "ExtInt 51h", "ExtInt 52h", "ExtInt 53h", "ExtInt 54h", "ExtInt 55h", "ExtInt 56h", "ExtInt 57h",
782 "ExtInt 58h", "ExtInt 59h", "ExtInt 5ah", "ExtInt 5bh", "ExtInt 5ch", "ExtInt 5dh", "ExtInt 5eh", "ExtInt 5fh",
783 "ExtInt 60h", "ExtInt 61h", "ExtInt 62h", "ExtInt 63h", "ExtInt 64h", "ExtInt 65h", "ExtInt 66h", "ExtInt 67h",
784 "ExtInt 68h", "ExtInt 69h", "ExtInt 6ah", "ExtInt 6bh", "ExtInt 6ch", "ExtInt 6dh", "ExtInt 6eh", "ExtInt 6fh",
785 "ExtInt 70h", "ExtInt 71h", "ExtInt 72h", "ExtInt 73h", "ExtInt 74h", "ExtInt 75h", "ExtInt 76h", "ExtInt 77h",
786 "ExtInt 78h", "ExtInt 79h", "ExtInt 7ah", "ExtInt 7bh", "ExtInt 7ch", "ExtInt 7dh", "ExtInt 7eh", "ExtInt 7fh",
787 "ExtInt 80h", "ExtInt 81h", "ExtInt 82h", "ExtInt 83h", "ExtInt 84h", "ExtInt 85h", "ExtInt 86h", "ExtInt 87h",
788 "ExtInt 88h", "ExtInt 89h", "ExtInt 8ah", "ExtInt 8bh", "ExtInt 8ch", "ExtInt 8dh", "ExtInt 8eh", "ExtInt 8fh",
789 "ExtInt 90h", "ExtInt 91h", "ExtInt 92h", "ExtInt 93h", "ExtInt 94h", "ExtInt 95h", "ExtInt 96h", "ExtInt 97h",
790 "ExtInt 98h", "ExtInt 99h", "ExtInt 9ah", "ExtInt 9bh", "ExtInt 9ch", "ExtInt 9dh", "ExtInt 9eh", "ExtInt 9fh",
791 "ExtInt a0h", "ExtInt a1h", "ExtInt a2h", "ExtInt a3h", "ExtInt a4h", "ExtInt a5h", "ExtInt a6h", "ExtInt a7h",
792 "ExtInt a8h", "ExtInt a9h", "ExtInt aah", "ExtInt abh", "ExtInt ach", "ExtInt adh", "ExtInt aeh", "ExtInt afh",
793 "ExtInt b0h", "ExtInt b1h", "ExtInt b2h", "ExtInt b3h", "ExtInt b4h", "ExtInt b5h", "ExtInt b6h", "ExtInt b7h",
794 "ExtInt b8h", "ExtInt b9h", "ExtInt bah", "ExtInt bbh", "ExtInt bch", "ExtInt bdh", "ExtInt beh", "ExtInt bfh",
795 "ExtInt c0h", "ExtInt c1h", "ExtInt c2h", "ExtInt c3h", "ExtInt c4h", "ExtInt c5h", "ExtInt c6h", "ExtInt c7h",
796 "ExtInt c8h", "ExtInt c9h", "ExtInt cah", "ExtInt cbh", "ExtInt cch", "ExtInt cdh", "ExtInt ceh", "ExtInt cfh",
797 "ExtInt d0h", "ExtInt d1h", "ExtInt d2h", "ExtInt d3h", "ExtInt d4h", "ExtInt d5h", "ExtInt d6h", "ExtInt d7h",
798 "ExtInt d8h", "ExtInt d9h", "ExtInt dah", "ExtInt dbh", "ExtInt dch", "ExtInt ddh", "ExtInt deh", "ExtInt dfh",
799 "ExtInt e0h", "ExtInt e1h", "ExtInt e2h", "ExtInt e3h", "ExtInt e4h", "ExtInt e5h", "ExtInt e6h", "ExtInt e7h",
800 "ExtInt e8h", "ExtInt e9h", "ExtInt eah", "ExtInt ebh", "ExtInt ech", "ExtInt edh", "ExtInt eeh", "ExtInt efh",
801 "ExtInt f0h", "ExtInt f1h", "ExtInt f2h", "ExtInt f3h", "ExtInt f4h", "ExtInt f5h", "ExtInt f6h", "ExtInt f7h",
802 "ExtInt f8h", "ExtInt f9h", "ExtInt fah", "ExtInt fbh", "ExtInt fch", "ExtInt fdh", "ExtInt feh", "ExtInt ffh",
803 /* software interrupts */
804 "SoftInt 00h", "SoftInt 01h", "SoftInt 02h", "SoftInt 03h", "SoftInt 04h", "SoftInt 05h", "SoftInt 06h", "SoftInt 07h",
805 "SoftInt 08h", "SoftInt 09h", "SoftInt 0ah", "SoftInt 0bh", "SoftInt 0ch", "SoftInt 0dh", "SoftInt 0eh", "SoftInt 0fh",
806 "SoftInt 10h", "SoftInt 11h", "SoftInt 12h", "SoftInt 13h", "SoftInt 14h", "SoftInt 15h", "SoftInt 16h", "SoftInt 17h",
807 "SoftInt 18h", "SoftInt 19h", "SoftInt 1ah", "SoftInt 1bh", "SoftInt 1ch", "SoftInt 1dh", "SoftInt 1eh", "SoftInt 1fh",
808 "SoftInt 20h", "SoftInt 21h", "SoftInt 22h", "SoftInt 23h", "SoftInt 24h", "SoftInt 25h", "SoftInt 26h", "SoftInt 27h",
809 "SoftInt 28h", "SoftInt 29h", "SoftInt 2ah", "SoftInt 2bh", "SoftInt 2ch", "SoftInt 2dh", "SoftInt 2eh", "SoftInt 2fh",
810 "SoftInt 30h", "SoftInt 31h", "SoftInt 32h", "SoftInt 33h", "SoftInt 34h", "SoftInt 35h", "SoftInt 36h", "SoftInt 37h",
811 "SoftInt 38h", "SoftInt 39h", "SoftInt 3ah", "SoftInt 3bh", "SoftInt 3ch", "SoftInt 3dh", "SoftInt 3eh", "SoftInt 3fh",
812 "SoftInt 40h", "SoftInt 41h", "SoftInt 42h", "SoftInt 43h", "SoftInt 44h", "SoftInt 45h", "SoftInt 46h", "SoftInt 47h",
813 "SoftInt 48h", "SoftInt 49h", "SoftInt 4ah", "SoftInt 4bh", "SoftInt 4ch", "SoftInt 4dh", "SoftInt 4eh", "SoftInt 4fh",
814 "SoftInt 50h", "SoftInt 51h", "SoftInt 52h", "SoftInt 53h", "SoftInt 54h", "SoftInt 55h", "SoftInt 56h", "SoftInt 57h",
815 "SoftInt 58h", "SoftInt 59h", "SoftInt 5ah", "SoftInt 5bh", "SoftInt 5ch", "SoftInt 5dh", "SoftInt 5eh", "SoftInt 5fh",
816 "SoftInt 60h", "SoftInt 61h", "SoftInt 62h", "SoftInt 63h", "SoftInt 64h", "SoftInt 65h", "SoftInt 66h", "SoftInt 67h",
817 "SoftInt 68h", "SoftInt 69h", "SoftInt 6ah", "SoftInt 6bh", "SoftInt 6ch", "SoftInt 6dh", "SoftInt 6eh", "SoftInt 6fh",
818 "SoftInt 70h", "SoftInt 71h", "SoftInt 72h", "SoftInt 73h", "SoftInt 74h", "SoftInt 75h", "SoftInt 76h", "SoftInt 77h",
819 "SoftInt 78h", "SoftInt 79h", "SoftInt 7ah", "SoftInt 7bh", "SoftInt 7ch", "SoftInt 7dh", "SoftInt 7eh", "SoftInt 7fh",
820 "SoftInt 80h", "SoftInt 81h", "SoftInt 82h", "SoftInt 83h", "SoftInt 84h", "SoftInt 85h", "SoftInt 86h", "SoftInt 87h",
821 "SoftInt 88h", "SoftInt 89h", "SoftInt 8ah", "SoftInt 8bh", "SoftInt 8ch", "SoftInt 8dh", "SoftInt 8eh", "SoftInt 8fh",
822 "SoftInt 90h", "SoftInt 91h", "SoftInt 92h", "SoftInt 93h", "SoftInt 94h", "SoftInt 95h", "SoftInt 96h", "SoftInt 97h",
823 "SoftInt 98h", "SoftInt 99h", "SoftInt 9ah", "SoftInt 9bh", "SoftInt 9ch", "SoftInt 9dh", "SoftInt 9eh", "SoftInt 9fh",
824 "SoftInt a0h", "SoftInt a1h", "SoftInt a2h", "SoftInt a3h", "SoftInt a4h", "SoftInt a5h", "SoftInt a6h", "SoftInt a7h",
825 "SoftInt a8h", "SoftInt a9h", "SoftInt aah", "SoftInt abh", "SoftInt ach", "SoftInt adh", "SoftInt aeh", "SoftInt afh",
826 "SoftInt b0h", "SoftInt b1h", "SoftInt b2h", "SoftInt b3h", "SoftInt b4h", "SoftInt b5h", "SoftInt b6h", "SoftInt b7h",
827 "SoftInt b8h", "SoftInt b9h", "SoftInt bah", "SoftInt bbh", "SoftInt bch", "SoftInt bdh", "SoftInt beh", "SoftInt bfh",
828 "SoftInt c0h", "SoftInt c1h", "SoftInt c2h", "SoftInt c3h", "SoftInt c4h", "SoftInt c5h", "SoftInt c6h", "SoftInt c7h",
829 "SoftInt c8h", "SoftInt c9h", "SoftInt cah", "SoftInt cbh", "SoftInt cch", "SoftInt cdh", "SoftInt ceh", "SoftInt cfh",
830 "SoftInt d0h", "SoftInt d1h", "SoftInt d2h", "SoftInt d3h", "SoftInt d4h", "SoftInt d5h", "SoftInt d6h", "SoftInt d7h",
831 "SoftInt d8h", "SoftInt d9h", "SoftInt dah", "SoftInt dbh", "SoftInt dch", "SoftInt ddh", "SoftInt deh", "SoftInt dfh",
832 "SoftInt e0h", "SoftInt e1h", "SoftInt e2h", "SoftInt e3h", "SoftInt e4h", "SoftInt e5h", "SoftInt e6h", "SoftInt e7h",
833 "SoftInt e8h", "SoftInt e9h", "SoftInt eah", "SoftInt ebh", "SoftInt ech", "SoftInt edh", "SoftInt eeh", "SoftInt efh",
834 "SoftInt f0h", "SoftInt f1h", "SoftInt f2h", "SoftInt f3h", "SoftInt f4h", "SoftInt f5h", "SoftInt f6h", "SoftInt f7h",
835 "SoftInt f8h", "SoftInt f9h", "SoftInt fah", "SoftInt fbh", "SoftInt fch", "SoftInt fdh", "SoftInt feh", "SoftInt ffh",
836 };
837 if (uExit < RT_ELEMENTS(s_apszNames))
838 return s_apszNames[uExit];
839 return NULL;
840}
841
842
843/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
844static void iemR3InfoTlbPrintHeader(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, bool *pfHeader)
845{
846 if (*pfHeader)
847 return;
848 pHlp->pfnPrintf(pHlp, "%cTLB for CPU %u:\n", &pVCpu->iem.s.CodeTlb == pTlb ? 'I' : 'D', pVCpu->idCpu);
849 *pfHeader = true;
850}
851
852
853/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
854static void iemR3InfoTlbPrintSlot(PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe, uint32_t uSlot)
855{
856 pHlp->pfnPrintf(pHlp, "%02x: %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s/%s%s%s/%s %s\n",
857 uSlot,
858 (pTlbe->uTag & IEMTLB_REVISION_MASK) == pTlb->uTlbRevision ? "valid "
859 : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
860 : "expired",
861 (pTlbe->uTag & ~IEMTLB_REVISION_MASK) << X86_PAGE_SHIFT,
862 pTlbe->GCPhys, pTlbe->pbMappingR3,
863 (uint32_t)(pTlbe->fFlagsAndPhysRev & ~IEMTLBE_F_PHYS_REV),
864 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC ? "NX" : " X",
865 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE ? "RO" : "RW",
866 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
867 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY ? "-" : "D",
868 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? "-" : "w",
869 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? "-" : "r",
870 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? "U" : "-",
871 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? "S" : "M",
872 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
873 : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired");
874}
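
/*
 * Informal reading guide for the line printed above (derived from the format
 * string, no additional semantics implied):
 *      slot: state tag -> guest-physical / host-mapping / flags  PT-bits/PG-bits/map phys-state
 * where state is valid/empty/expired, the PT group shows NX|X, RO|RW,
 * A(ccessed) and D(irty), the PG group shows w(ritable), r(eadable) and
 * U(nassigned), 'S'/'M' indicates whether a ring-3 mapping is present, and
 * the final column is phys-valid, phys-empty or phys-expired.
 */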
875
876
877/** Displays one or more TLB slots. */
878static void iemR3InfoTlbPrintSlots(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
879 uint32_t uSlot, uint32_t cSlots, bool *pfHeader)
880{
881 if (uSlot < RT_ELEMENTS(pTlb->aEntries))
882 {
883 if (cSlots > RT_ELEMENTS(pTlb->aEntries))
884 {
885 pHlp->pfnPrintf(pHlp, "error: Too many slots given: %u, adjusting it down to the max (%u)\n",
886 cSlots, RT_ELEMENTS(pTlb->aEntries));
887 cSlots = RT_ELEMENTS(pTlb->aEntries);
888 }
889
890 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
891 while (cSlots-- > 0)
892 {
893 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
894 iemR3InfoTlbPrintSlot(pHlp, pTlb, &Tlbe, uSlot);
895 uSlot = (uSlot + 1) % RT_ELEMENTS(pTlb->aEntries);
896 }
897 }
898 else
899 pHlp->pfnPrintf(pHlp, "error: TLB slot is out of range: %u (%#x), max %u (%#x)\n",
900 uSlot, uSlot, RT_ELEMENTS(pTlb->aEntries) - 1, RT_ELEMENTS(pTlb->aEntries) - 1);
901}
902
903
904/** Displays the TLB slot for the given address. */
905static void iemR3InfoTlbPrintAddress(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
906 uint64_t uAddress, bool *pfHeader)
907{
908 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
909
910 uint64_t const uTag = (uAddress << 16) >> (X86_PAGE_SHIFT + 16);
911 uint32_t const uSlot = (uint8_t)uTag;
912 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
913 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot,
914 Tlbe.uTag == (uTag | pTlb->uTlbRevision) ? "match"
915 : (Tlbe.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
916 iemR3InfoTlbPrintSlot(pHlp, pTlb, &Tlbe, uSlot);
917}
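
/*
 * Worked example of the tag/slot derivation above, using a made-up canonical
 * address and assuming X86_PAGE_SHIFT is 12:
 *      uAddress = 0x00007fff80003456
 *      uTag     = (uAddress << 16) >> (12 + 16) = 0x7fff80003   (bits 47:12, sign/canonical bits dropped)
 *      uSlot    = (uint8_t)uTag                 = 0x03          (low 8 tag bits pick the slot)
 * The entry only matches if its tag also carries the current TLB revision,
 * which is what the "match"/"expired"/"mismatch" verdict above reports.
 */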
918
919
920/** Common worker for iemR3InfoDTlb and iemR3InfoITlb. */
921static void iemR3InfoTlbCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fITlb)
922{
923 /*
924 * This is entirely argument driven.
925 */
926 static RTGETOPTDEF const s_aOptions[] =
927 {
928 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
929 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
930 { "all", 'A', RTGETOPT_REQ_NOTHING },
931 { "--all", 'A', RTGETOPT_REQ_NOTHING },
932 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
933 { "--range", 'r', RTGETOPT_REQ_UINT32_PAIR | RTGETOPT_FLAG_HEX },
934 { "--slot", 's', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
935 };
936
937 char szDefault[] = "-A";
938 char *papszDefaults[2] = { szDefault, NULL };
939 if (cArgs == 0)
940 {
941 cArgs = 1;
942 papszArgs = papszDefaults;
943 }
944
945 RTGETOPTSTATE State;
946 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
947 AssertRCReturnVoid(rc);
948
949 bool fNeedHeader = true;
950 bool fAddressMode = true;
951 PVMCPU pVCpu = VMMGetCpu(pVM);
952 if (!pVCpu)
953 pVCpu = VMMGetCpuById(pVM, 0);
954
955 RTGETOPTUNION ValueUnion;
956 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
957 {
958 switch (rc)
959 {
960 case 'c':
961 if (ValueUnion.u32 >= pVM->cCpus)
962 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
963 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
964 {
965 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
966 fNeedHeader = true;
967 }
968 break;
969
970 case 'a':
971 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
972 ValueUnion.u64, &fNeedHeader);
973 fAddressMode = true;
974 break;
975
976 case 'A':
977 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
978 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), &fNeedHeader);
979 break;
980
981 case 'r':
982 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
983 ValueUnion.PairU32.uFirst, ValueUnion.PairU32.uSecond, &fNeedHeader);
984 fAddressMode = false;
985 break;
986
987 case 's':
988 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
989 ValueUnion.u32, 1, &fNeedHeader);
990 fAddressMode = false;
991 break;
992
993 case VINF_GETOPT_NOT_OPTION:
994 if (fAddressMode)
995 {
996 uint64_t uAddr;
997 rc = RTStrToUInt64Full(ValueUnion.psz, 16, &uAddr);
998 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
999 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1000 uAddr, &fNeedHeader);
1001 else
1002 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed guest address '%s': %Rrc\n", ValueUnion.psz, rc);
1003 }
1004 else
1005 {
1006 uint32_t uSlot;
1007 rc = RTStrToUInt32Full(ValueUnion.psz, 16, &uSlot);
1008 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1009 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1010 uSlot, 1, &fNeedHeader);
1011 else
1012 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed TLB slot number '%s': %Rrc\n", ValueUnion.psz, rc);
1013 }
1014 break;
1015
1016 case 'h':
1017 pHlp->pfnPrintf(pHlp,
1018 "Usage: info %ctlb [options]\n"
1019 "\n"
1020 "Options:\n"
1021 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1022 " Selects the CPU which TLBs we're looking at. Default: Caller / 0\n"
1023 " -A, --all, all\n"
1024 " Display all the TLB entries (default if no other args).\n"
1025 " -a<virt>, --address=<virt>\n"
1026 " Shows the TLB entry for the specified guest virtual address.\n"
1027 " -r<slot:count>, --range=<slot:count>\n"
1028 " Shows the TLB entries for the specified slot range.\n"
1029 " -s<slot>,--slot=<slot>\n"
1030 " Shows the given TLB slot.\n"
1031 "\n"
1032 "Non-options are interpreted according to the last -a, -r or -s option,\n"
1033 "defaulting to addresses if not preceeded by any of those options.\n"
1034 , fITlb ? 'i' : 'd');
1035 return;
1036
1037 default:
1038 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1039 return;
1040 }
1041 }
1042}
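
/*
 * Example invocations of the itlb/dtlb info handlers (illustrative only; the
 * items are reachable e.g. through the debugger console or
 * 'VBoxManage debugvm <vm> info <item> [args]'):
 *      info dtlb                       - dump every data TLB slot of the caller / VCPU 0
 *      info itlb -c 1 -s 22            - show ITLB slot 0x22 of VCPU 1
 *      info dtlb -a ffffffff81000000   - show the DTLB slot covering that linear address
 *      info dtlb -r 20:8               - show 8 slots starting at slot 0x20
 */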
1043
1044
1045/**
1046 * @callback_method_impl{FNDBGFINFOARGVINT, itlb}
1047 */
1048static DECLCALLBACK(void) iemR3InfoITlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1049{
1050 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, true /*fITlb*/);
1051}
1052
1053
1054/**
1055 * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
1056 */
1057static DECLCALLBACK(void) iemR3InfoDTlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1058{
1059 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, false /*fITlb*/);
1060}
1061
1062#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1063/**
1064 * @callback_method_impl{FNDBGFINFOARGVINT, tb}
1065 */
1066static DECLCALLBACK(void) iemR3InfoTb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1067{
1068 /*
1069 * Parse arguments.
1070 */
1071 static RTGETOPTDEF const s_aOptions[] =
1072 {
1073 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1074 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1075 { "--addr", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1076 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1077 { "--phys", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1078 { "--physical", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1079 { "--phys-addr", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1080 { "--phys-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1081 { "--physical-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1082 { "--flags", 'f', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1083 };
1084
1085 RTGETOPTSTATE State;
1086 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1087 AssertRCReturnVoid(rc);
1088
1089 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
1090 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
1091 RTGCPHYS GCPhysPc = NIL_RTGCPHYS;
1092 RTGCPTR GCVirt = NIL_RTGCPTR;
1093 uint32_t fFlags = UINT32_MAX;
1094
1095 RTGETOPTUNION ValueUnion;
1096 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1097 {
1098 switch (rc)
1099 {
1100 case 'c':
1101 if (ValueUnion.u32 >= pVM->cCpus)
1102 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1103 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1104 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1105 break;
1106
1107 case 'a':
1108 GCVirt = ValueUnion.u64;
1109 GCPhysPc = NIL_RTGCPHYS;
1110 break;
1111
1112 case 'p':
1113 GCVirt = NIL_RTGCPTR;
1114 GCPhysPc = ValueUnion.u64;
1115 break;
1116
1117 case 'f':
1118 fFlags = ValueUnion.u32;
1119 break;
1120
1121 case 'h':
1122 pHlp->pfnPrintf(pHlp,
1123 "Usage: info %ctlb [options]\n"
1124 "\n"
1125 "Options:\n"
1126 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1127 " Selects the CPU which TBs we're looking at. Default: Caller / 0\n"
1128 " -a<virt>, --address=<virt>\n"
1129 " Shows the TB for the specified guest virtual address.\n"
1130 " -p<phys>, --phys=<phys>, --phys-addr=<phys>\n"
1131 " Shows the TB for the specified guest physical address.\n"
1132 " -f<flags>,--flags=<flags>\n"
1133 " The TB flags value (hex) to use when looking up the TB.\n"
1134 "\n"
1135 "The default is to use CS:RIP and derive flags from the CPU mode.\n");
1136 return;
1137
1138 default:
1139 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1140 return;
1141 }
1142 }
1143
1144 /* Currently, only do work on the same EMT. */
1145 if (pVCpu != pVCpuThis)
1146 {
1147 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
1148 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
1149 return;
1150 }
1151
1152 /*
1153 * Defaults.
1154 */
1155 if (GCPhysPc == NIL_RTGCPHYS)
1156 {
1157 if (GCVirt == NIL_RTGCPTR)
1158 GCVirt = CPUMGetGuestFlatPC(pVCpu);
1159 rc = PGMPhysGCPtr2GCPhys(pVCpu, GCVirt, &GCPhysPc);
1160 if (RT_FAILURE(rc))
1161 {
1162 pHlp->pfnPrintf(pHlp, "Failed to convert %%%RGv to an guest physical address: %Rrc\n", GCVirt, rc);
1163 return;
1164 }
1165 }
1166 if (fFlags == UINT32_MAX)
1167 {
1168 /* Note! This is duplicating code in IEMAllThrdRecompiler. */
1169 fFlags = iemCalcExecFlags(pVCpu);
1170 if (pVM->cCpus == 1)
1171 fFlags |= IEM_F_X86_DISREGARD_LOCK;
1172 if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
1173 fFlags |= IEMTB_F_INHIBIT_SHADOW;
1174 if (CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1175 fFlags |= IEMTB_F_INHIBIT_NMI;
1176 if ((IEM_F_MODE_CPUMODE_MASK & fFlags) != IEMMODE_64BIT)
1177 {
1178 int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
1179 if (offFromLim < X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
1180 fFlags |= IEMTB_F_CS_LIM_CHECKS;
1181 }
1182 }
1183
1184 /*
1185 * Do the lookup...
1186 *
1187 * Note! This is also duplicating code in IEMAllThrdRecompiler. We don't
1188 * have much choice since we don't want to increase use counters and
1189 * trigger native recompilation.
1190 */
1191 fFlags &= IEMTB_F_KEY_MASK;
1192 IEMTBCACHE const * const pTbCache = pVCpu->iem.s.pTbCacheR3;
1193 uint32_t const idxHash = IEMTBCACHE_HASH(pTbCache, fFlags, GCPhysPc);
1194 PCIEMTB pTb = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
1195 while (pTb)
1196 {
1197 if (pTb->GCPhysPc == GCPhysPc)
1198 {
1199 if ((pTb->fFlags & IEMTB_F_KEY_MASK) == fFlags)
1200 {
1201 /// @todo if (pTb->x86.fAttr == (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u)
1202 break;
1203 }
1204 }
1205 pTb = pTb->pNext;
1206 }
1207 if (!pTb)
1208 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x - no TB found on #%u\n", GCPhysPc, fFlags, pVCpu->idCpu);
1209 else
1210 {
1211 /*
1212 * Disassemble according to type.
1213 */
1214 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
1215 {
1216# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
1217 case IEMTB_F_TYPE_NATIVE:
1218 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - native\n", GCPhysPc, fFlags, pVCpu->idCpu, pTb);
1219 iemNativeDisassembleTb(pTb, pHlp);
1220 break;
1221# endif
1222
1223 case IEMTB_F_TYPE_THREADED:
1224 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - threaded\n", GCPhysPc, fFlags, pVCpu->idCpu, pTb);
1225 iemThreadedDisassembleTb(pTb, pHlp);
1226 break;
1227
1228 default:
1229 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - ??? %#x\n",
1230 GCPhysPc, fFlags, pVCpu->idCpu, pTb, pTb->fFlags);
1231 break;
1232 }
1233 }
1234}
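
/*
 * Example invocations of the tb info handler (illustrative):
 *      info tb                         - look up and disassemble the TB for the current CS:RIP,
 *                                        deriving the key flags from the current CPU mode
 *      info tb -a 00007ff654321000     - look up the TB for that guest linear address
 *      info tb -p 1234000              - look up the TB for that guest physical address
 * Note that the handler above only does the lookup when it is already running
 * on the EMT of the selected VCPU; otherwise it prints the cross-EMT TODO
 * message.
 */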
1235#endif /* VBOX_WITH_IEM_RECOMPILER && !VBOX_VMM_TARGET_ARMV8 */
1236
1237
1238#ifdef VBOX_WITH_DEBUGGER
1239
1240/** @callback_method_impl{FNDBGCCMD,
1241 * Implements the 'iemflushtlb' command. }
1242 */
1243static DECLCALLBACK(int) iemR3DbgFlushTlbs(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
1244{
1245 VMCPUID idCpu = DBGCCmdHlpGetCurrentCpu(pCmdHlp);
1246 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
1247 if (pVCpu)
1248 {
1249 VMR3ReqPriorityCallVoidWaitU(pUVM, idCpu, (PFNRT)IEMTlbInvalidateAll, 1, pVCpu);
1250 return VINF_SUCCESS;
1251 }
1252 RT_NOREF(paArgs, cArgs);
1253 return DBGCCmdHlpFail(pCmdHlp, pCmd, "failed to get the PVMCPU for the current CPU");
1254}
1255
1256
1257/**
1258 * Called by IEMR3Init to register debugger commands.
1259 */
1260static void iemR3RegisterDebuggerCommands(void)
1261{
1262 /*
1263 * Register debugger commands.
1264 */
1265 static DBGCCMD const s_aCmds[] =
1266 {
1267 {
1268 /* .pszCmd = */ "iemflushtlb",
1269 /* .cArgsMin = */ 0,
1270 /* .cArgsMax = */ 0,
1271 /* .paArgDescs = */ NULL,
1272 /* .cArgDescs = */ 0,
1273 /* .fFlags = */ 0,
1274 /* .pfnHandler = */ iemR3DbgFlushTlbs,
1275 /* .pszSyntax = */ "",
1276 /* .pszDescription = */ "Flushed the code and data TLBs"
1277 },
1278 };
1279
1280 int rc = DBGCRegisterCommands(&s_aCmds[0], RT_ELEMENTS(s_aCmds));
1281 AssertLogRelRC(rc);
1282}
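
/*
 * Once registered, the command can be issued from the debugger console, e.g.:
 *      iemflushtlb
 * which makes iemR3DbgFlushTlbs above run IEMTlbInvalidateAll on the current
 * debugger CPU's EMT via VMR3ReqPriorityCallVoidWaitU.
 */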
1283
1284#endif /* VBOX_WITH_DEBUGGER */
1285