VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllTlb.cpp@108247

Last change on this file was 108247, checked in by vboxsync, 3 months ago:

VMM/IEM: Splitting up IEMAll.cpp. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 10.1 KB
/* $Id: IEMAllTlb.cpp 108247 2025-02-17 00:28:23Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - TLB Management.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM
#define VMCPU_INCL_CPUM_GST_CTX
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/dbgf.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

//#include "IEMInline.h"
#ifdef VBOX_VMM_TARGET_X86
# include "target-x86/IEMAllTlbInline-x86.h"
#endif


#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
/**
 * Worker for iemTlbInvalidateAll.
 */
template<bool a_fGlobal>
DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
{
    if (!a_fGlobal)
        pTlb->cTlsFlushes++;
    else
        pTlb->cTlsGlobalFlushes++;

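    /* Invalidation is done lazily: advancing the revision changes the tag
       every lookup computes, so all existing non-global entries simply stop
       matching without having to be touched individually. */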
    pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
    if (RT_LIKELY(pTlb->uTlbRevision != 0))
    { /* very likely */ }
    else
    {
        pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
        pTlb->cTlbRevisionRollovers++;
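        /* The revision wrapped around, so zero the tags of all non-global
           entries (the even indexes); otherwise stale entries could match
           tags built with the restarted revision. */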
        unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
        while (i-- > 0)
            pTlb->aEntries[i * 2].uTag = 0;
    }

    pTlb->cTlbNonGlobalLargePageCurLoads    = 0;
    pTlb->NonGlobalLargePageRange.uLastTag  = 0;
    pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;

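    /* Global entries live at the odd indexes and carry their own revision,
       so they are only invalidated when a global flush was requested. */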
    if (a_fGlobal)
    {
        pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
        if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
        { /* very likely */ }
        else
        {
            pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
            pTlb->cTlbRevisionRollovers++;
            unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
            while (i-- > 0)
                pTlb->aEntries[i * 2 + 1].uTag = 0;
        }

        pTlb->cTlbGlobalLargePageCurLoads    = 0;
        pTlb->GlobalLargePageRange.uLastTag  = 0;
        pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
    }
}
#endif


/**
 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
 */
template<bool a_fGlobal>
DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
{
#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    Log10(("IEMTlbInvalidateAll\n"));

# ifdef IEM_WITH_CODE_TLB
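    /* Zeroing cbInstrBufTotal invalidates the cached instruction buffer,
       so the next opcode fetch has to go through the code TLB again. */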
    pVCpu->iem.s.cbInstrBufTotal = 0;
    iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
    if (a_fGlobal)
        IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
    else
        IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
# endif

# ifdef IEM_WITH_DATA_TLB
    iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
    if (a_fGlobal)
        IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
    else
        IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
# endif
#else
    RT_NOREF(pVCpu);
#endif
}


/**
 * Invalidates the non-global IEM TLB entries.
 *
 * This is called internally as well as by PGM when moving GC mappings.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling
 *                  thread.
 */
VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
{
    iemTlbInvalidateAll<false>(pVCpu);
}


/**
 * Invalidates all the IEM TLB entries, global ones included.
 *
 * This is called internally as well as by PGM when moving GC mappings.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling
 *                  thread.
 */
VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
{
    iemTlbInvalidateAll<true>(pVCpu);
}


/**
 * Invalidates a page in the TLBs.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling
 *                  thread.
 * @param   GCPtr   The address of the page to invalidate.
 * @thread EMT(pVCpu)
 */
VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
    GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
    Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
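    /* Each tag maps to an even/odd slot pair: the even index holds the
       non-global entry, the odd one the global entry, so the worker only
       needs the even index to reach both. */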
    uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);

# ifdef IEM_WITH_CODE_TLB
    iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
# endif
# ifdef IEM_WITH_DATA_TLB
    iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
# endif
#else
    NOREF(pVCpu); NOREF(GCPtr);
#endif
}


#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
/**
 * Invalidates both TLBs the slow way following a physical revision rollover.
 *
 * Worker for IEMTlbInvalidateAllPhysical,
 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
 * iemMemMapJmp and others.
 *
 * @thread EMT(pVCpu)
 */
void iemTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Log10(("iemTlbInvalidateAllPhysicalSlow\n"));
    ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
    ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);

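    /* Walk all entries and scrub the physical revision, mapping pointer and
       access flags, forcing the next use of each entry to re-query PGM. */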
    unsigned i;
# ifdef IEM_WITH_CODE_TLB
    i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
    while (i-- > 0)
    {
        pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3       = NULL;
        pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
                                                               | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    }
    pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
    pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
# endif
# ifdef IEM_WITH_DATA_TLB
    i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
    while (i-- > 0)
    {
        pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3       = NULL;
        pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
                                                               | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    }
    pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
    pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
# endif
}
#endif


/**
 * Invalidates the host physical aspects of the IEM TLBs.
 *
 * This is called internally as well as by PGM when moving GC mappings.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling
 *                  thread.
 * @note    Currently not used.
 */
VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
{
#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
    Log10(("IEMTlbInvalidateAllPhysical\n"));

# ifdef IEM_WITH_CODE_TLB
    pVCpu->iem.s.cbInstrBufTotal = 0;
# endif
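    /* Fast path: bump the physical revision shared by both TLBs.  Values at
       or below IEMTLB_PHYS_REV_INCR * 2 mean the counter has wrapped around
       (the slow flush restarts it at IEMTLB_PHYS_REV_INCR * 2), so in that
       case every entry must be scrubbed the slow way. */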
    uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
    if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
    {
        pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
        pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
        pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
        pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    }
    else
        iemTlbInvalidateAllPhysicalSlow(pVCpu);
#else
    NOREF(pVCpu);
#endif
}


/**
 * Invalidates the host physical aspects of the IEM TLBs.
 *
 * This is called internally as well as by PGM when moving GC mappings.
 *
 * @param   pVM         The cross context VM structure.
 * @param   idCpuCaller The ID of the calling EMT if available to the caller,
 *                      otherwise NIL_VMCPUID.
 * @param   enmReason   The reason we're called.
 *
 * @remarks Caller holds the PGM lock.
 */
VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
{
#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
    if (pVCpuCaller)
        VMCPU_ASSERT_EMT(pVCpuCaller);
    Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);

    VMCC_FOR_EACH_VMCPU(pVM)
    {
# ifdef IEM_WITH_CODE_TLB
        if (pVCpuCaller == pVCpu)
            pVCpu->iem.s.cbInstrBufTotal = 0;
# endif

        uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
        uint64_t       uTlbPhysRevNew  = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
        if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
        { /* likely */ }
        else if (pVCpuCaller != pVCpu)
            uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
        else
        {
            iemTlbInvalidateAllPhysicalSlow(pVCpu);
            continue;
        }
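        /* The slow scrub above is only done for the caller's own TLBs; for
           the other CPUs a rollover just restarts their revision.  The
           compare-exchange makes sure a concurrent update by the owning EMT
           is not overwritten here. */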
        if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
            pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;

        if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
            pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    }
    VMCC_FOR_EACH_VMCPU_END(pVM);

#else
    RT_NOREF(pVM, idCpuCaller, enmReason);
#endif
}