VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllTlb.cpp

Last change on this file was 108791, checked in by vboxsync, 5 weeks ago

VMM/IEM: More ARM target work. jiraref:VBP-1598

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 10.2 KB
/* $Id: IEMAllTlb.cpp 108791 2025-03-28 21:58:31Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - TLB Management.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM
#define VMCPU_INCL_CPUM_GST_CTX
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/dbgf.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#ifdef VBOX_VMM_TARGET_X86
# include "target-x86/IEMAllTlbInline-x86.h"
#elif defined(VBOX_VMM_TARGET_ARMV8)
# include "target-armv8/IEMAllTlbInline-armv8.h"
#endif



#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
/**
 * Worker for iemTlbInvalidateAll.
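 *
 * Flushing is done by bumping the revision counter(s) rather than clearing
 * the table; only when a counter wraps around do we sweep the entries and
 * zero their tags.  Judging from the rollover loops below, non-global
 * entries occupy the even table slots and global entries the odd ones.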
 */
template<bool a_fGlobal>
DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
{
    if (!a_fGlobal)
        pTlb->cTlsFlushes++;
    else
        pTlb->cTlsGlobalFlushes++;

    pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
    if (RT_LIKELY(pTlb->uTlbRevision != 0))
    { /* very likely */ }
    else
    {
        pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
        pTlb->cTlbRevisionRollovers++;
        unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
        while (i-- > 0)
            pTlb->aEntries[i * 2].uTag = 0;
    }

    pTlb->cTlbNonGlobalLargePageCurLoads = 0;
    pTlb->NonGlobalLargePageRange.uLastTag = 0;
    pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;

    if (a_fGlobal)
    {
        pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
        if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
        { /* very likely */ }
        else
        {
            pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
            pTlb->cTlbRevisionRollovers++;
            unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
            while (i-- > 0)
                pTlb->aEntries[i * 2 + 1].uTag = 0;
        }

        pTlb->cTlbGlobalLargePageCurLoads = 0;
        pTlb->GlobalLargePageRange.uLastTag = 0;
        pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
    }
}
#endif
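
/*
 * Illustration only (not part of the build): the reason a revision bump acts
 * as a flush is that lookups combine the page's tag with the current
 * revision, roughly along these lines (simplified sketch; the real tag and
 * lookup macros are defined elsewhere in IEM):
 *
 *     uint64_t  const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(pVCpu, GCPtr);
 *     uintptr_t const idxEven   = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev);
 *     bool      const fHit      = pTlb->aEntries[idxEven].uTag
 *                              == (uTagNoRev | pTlb->uTlbRevision);
 *
 * so every pre-bump entry stops matching without touching the table.
 */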


/**
 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
 */
template<bool a_fGlobal>
DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
{
#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    Log10(("IEMTlbInvalidateAll\n"));

# ifdef IEM_WITH_CODE_TLB
    pVCpu->iem.s.cbInstrBufTotal = 0;
    iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
    if (a_fGlobal)
        IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
    else
        IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
# endif

# ifdef IEM_WITH_DATA_TLB
    iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
    if (a_fGlobal)
        IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
    else
        IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
# endif
#else
    RT_NOREF(pVCpu);
#endif
}


/**
 * Invalidates the non-global IEM TLB entries.
 *
 * This is called internally as well as by PGM when moving GC mappings.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling
 *                      thread.
 */
VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
{
    iemTlbInvalidateAll<false>(pVCpu);
}


/**
 * Invalidates all the IEM TLB entries.
 *
 * This is called internally as well as by PGM when moving GC mappings.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling
 *                      thread.
 */
VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
{
    iemTlbInvalidateAll<true>(pVCpu);
}
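
/*
 * Usage sketch (illustrative only, not part of this file): on x86, a CR3
 * reload with global pages enabled leaves global entries in place and would
 * map to
 *
 *     IEMTlbInvalidateAll(pVCpu);
 *
 * whereas toggling CR4.PGE or similar "flush everything" events map to
 *
 *     IEMTlbInvalidateAllGlobal(pVCpu);
 *
 * and an INVLPG-style operation to IEMTlbInvalidatePage(pVCpu, GCPtrPage).
 */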


/**
 * Invalidates a page in the TLBs.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling
 *                      thread.
 * @param   GCPtr       The address of the page to invalidate.
 * @thread  EMT(pVCpu)
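 *
 * @note    Only the tag's even/odd entry pair is addressed directly here; the
 *          actual eviction, presumably including any large-page entries
 *          covering the address, is left to the per-target
 *          iemTlbInvalidatePageWorker.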
 */
VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
    GCPtr = IEMTLB_CALC_TAG_NO_REV(pVCpu, GCPtr);
    Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
    uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);

# ifdef IEM_WITH_CODE_TLB
    iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
# endif
# ifdef IEM_WITH_DATA_TLB
    iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
# endif
#else
    NOREF(pVCpu); NOREF(GCPtr);
#endif
}


#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
/**
 * Invalidates both TLBs in slow fashion following a physical revision rollover.
 *
 * Worker for IEMTlbInvalidateAllPhysical,
 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
 * iemMemMapJmp and others.
 *
 * @thread  EMT(pVCpu)
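 *
 * @note    Both physical revisions are reset to IEMTLB_PHYS_REV_INCR * 2 and
 *          every entry has its ring-3 mapping pointer and physical-page state
 *          (no-read / no-write / unassigned flags plus the revision bits)
 *          stripped, presumably forcing the next access to re-resolve the
 *          physical page.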
 */
void iemTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Log10(("iemTlbInvalidateAllPhysicalSlow\n"));
    ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
    ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);

    unsigned i;
# ifdef IEM_WITH_CODE_TLB
    i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
    while (i-- > 0)
    {
        pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
        pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
                                                               | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    }
    pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
    pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
# endif
# ifdef IEM_WITH_DATA_TLB
    i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
    while (i-- > 0)
    {
        pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
        pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
                                                               | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    }
    pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
    pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
# endif

}
#endif


/**
 * Invalidates the host physical aspects of the IEM TLBs.
 *
 * This is called internally as well as by PGM when moving GC mappings.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling
 *                      thread.
 * @note    Currently not used.
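 * @note    The fast path merely bumps uTlbPhysRev on both TLBs; when the
 *          counter wraps around it falls back to iemTlbInvalidateAllPhysicalSlow
 *          for a full sweep.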
 */
VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
{
#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
    Log10(("IEMTlbInvalidateAllPhysical\n"));

# ifdef IEM_WITH_CODE_TLB
    pVCpu->iem.s.cbInstrBufTotal = 0;
# endif
    uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
    if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
    {
        pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
        pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
        pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
        pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    }
    else
        iemTlbInvalidateAllPhysicalSlow(pVCpu);
#else
    NOREF(pVCpu);
#endif
}


/**
 * Invalidates the host physical aspects of the IEM TLBs.
 *
 * This is called internally as well as by PGM when moving GC mappings.
 *
 * @param   pVM             The cross context VM structure.
 * @param   idCpuCaller     The ID of the calling EMT if available to the caller,
 *                          otherwise NIL_VMCPUID.
 * @param   enmReason       The reason we're called.
 *
 * @remarks Caller holds the PGM lock.
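 *
 * @note    Remote EMTs may be running, so their physical revision is only
 *          advanced with a compare-exchange; if that CPU changed the value in
 *          the meantime it is left alone rather than raced.  Only the caller's
 *          own TLBs are flushed the slow way on rollover.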
 */
VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
{
#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
    if (pVCpuCaller)
        VMCPU_ASSERT_EMT(pVCpuCaller);
    Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);

    VMCC_FOR_EACH_VMCPU(pVM)
    {
# ifdef IEM_WITH_CODE_TLB
        if (pVCpuCaller == pVCpu)
            pVCpu->iem.s.cbInstrBufTotal = 0;
# endif

        uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
        uint64_t       uTlbPhysRevNew  = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
        if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
        { /* likely */ }
        else if (pVCpuCaller != pVCpu)
            uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
        else
        {
            iemTlbInvalidateAllPhysicalSlow(pVCpu);
            continue;
        }
        if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
            pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;

        if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
            pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    }
    VMCC_FOR_EACH_VMCPU_END(pVM);

#else
    RT_NOREF(pVM, idCpuCaller, enmReason);
#endif
}
