VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h@102790

Last change on this file since 102790 was 102790, checked in by vboxsync, 5 months ago

VMM/IEM: Emit TLB lookup for POP GPR instructions. bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 75.2 KB
1/* $Id: IEMAllMemRWTmplInline.cpp.h 102790 2024-01-09 01:41:28Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined R/W Memory Functions Template.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/* Check template parameters. */
30#ifndef TMPL_MEM_TYPE
31# error "TMPL_MEM_TYPE is undefined"
32#endif
33#ifndef TMPL_MEM_TYPE_SIZE
34# error "TMPL_MEM_TYPE_SIZE is undefined"
35#endif
36#ifndef TMPL_MEM_TYPE_ALIGN
37# error "TMPL_MEM_TYPE_ALIGN is undefined"
38#endif
39#ifndef TMPL_MEM_FN_SUFF
40# error "TMPL_MEM_FN_SUFF is undefined"
41#endif
42#ifndef TMPL_MEM_FMT_TYPE
43# error "TMPL_MEM_FMT_TYPE is undefined"
44#endif
45#ifndef TMPL_MEM_FMT_DESC
46# error "TMPL_MEM_FMT_DESC is undefined"
47#endif
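/* Illustrative note (not part of the original file): the includer is expected to
   define these template parameters before pulling in this header.  A dword
   instantiation would look roughly like this -- the exact values are an
   assumption, not taken from this file:

       #define TMPL_MEM_TYPE       uint32_t
       #define TMPL_MEM_TYPE_SIZE  4
       #define TMPL_MEM_TYPE_ALIGN 3
       #define TMPL_MEM_FN_SUFF    U32
       #define TMPL_MEM_FMT_TYPE   "%#010x"
       #define TMPL_MEM_FMT_DESC   "dword"
       #include "IEMAllMemRWTmplInline.cpp.h"

   With those values RT_CONCAT3 expands the function names below into
   iemMemFetchDataU32Jmp, iemMemStoreDataU32Jmp, iemMemStackPushU32Jmp, etc. */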
48
49
50/** Helper for checking if @a a_GCPtr is acceptably aligned and fully within
51 * the page for a TMPL_MEM_TYPE. */
52#if TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
53# define TMPL_MEM_ALIGN_CHECK(a_GCPtr) ( ( !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) \
54 && ((a_GCPtr) & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(TMPL_MEM_TYPE)) \
55 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
56#else
57# define TMPL_MEM_ALIGN_CHECK(a_GCPtr) ( !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */ \
58 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
59#endif
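/* Worked example (illustrative, assuming a 4-byte type with TMPL_MEM_TYPE_ALIGN
   == 3, i.e. the second variant above): GCPtr = 0x1000 is aligned and passes
   immediately, while GCPtr = 0xffd is misaligned, so the decision falls to
   TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK() (presumably supplied by the
   includer); since 0xffd..0x1000 straddles a page boundary, that check would
   normally fail and the access takes the careful fallback path instead. */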
60
61/**
62 * Values have to be passed by reference if larger than uint64_t.
63 *
64 * This is a restriction of the Visual C++ AMD64 calling convention;
65 * the gcc AMD64 and ARM64 ABIs can easily pass and return up to 128-bit values
66 * via registers. For larger values like RTUINT256U, Visual C++ AMD64 and ARM64
67 * pass them by hidden reference, whereas the gcc AMD64 ABI will use the stack.
68 *
69 * So, to avoid passing anything on the stack, we just explicitly pass values by
70 * reference (pointer) if they are larger than uint64_t. This ASSUMES a 64-bit
71 * host.
72 */
73#if TMPL_MEM_TYPE_SIZE > 8
74# define TMPL_MEM_BY_REF
75#else
76# undef TMPL_MEM_BY_REF
77#endif
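/* Illustrative consequence (modifiers like DECL_INLINE_THROW and
   IEM_NOEXCEPT_MAY_LONGJMP elided; the 128-bit example is an assumption): a
   uint32_t instantiation emits the fetch below as

       uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem);

   whereas a 128-bit instantiation with TMPL_MEM_BY_REF defined would instead be

       void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, RTUINT128U *pValue, uint8_t iSegReg, RTGCPTR GCPtrMem);

   returning the value through the pointer rather than via a stack slot. */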
78
79
80#ifdef IEM_WITH_SETJMP
81
82
83/*********************************************************************************************************************************
84* Fetches *
85*********************************************************************************************************************************/
86
87/**
88 * Inlined fetch function that longjumps on error.
89 *
90 * @note The @a iSegReg is not allowed to be UINT8_MAX!
91 */
92#ifdef TMPL_MEM_BY_REF
93DECL_INLINE_THROW(void)
94RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pValue, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
95#else
96DECL_INLINE_THROW(TMPL_MEM_TYPE)
97RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
98#endif
99{
100 AssertCompile(sizeof(TMPL_MEM_TYPE) == TMPL_MEM_TYPE_SIZE);
101# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
102 /*
103 * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
104 */
105 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
106# if TMPL_MEM_TYPE_SIZE > 1
107 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
108# endif
109 {
110 /*
111 * TLB lookup.
112 */
113 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
114 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
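 /* Editorial note (assumption based on the macro names): the tag appears to fold
    the current TLB revision into the page number, so bumping the revision on a
    flush makes every stale entry compare unequal here without clearing the table. */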
115 if (RT_LIKELY(pTlbe->uTag == uTag))
116 {
117 /*
118 * Check TLB page table level access flags.
119 */
120 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
121 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
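 /* (CPL + 1) only has bit 2 set when CPL == 3, so fNoUser equals
    IEMTLBE_F_PT_NO_USER for user-mode accesses and zero for supervisor
    accesses; the AssertCompile above guards this trick. */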
122 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
123 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
124 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
125 {
126 /*
127 * Fetch and return the data.
128 */
129 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
130 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
131 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
132# ifdef TMPL_MEM_BY_REF
133 *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
134 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
135 iSegReg, GCPtrMem, GCPtrEff, pValue));
136 return;
137# else
138 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
139 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE "\n",
140 iSegReg, GCPtrMem, GCPtrEff, uRet));
141 return uRet;
142# endif
143 }
144 }
145 }
146
147 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
148 outdated page pointer, or other troubles. (This will do a TLB load.) */
149 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
150# endif
151# ifdef TMPL_MEM_BY_REF
152 RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, iSegReg, GCPtrMem);
153# else
154 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem);
155# endif
156}
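/* Usage sketch (illustrative, assuming the U32 instantiation; the variable names
   at the call site are placeholders):

       uint32_t const uSrc = iemMemFetchDataU32Jmp(pVCpu, X86_SREG_DS, GCPtrEffSrc);

   On a data TLB hit this reads straight from the cached ring-3 page mapping;
   otherwise it defers to the SafeJmp fallback, which performs the TLB load and
   longjmps if the access faults. */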
157
158
159/**
160 * Inlined flat addressing fetch function that longjumps on error.
161 */
162# ifdef TMPL_MEM_BY_REF
163DECL_INLINE_THROW(void)
164RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pValue, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
165# else
166DECL_INLINE_THROW(TMPL_MEM_TYPE)
167RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
168# endif
169{
170 AssertMsg( (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
171 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
172 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
173# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
174 /*
175 * Check that it doesn't cross a page boundrary.
176 */
177# if TMPL_MEM_TYPE_SIZE > 1
178 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
179# endif
180 {
181 /*
182 * TLB lookup.
183 */
184 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
185 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
186 if (RT_LIKELY(pTlbe->uTag == uTag))
187 {
188 /*
189 * Check TLB page table level access flags.
190 */
191 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
192 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
193 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
194 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
195 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
196 {
197 /*
198 * Fetch and return the data.
199 */
200 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
201 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
202 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
203# ifdef TMPL_MEM_BY_REF
204 *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
205 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
206 GCPtrMem, pValue));
207 return;
208# else
209 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
210 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uRet));
211 return uRet;
212# endif
213 }
214 }
215 }
216
217 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
218 outdated page pointer, or other troubles. (This will do a TLB load.) */
219 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
220# endif
221# ifdef TMPL_MEM_BY_REF
222 RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, UINT8_MAX, GCPtrMem);
223# else
224 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem);
225# endif
226}
227
228
229/*********************************************************************************************************************************
230* Stores *
231*********************************************************************************************************************************/
232# ifndef TMPL_MEM_NO_STORE
233
234/**
235 * Inlined store function that longjumps on error.
236 *
237 * @note The @a iSegReg is not allowed to be UINT8_MAX!
238 */
239DECL_INLINE_THROW(void)
240RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
241# ifdef TMPL_MEM_BY_REF
242 TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
243# else
244 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
245# endif
246{
247# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
248 /*
249 * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
250 */
251 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
252# if TMPL_MEM_TYPE_SIZE > 1
253 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
254# endif
255 {
256 /*
257 * TLB lookup.
258 */
259 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
260 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
261 if (RT_LIKELY(pTlbe->uTag == uTag))
262 {
263 /*
264 * Check TLB page table level access flags.
265 */
266 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
267 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
268 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
269 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
270 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
271 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
272 {
273 /*
274 * Store the value and return.
275 */
276 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
277 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
278 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
279# ifdef TMPL_MEM_BY_REF
280 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = *pValue;
281 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs (%04x:%RX64)\n",
282 iSegReg, GCPtrMem, GCPtrEff, pValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
283# else
284 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
285 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE " (%04x:%RX64)\n",
286 iSegReg, GCPtrMem, GCPtrEff, uValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
287# endif
288 return;
289 }
290 }
291 }
292
293 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
294 outdated page pointer, or other troubles. (This will do a TLB load.) */
295 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
296# endif
297# ifdef TMPL_MEM_BY_REF
298 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, pValue);
299# else
300 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, uValue);
301# endif
302}
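/* Usage sketch (illustrative, assuming the U32 instantiation; variable names are
   placeholders):

       iemMemStoreDataU32Jmp(pVCpu, X86_SREG_ES, GCPtrEffDst, uValue);

   The write path additionally requires the TLB entry to permit writes and to
   have the accessed/dirty handling out of the way (IEMTLBE_F_PG_NO_WRITE,
   IEMTLBE_F_PT_NO_WRITE, IEMTLBE_F_PT_NO_DIRTY), otherwise it falls back. */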
303
304
305/**
306 * Inlined flat addressing store function that longjumps on error.
307 */
308DECL_INLINE_THROW(void)
309RT_CONCAT3(iemMemFlatStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
310# ifdef TMPL_MEM_BY_REF
311 TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
312# else
313 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
314# endif
315{
316 AssertMsg( (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
317 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
318 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
319# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
320 /*
321 * Check that it doesn't cross a page boundrary.
322 */
323# if TMPL_MEM_TYPE_SIZE > 1
324 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
325# endif
326 {
327 /*
328 * TLB lookup.
329 */
330 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
331 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
332 if (RT_LIKELY(pTlbe->uTag == uTag))
333 {
334 /*
335 * Check TLB page table level access flags.
336 */
337 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
338 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
339 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
340 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
341 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
342 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
343 {
344 /*
345 * Store the value and return.
346 */
347 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
348 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
349 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
350# ifdef TMPL_MEM_BY_REF
351 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = *pValue;
352 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
353 GCPtrMem, pValue));
354# else
355 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
356 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
357# endif
358 return;
359 }
360 }
361 }
362
363 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
364 outdated page pointer, or other troubles. (This will do a TLB load.) */
365 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
366# endif
367# ifdef TMPL_MEM_BY_REF
368 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, pValue);
369# else
370 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, uValue);
371# endif
372}
373
374# endif /* !TMPL_MEM_NO_STORE */
375
376
377/*********************************************************************************************************************************
378* Mapping / Direct Memory Access *
379*********************************************************************************************************************************/
380# ifndef TMPL_MEM_NO_MAPPING
381
382/**
383 * Inlined read-write memory mapping function that longjumps on error.
384 */
385DECL_INLINE_THROW(TMPL_MEM_TYPE *)
386RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
387 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
388{
389# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
390 /*
391 * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
392 */
393 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
394# if TMPL_MEM_TYPE_SIZE > 1
395 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
396# endif
397 {
398 /*
399 * TLB lookup.
400 */
401 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
402 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
403 if (RT_LIKELY(pTlbe->uTag == uTag))
404 {
405 /*
406 * Check TLB page table level access flags.
407 */
408 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
409 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
410 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
411 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
412 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
413 | fNoUser))
414 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
415 {
416 /*
417 * Return the address.
418 */
419 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
420 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
421 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
422 *pbUnmapInfo = 0;
423 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
424 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
425 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
426 }
427 }
428 }
429
430 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
431 outdated page pointer, or other troubles. (This will do a TLB load.) */
432 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
433# endif
434 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
435}
436
437
438/**
439 * Inlined flat read-write memory mapping function that longjumps on error.
440 */
441DECL_INLINE_THROW(TMPL_MEM_TYPE *)
442RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
443 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
444{
445# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
446 /*
447 * Check that the address doesn't cross a page boundrary.
448 */
449# if TMPL_MEM_TYPE_SIZE > 1
450 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
451# endif
452 {
453 /*
454 * TLB lookup.
455 */
456 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
457 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
458 if (RT_LIKELY(pTlbe->uTag == uTag))
459 {
460 /*
461 * Check TLB page table level access flags.
462 */
463 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
464 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
465 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
466 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
467 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
468 | fNoUser))
469 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
470 {
471 /*
472 * Return the address.
473 */
474 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
475 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
476 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
477 *pbUnmapInfo = 0;
478 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
479 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
480 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
481 }
482 }
483 }
484
485 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
486 outdated page pointer, or other troubles. (This will do a TLB load.) */
487 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
488# endif
489 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
490}
491
492
493/**
494 * Inlined write-only memory mapping function that longjumps on error.
495 */
496DECL_INLINE_THROW(TMPL_MEM_TYPE *)
497RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
498 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
499{
500# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
501 /*
502 * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
503 */
504 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
505# if TMPL_MEM_TYPE_SIZE > 1
506 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
507# endif
508 {
509 /*
510 * TLB lookup.
511 */
512 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
513 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
514 if (RT_LIKELY(pTlbe->uTag == uTag))
515 {
516 /*
517 * Check TLB page table level access flags.
518 */
519 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
520 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
521 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
522 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
523 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
524 | fNoUser))
525 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
526 {
527 /*
528 * Return the address.
529 */
530 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
531 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
532 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
533 *pbUnmapInfo = 0;
534 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
535 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
536 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
537 }
538 }
539 }
540
541 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
542 outdated page pointer, or other troubles. (This will do a TLB load.) */
543 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
544# endif
545 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
546}
547
548
549/**
550 * Inlined flat write-only memory mapping function that longjumps on error.
551 */
552DECL_INLINE_THROW(TMPL_MEM_TYPE *)
553RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
554 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
555{
556# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
557 /*
558 * Check that the address doesn't cross a page boundary.
559 */
560# if TMPL_MEM_TYPE_SIZE > 1
561 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
562# endif
563 {
564 /*
565 * TLB lookup.
566 */
567 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
568 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
569 if (RT_LIKELY(pTlbe->uTag == uTag))
570 {
571 /*
572 * Check TLB page table level access flags.
573 */
574 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
575 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
576 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
577 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
578 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
579 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
580 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
581 {
582 /*
583 * Return the address.
584 */
585 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
586 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
587 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
588 *pbUnmapInfo = 0;
589 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
590 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
591 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
592 }
593 }
594 }
595
596 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
597 outdated page pointer, or other troubles. (This will do a TLB load.) */
598 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
599# endif
600 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
601}
602
603
604/**
605 * Inlined read-only memory mapping function that longjumps on error.
606 */
607DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
608RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
609 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
610{
611# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
612 /*
613 * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
614 */
615 RTGCPTR const GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
616# if TMPL_MEM_TYPE_SIZE > 1
617 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
618# endif
619 {
620 /*
621 * TLB lookup.
622 */
623 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
624 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
625 if (RT_LIKELY(pTlbe->uTag == uTag))
626 {
627 /*
628 * Check TLB page table level access flags.
629 */
630 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
631 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
632 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
633 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
634 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
635 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
636 {
637 /*
638 * Return the address.
639 */
640 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
641 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
642 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
643 *pbUnmapInfo = 0;
644 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
645 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
646 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
647 }
648 }
649 }
650
651 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
652 outdated page pointer, or other troubles. (This will do a TLB load.) */
653 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
654# endif
655 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
656}
657
658
659/**
660 * Inlined flat read-only memory mapping function that longjumps on error.
661 */
662DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
663RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
664 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
665{
666# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
667 /*
668 * Check that the address doesn't cross a page boundrary.
669 */
670# if TMPL_MEM_TYPE_SIZE > 1
671 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
672# endif
673 {
674 /*
675 * TLB lookup.
676 */
677 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
678 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
679 if (RT_LIKELY(pTlbe->uTag == uTag))
680 {
681 /*
682 * Check TLB page table level access flags.
683 */
684 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
685 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
686 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
687 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
688 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
689 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
690 {
691 /*
692 * Return the address.
693 */
694 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
695 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
696 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
697 *pbUnmapInfo = 0;
698 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
699 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
700 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
701 }
702 }
703 }
704
705 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
706 outdated page pointer, or other troubles. (This will do a TLB load.) */
707 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
708# endif
709 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
710}
711
712# endif /* !TMPL_MEM_NO_MAPPING */
713
714
715/*********************************************************************************************************************************
716* Stack Access *
717*********************************************************************************************************************************/
718# ifdef TMPL_MEM_WITH_STACK
719# if TMPL_MEM_TYPE_SIZE > 8
720# error "Stack not supported for this type size - please #undef TMPL_MEM_WITH_STACK"
721# endif
722# if TMPL_MEM_TYPE_SIZE > 1 && TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
723# error "Stack not supported for this alignment size - please #undef TMPL_MEM_WITH_STACK"
724# endif
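/* Illustrative note (assumption, not stated in this file): TMPL_MEM_WITH_STACK is
   typically only defined for the natural stack operand sizes (the word, dword and
   qword instantiations), which is exactly what the two checks above enforce. */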
725# ifdef IEM_WITH_SETJMP
726
727/**
728 * Stack store function that longjmps on error.
729 */
730DECL_INLINE_THROW(void)
731RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
732{
733# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
734 /*
735 * Apply segmentation and check that the item doesn't cross a page boundrary.
736 */
737 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
738# if TMPL_MEM_TYPE_SIZE > 1
739 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
740# endif
741 {
742 /*
743 * TLB lookup.
744 */
745 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
746 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
747 if (RT_LIKELY(pTlbe->uTag == uTag))
748 {
749 /*
750 * Check TLB page table level access flags.
751 */
752 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
753 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
754 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
755 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
756 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
757 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
758 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
759 {
760 /*
761 * Do the store and return.
762 */
763 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
764 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
765 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
766 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrEff, uValue));
767 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
768 return;
769 }
770 }
771 }
772
773 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
774 outdated page pointer, or other troubles. (This will do a TLB load.) */
775 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
776# endif
777 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
778}
779
780
781# ifdef TMPL_WITH_PUSH_SREG
782/**
783 * Stack segment store function that longjmps on error.
784 *
785 * For a detailed discussion of the behaviour see the fallback functions
786 * iemMemStoreStackUxxSRegSafeJmp and iemMemStackPushUxxSRegSafeJmp.
787 */
788DECL_INLINE_THROW(void)
789RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
790 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
791{
792# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
793 /*
794 * Apply segmentation to the address and check that the item doesn't cross
795 * a page boundrary.
796 */
797 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
798# if TMPL_MEM_TYPE_SIZE > 1
799 if (RT_LIKELY( !(GCPtrEff & (sizeof(uint16_t) - 1U))
800 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
801# endif
802 {
803 /*
804 * TLB lookup.
805 */
806 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
807 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
808 if (RT_LIKELY(pTlbe->uTag == uTag))
809 {
810 /*
811 * Check TLB page table level access flags.
812 */
813 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
814 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
815 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
816 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
817 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
818 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
819 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
820 {
821 /*
822 * Do the push and return.
823 */
824 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
825 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
826 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
827 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg]\n", GCPtrEff, uValue));
828 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
829 return;
830 }
831 }
832 }
833
834 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
835 outdated page pointer, or other troubles. (This will do a TLB load.) */
836 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
837# endif
838 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
839}
840# endif /* TMPL_WITH_PUSH_SREG */
841
842
843/**
844 * Flat stack store function that longjmps on error.
845 */
846DECL_INLINE_THROW(void)
847RT_CONCAT3(iemMemFlatStoreStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
848 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
849{
850 Assert( IEM_IS_64BIT_CODE(pVCpu)
851 || ( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
852 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
853 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
854 && pVCpu->cpum.GstCtx.ss.u64Base == 0));
855
856# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
857 /*
858 * Check that the item doesn't cross a page boundrary.
859 */
860# if TMPL_MEM_TYPE_SIZE > 1
861 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
862# endif
863 {
864 /*
865 * TLB lookup.
866 */
867 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
868 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
869 if (RT_LIKELY(pTlbe->uTag == uTag))
870 {
871 /*
872 * Check TLB page table level access flags.
873 */
874 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
875 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
876 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
877 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
878 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
879 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
880 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
881 {
882 /*
883 * Do the push and return.
884 */
885 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
886 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
887 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
888 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
889 GCPtrMem, pVCpu->cpum.GstCtx.rsp, uValue));
890 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
891 return;
892 }
893 }
894 }
895
896 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
897 outdated page pointer, or other troubles. (This will do a TLB load.) */
898 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
899# endif
900 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
901}
902
903# ifdef TMPL_WITH_PUSH_SREG
904/**
905 * Flat stack segment store function that longjmps on error.
906 *
907 * For a detailed discussion of the behaviour see the fallback functions
908 * iemMemStoreStackUxxSRegSafeJmp and iemMemStackPushUxxSRegSafeJmp.
909 */
910DECL_INLINE_THROW(void)
911RT_CONCAT3(iemMemFlatStoreStack,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
912 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
913{
914# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
915 /*
916 * Check that the item doesn't cross a page boundrary.
917 */
918 if (RT_LIKELY( !(GCPtrMem & (sizeof(uint16_t) - 1))
919 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, uint16_t) ))
920 {
921 /*
922 * TLB lookup.
923 */
924 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
925 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
926 if (RT_LIKELY(pTlbe->uTag == uTag))
927 {
928 /*
929 * Check TLB page table level access flags.
930 */
931 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
932 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
933 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
934 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
935 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
936 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
937 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
938 {
939 /*
940 * Do the push and return.
941 */
942 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
943 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
944 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
945 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (<-%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
946 GCPtrMem, pVCpu->cpum.GstCtx.rsp, uValue));
947 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
948 return;
949 }
950 }
951 }
952
953 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
954 outdated page pointer, or other troubles. (This will do a TLB load.) */
955 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
956# endif
957 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
958}
959# endif /* TMPL_WITH_PUSH_SREG */
960
961
962/**
963 * Stack fetch function that longjmps on error.
964 */
965DECL_INLINE_THROW(TMPL_MEM_TYPE)
966RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
967{
968# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
969 /*
970 * Apply segmentation to the address and check that the item doesn't cross
971 * a page boundrary.
972 */
973 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
974# if TMPL_MEM_TYPE_SIZE > 1
975 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
976# endif
977 {
978 /*
979 * TLB lookup.
980 */
981 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
982 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
983 if (RT_LIKELY(pTlbe->uTag == uTag))
984 {
985 /*
986 * Check TLB page table level access flags.
987 */
988 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
989 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
990 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
991 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
992 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
993 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
994 {
995 /*
996 * Do the pop.
997 */
998 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
999 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1000 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1001 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
1002 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrEff, uValue));
1003 return uValue;
1004 }
1005 }
1006 }
1007
1008 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1009 outdated page pointer, or other troubles. (This will do a TLB load.) */
1010 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1011# endif
1012 return RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem);
1013}
1014
1015
1016/**
1017 * Flat stack fetch function that longjmps on error.
1018 */
1019DECL_INLINE_THROW(TMPL_MEM_TYPE)
1020RT_CONCAT3(iemMemFlatFetchStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
1021{
1022# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1023 /*
1024 * Check that the item doesn't cross a page boundrary.
1025 */
1026# if TMPL_MEM_TYPE_SIZE > 1
1027 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
1028# endif
1029 {
1030 /*
1031 * TLB lookup.
1032 */
1033 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
1034 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1035 if (RT_LIKELY(pTlbe->uTag == uTag))
1036 {
1037 /*
1038 * Check TLB page table level access flags.
1039 */
1040 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1041 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1042 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1043 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1044 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1045 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1046 {
1047 /*
1048 * Do the pop.
1049 */
1050 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1051 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1052 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1053 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
1054 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
1055 return uValue;
1056 }
1057 }
1058 }
1059
1060 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1061 outdated page pointer, or other troubles. (This will do a TLB load.) */
1062 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
1063# endif
1064 return RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem);
1065}
1066
1067
1068/**
1069 * Stack push function that longjmps on error.
1070 */
1071DECL_INLINE_THROW(void)
1072RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1073{
1074# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1075 /*
1076 * Decrement the stack pointer (prep), apply segmentation and check that
1077 * the item doesn't cross a page boundrary.
1078 */
1079 uint64_t uNewRsp;
1080 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
1081 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
1082# if TMPL_MEM_TYPE_SIZE > 1
1083 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
1084# endif
1085 {
1086 /*
1087 * TLB lookup.
1088 */
1089 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
1090 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1091 if (RT_LIKELY(pTlbe->uTag == uTag))
1092 {
1093 /*
1094 * Check TLB page table level access flags.
1095 */
1096 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1097 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1098 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1099 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1100 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1101 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1102 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1103 {
1104 /*
1105 * Do the push and return.
1106 */
1107 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1108 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1109 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1110 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
1111 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1112 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
1113 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1114 return;
1115 }
1116 }
1117 }
1118
1119 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1120 outdated page pointer, or other troubles. (This will do a TLB load.) */
1121 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1122# endif
1123 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1124}
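/* Usage sketch (illustrative, assuming the U32 instantiation; the call site is
   hypothetical): PUSH r32 boils down to

       iemMemStackPushU32Jmp(pVCpu, uValue);

   which computes the new stack pointer via iemRegGetRspForPush(), writes the
   value at the new SS-relative top of stack, and only commits RSP once the
   store has succeeded (the fallback path takes care of it otherwise). */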
1125
1126
1127/**
1128 * Stack pop greg function that longjmps on error.
1129 */
1130DECL_INLINE_THROW(void)
1131RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1132{
1133 Assert(iGReg < 16);
1134
1135# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1136 /*
1137 * Increment the stack pointer (prep), apply segmentation and check that
1138 * the item doesn't cross a page boundrary.
1139 */
1140 uint64_t uNewRsp;
1141 RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
1142 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
1143# if TMPL_MEM_TYPE_SIZE > 1
1144 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
1145# endif
1146 {
1147 /*
1148 * TLB lookup.
1149 */
1150 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
1151 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1152 if (RT_LIKELY(pTlbe->uTag == uTag))
1153 {
1154 /*
1155 * Check TLB page table level access flags.
1156 */
1157 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1158 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1159 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1160 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1161 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1162 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1163 {
1164 /*
1165 * Do the pop.
1166 */
1167 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1168 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1169 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1170 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
1171 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1172 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
1173 pVCpu->cpum.GstCtx.rsp = uNewRsp; /* must be first for 16-bit */
1174# if TMPL_MEM_TYPE_SIZE == 2
1175 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1176# elif TMPL_MEM_TYPE_SIZE == 4 || TMPL_MEM_TYPE_SIZE == 8
1177 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1178# else
1179# error "TMPL_MEM_TYPE_SIZE"
1180# endif
1181 return;
1182 }
1183 }
1184 }
1185
1186 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1187 outdated page pointer, or other troubles. (This will do a TLB load.) */
1188 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1189# endif
1190 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1191}
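/* Usage sketch (illustrative, assuming the U32 instantiation; the call site is
   hypothetical): a 32-bit POP into ECX would be

       iemMemStackPopGRegU32Jmp(pVCpu, X86_GREG_xCX);

   which reads the value at the current SS-relative top of stack, advances RSP
   first (so that a 16-bit POP SP keeps the popped value rather than the
   incremented one), and then writes the destination register. */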
1192
1193# ifdef TMPL_WITH_PUSH_SREG
1194/**
1195 * Stack segment push function that longjmps on error.
1196 *
1197 * For a detailed discussion of the behaviour see the fallback functions
1198 * iemMemStackPushUxxSRegSafeJmp.
1199 */
1200DECL_INLINE_THROW(void)
1201RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1202{
1203# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1204 /* See fallback for details on this weirdness: */
1205 bool const fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
1206 uint8_t const cbAccess = fIsIntel && !IEM_IS_REAL_MODE(pVCpu) ? sizeof(uint16_t) : sizeof(TMPL_MEM_TYPE);
1207
1208 /*
1209 * Decrement the stack pointer (prep), apply segmentation and check that
1210 * the item doesn't cross a page boundrary.
1211 */
1212 uint64_t uNewRsp;
1213 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
1214 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, cbAccess, GCPtrTop);
1215# if TMPL_MEM_TYPE_SIZE > 1
1216 if (RT_LIKELY( !(GCPtrEff & (cbAccess - 1U))
1217 || ( cbAccess == sizeof(uint16_t)
1218 ? TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t)
1219 : TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ) ))
1220# endif
1221 {
1222 /*
1223 * TLB lookup.
1224 */
1225 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
1226 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1227 if (RT_LIKELY(pTlbe->uTag == uTag))
1228 {
1229 /*
1230 * Check TLB page table level access flags.
1231 */
1232 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1233 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1234 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1235 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1236 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1237 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1238 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1239 {
1240 /*
1241 * Do the push and return.
1242 */
1243 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1244 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1245 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1246 if (cbAccess == sizeof(uint16_t))
1247 {
1248 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR 'word' SS|%RGv (%RX64->%RX64): %#06x [sreg/i]\n",
1249 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, (uint16_t)uValue));
1250 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1251 }
1252 else
1253 {
1254 TMPL_MEM_TYPE * const puSlot = (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
1255 if (fIsIntel)
1256 {
1257 Assert(IEM_IS_REAL_MODE(pVCpu));
1258 uValue = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
1259 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg/ir]\n",
1260 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1261 }
1262 else
1263 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1264 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1265 *puSlot = uValue;
1266 }
1267 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1268 return;
1269 }
1270 }
1271 }
1272
1273 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1274 outdated page pointer, or other troubles. (This will do a TLB load.) */
1275 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1276# endif
1277 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
1278}
1279# endif /* TMPL_WITH_PUSH_SREG */
1280
1281# if TMPL_MEM_TYPE_SIZE != 8
1282
1283/**
1284 * 32-bit flat stack push function that longjmps on error.
1285 */
1286DECL_INLINE_THROW(void)
1287RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1288{
1289 Assert( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
1290 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
1291 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
1292 && pVCpu->cpum.GstCtx.ss.u64Base == 0);
1293# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1294 /*
1295 * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
1296 */
1297 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
1298# if TMPL_MEM_TYPE_SIZE > 1
1299 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewEsp)))
1300# endif
1301 {
1302 /*
1303 * TLB lookup.
1304 */
1305 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
1306 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1307 if (RT_LIKELY(pTlbe->uTag == uTag))
1308 {
1309 /*
1310 * Check TLB page table level access flags.
1311 */
1312 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1313 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1314 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1315 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1316 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1317 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1318 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1319 {
1320 /*
1321 * Do the push and return.
1322 */
1323 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1324 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1325 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1326 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE "\n",
1327 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1328 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1329 pVCpu->cpum.GstCtx.rsp = uNewEsp;
1330 return;
1331 }
1332 }
1333 }
1334
1335 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1336 outdated page pointer, or other troubles. (This will do a TLB load.) */
1337 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
1338# endif
1339 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1340}
1341
1342
1343/**
1344 * 32-bit flat stack greg pop function that longjmps on error.
1345 */
1346DECL_INLINE_THROW(void)
1347RT_CONCAT3(iemMemFlat32StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1348{
1349 Assert(iGReg < 16);
1350# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1351 /*
1352 * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
1353 */
1354 uint32_t const uOldEsp = pVCpu->cpum.GstCtx.esp;
1355# if TMPL_MEM_TYPE_SIZE > 1
1356 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldEsp)))
1357# endif
1358 {
1359 /*
1360 * TLB lookup.
1361 */
1362 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uOldEsp); /* Cast is required! 2023-08-11 */
1363 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1364 if (RT_LIKELY(pTlbe->uTag == uTag))
1365 {
1366 /*
1367 * Check TLB page table level access flags.
1368 */
1369 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1370 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1371 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1372 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1373 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1374 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1375 {
1376 /*
1377 * Do the pop and update the register values.
1378 */
1379 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1380 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1381 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1382 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
1383 pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
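                /* (Ordering matters for a 16-bit pop into SP: RSP is bumped first so the
                   register write below leaves the popped value, not the incremented stack
                   pointer, in SP.) */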
1384# if TMPL_MEM_TYPE_SIZE == 2
1385 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1386# elif TMPL_MEM_TYPE_SIZE == 4
1387 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1388# else
1389# error "TMPL_MEM_TYPE_SIZE"
1390# endif
1391 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1392 uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
1393 return;
1394 }
1395 }
1396 }
1397
1398 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1399 outdated page pointer, or other troubles. (This will do a TLB load.) */
1400 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uOldEsp));
1401# endif
1402 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1403}
1404
1405 # endif /* TMPL_MEM_TYPE_SIZE != 8 */
1406
1407# ifdef TMPL_WITH_PUSH_SREG
1408/**
1409 * 32-bit flat stack segment push function that longjmps on error.
1410 *
1411 * For a detailed discussion of the behaviour see the fallback functions
1412 * iemMemStackPushUxxSRegSafeJmp.
1413 */
1414DECL_INLINE_THROW(void)
1415RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1416{
1417# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1418 /* See fallback for details on this weirdness: */
1419 bool const fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
1420 uint8_t const cbAccess = fIsIntel && !IEM_IS_REAL_MODE(pVCpu) ? sizeof(uint16_t) : sizeof(TMPL_MEM_TYPE);
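    /* Note: cbAccess only affects the alignment / page-crossing check below: on Intel
       CPUs outside real mode the segment register push is checked as a 16-bit access,
       otherwise the full TMPL_MEM_TYPE width is used. The SReg safe fallback documents
       the underlying behaviour. */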
1421
1422 /*
1423 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1424 */
1425 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
1426 if (RT_LIKELY( !(uNewEsp & (cbAccess - 1))
1427 || (cbAccess == sizeof(uint16_t)
1428 ? TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, uint16_t)
1429 : TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, TMPL_MEM_TYPE)) ))
1430 {
1431 /*
1432 * TLB lookup.
1433 */
1434 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
1435 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1436 if (RT_LIKELY(pTlbe->uTag == uTag))
1437 {
1438 /*
1439 * Check TLB page table level access flags.
1440 */
1441 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1442 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1443 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1444 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1445 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1446 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1447 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1448 {
1449 /*
1450 * Do the push and return.
1451 */
1452 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1453 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1454 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1455 if (cbAccess == sizeof(uint16_t))
1456 {
1457 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR 'word' SS|%RX32 (<-%RX32): %#06x [sreg/i]\n",
1458 uNewEsp, pVCpu->cpum.GstCtx.esp, (uint16_t)uValue));
1459 *(uint16_t *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1460 }
1461 else
1462 {
1463 TMPL_MEM_TYPE * const puSlot = (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK];
1464 if (fIsIntel)
1465 {
1466 Assert(IEM_IS_REAL_MODE(pVCpu));
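                        /* Real-mode Intel behaviour (per the SReg safe fallback): the upper
                           part of the pushed item is filled with the corresponding EFLAGS
                           bits, minus the ones that read as zero, rather than with zeros. */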
1467 uValue = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
1468 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg/ir]\n",
1469 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1470 }
1471 else
1472 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1473 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1474 *puSlot = uValue;
1475 }
1476 pVCpu->cpum.GstCtx.rsp = uNewEsp;
1477 return;
1478 }
1479 }
1480 }
1481
1482 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1483 outdated page pointer, or other troubles. (This will do a TLB load.) */
1484 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
1485# endif
1486 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
1487}
1488# endif /* TMPL_WITH_PUSH_SREG */
1489
1490# if TMPL_MEM_TYPE_SIZE != 4
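/* Note: no 32-bit variant here; in 64-bit code the stack operand size is either 16 or 64 bits. */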
1491
1492/**
1493 * 64-bit flat stack push function that longjmps on error.
1494 */
1495DECL_INLINE_THROW(void)
1496RT_CONCAT3(iemMemFlat64StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1497{
1498# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1499 /*
1500 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1501 */
1502 uint64_t const uNewRsp = pVCpu->cpum.GstCtx.rsp - sizeof(TMPL_MEM_TYPE);
1503# if TMPL_MEM_TYPE_SIZE > 1
1504 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewRsp)))
1505# endif
1506 {
1507 /*
1508 * TLB lookup.
1509 */
1510 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uNewRsp);
1511 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1512 if (RT_LIKELY(pTlbe->uTag == uTag))
1513 {
1514 /*
1515 * Check TLB page table level access flags.
1516 */
1517 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1518 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1519 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1520 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1521 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1522 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1523 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1524 {
1525 /*
1526 * Do the push and return.
1527 */
1528 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1529 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1530 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1531 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX64 (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
1532 uNewRsp, pVCpu->cpum.GstCtx.rsp, uValue));
1533 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewRsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1534 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1535 return;
1536 }
1537 }
1538 }
1539
1540 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1541 outdated page pointer, or other troubles. (This will do a TLB load.) */
1542 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uNewRsp));
1543# endif
1544 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1545}
1546
1547
1548/**
1549 * 64-bit flat stack greg pop function that longjmps on error.
1550 */
1551DECL_INLINE_THROW(void)
1552RT_CONCAT3(iemMemFlat64StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1553{
1554 Assert(iGReg < 16);
1555# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1556 /*
1557 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1558 */
1559 uint64_t const uOldRsp = pVCpu->cpum.GstCtx.rsp;
1560# if TMPL_MEM_TYPE_SIZE > 1
1561 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldRsp)))
1562# endif
1563 {
1564 /*
1565 * TLB lookup.
1566 */
1567 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uOldRsp);
1568 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1569 if (RT_LIKELY(pTlbe->uTag == uTag))
1570 {
1571 /*
1572 * Check TLB page table level access flags.
1573 */
1574 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1575 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1576 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1577 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1578 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1579 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1580 {
1581 /*
1582 * Do the pop and update the register values.
1583 */
1584 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1585 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1586 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1587 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldRsp & GUEST_PAGE_OFFSET_MASK];
1588 pVCpu->cpum.GstCtx.rsp = uOldRsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
1589# if TMPL_MEM_TYPE_SIZE == 2
1590 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1591# elif TMPL_MEM_TYPE_SIZE == 8
1592 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1593# else
1594# error "TMPL_MEM_TYPE_SIZE"
1595# endif
1596 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX64 (->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1597 uOldRsp, uOldRsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
1598 return;
1599 }
1600 }
1601 }
1602
1603 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1604 outdated page pointer, or other troubles. (This will do a TLB load.) */
1605 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uOldRsp));
1606# endif
1607 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1608}
1609
1610# endif /* TMPL_MEM_TYPE_SIZE != 4 */
1611
1612# endif /* IEM_WITH_SETJMP */
1613# endif /* TMPL_MEM_WITH_STACK */
1614
1615
1616#endif /* IEM_WITH_SETJMP */
1617
1618#undef TMPL_MEM_TYPE
1619#undef TMPL_MEM_TYPE_ALIGN
1620#undef TMPL_MEM_TYPE_SIZE
1621#undef TMPL_MEM_FN_SUFF
1622#undef TMPL_MEM_FMT_TYPE
1623#undef TMPL_MEM_FMT_DESC
1624#undef TMPL_MEM_NO_STORE
1625#undef TMPL_MEM_ALIGN_CHECK
1626#undef TMPL_MEM_BY_REF
1627