VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h@102977

Last change on this file since 102977 was 102977, checked in by vboxsync, 4 months ago

VMM/IEM: Implemented generic fallback for misaligned x86 locking that is not compatible with the host. Using the existing split-lock solution with VINF_EM_EMULATE_SPLIT_LOCK from bugref:10052. We keep ignoring the 'lock' prefix in the recompiler for single CPU VMs (now also on amd64 hosts). bugref:10547

/* $Id: IEMAllMemRWTmpl.cpp.h 102977 2024-01-19 23:11:30Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - R/W Memory Functions Template.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/* Check template parameters. */
#ifndef TMPL_MEM_TYPE
# error "TMPL_MEM_TYPE is undefined"
#endif
#ifndef TMPL_MEM_TYPE_ALIGN
# define TMPL_MEM_TYPE_ALIGN    (sizeof(TMPL_MEM_TYPE) - 1)
#endif
#ifndef TMPL_MEM_FN_SUFF
# error "TMPL_MEM_FN_SUFF is undefined"
#endif
#ifndef TMPL_MEM_FMT_TYPE
# error "TMPL_MEM_FMT_TYPE is undefined"
#endif
#ifndef TMPL_MEM_FMT_DESC
# error "TMPL_MEM_FMT_DESC is undefined"
#endif
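

/* Illustrative instantiation sketch (not part of this file): the includer
   defines the template parameters and then pulls this file in, once per
   memory access type.  The actual parameter sets live in the including
   source file (e.g. IEMAll.cpp) and may differ from this example:

       #define TMPL_MEM_TYPE       uint32_t
       #define TMPL_MEM_FN_SUFF    U32
       #define TMPL_MEM_FMT_TYPE   "%#010x"
       #define TMPL_MEM_FMT_DESC   "dword"
       #include "IEMAllMemRWTmpl.cpp.h"

   TMPL_MEM_TYPE_ALIGN is an alignment mask handed to iemMemMap/iemMemMapJmp;
   the default of sizeof(TMPL_MEM_TYPE) - 1 requests natural alignment. */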


/**
 * Standard fetch function.
 *
 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
 * is defined.
 */
VBOXSTRICTRC RT_CONCAT(iemMemFetchData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puDst,
                                                         uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puDst = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
        Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, *puDst));
    }
    return rc;
}
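

/* For illustration only: with the hypothetical U32 parameter set sketched
   above, the RT_CONCAT expansions of the fetch/store pair produce functions
   along these lines (the real prototypes live elsewhere in IEM and may
   differ):

       VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *puDst,
                                       uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
       VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg,
                                       RTGCPTR GCPtrMem, uint32_t uValue) RT_NOEXCEPT;

   CImpl code calls these directly and must propagate the strict status code. */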


#ifdef IEM_WITH_SETJMP
/**
 * Safe/fallback fetch function that longjmps on error.
 */
# ifdef TMPL_MEM_BY_REF
void
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pDst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
#  endif
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pSrc), iSegReg, GCPtrMem,
                                                                    IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    *pDst = *pSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pDst));
}
# else /* !TMPL_MEM_BY_REF */
TMPL_MEM_TYPE
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
#  endif
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
                                                                     IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    TMPL_MEM_TYPE const  uRet = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uRet));
    return uRet;
}
# endif /* !TMPL_MEM_BY_REF */
#endif /* IEM_WITH_SETJMP */



/**
 * Standard store function.
 *
 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
 * is defined.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStoreData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                         TMPL_MEM_TYPE const *pValue) RT_NOEXCEPT
#else
                                                         TMPL_MEM_TYPE uValue) RT_NOEXCEPT
#endif
{
    /* The lazy approach for now... */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(*puDst),
                                iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
#ifdef TMPL_MEM_BY_REF
        *puDst = *pValue;
#else
        *puDst = uValue;
#endif
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
#ifdef TMPL_MEM_BY_REF
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    }
    return rc;
}
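

/* TMPL_MEM_BY_REF switches the value parameter from by-value to by-pointer,
   which an includer would typically enable for types too wide to pass in a
   register.  A hypothetical 128-bit instantiation might look like this
   (illustrative only; note that a pointer-taking format such as IPRT's
   %Rhxs is then expected by the Log statements, which pass pValue/pDst
   rather than the value itself):

       #define TMPL_MEM_TYPE       RTUINT128U
       #define TMPL_MEM_BY_REF
       #define TMPL_MEM_FN_SUFF    U128
       #define TMPL_MEM_FMT_TYPE   "%.16Rhxs"
       #define TMPL_MEM_FMT_DESC   "dqword"
       #include "IEMAllMemRWTmpl.cpp.h"
*/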


#ifdef IEM_WITH_SETJMP
/**
 * Safe/fallback store function that longjmps on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   uValue      The value to store.
 */
void RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                          TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
#else
                                                          TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
#endif
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif
#ifdef TMPL_MEM_BY_REF
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puDst), iSegReg, GCPtrMem,
                                                         IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
#ifdef TMPL_MEM_BY_REF
    *puDst = *pValue;
#else
    *puDst = uValue;
#endif
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}
#endif /* IEM_WITH_SETJMP */


#ifdef IEM_WITH_SETJMP

/**
 * Maps a data buffer for atomic read+write direct access (or via a bounce
 * buffer), longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif
    Log8(("IEM AT/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
                                         IEM_ACCESS_DATA_ATOMIC, TMPL_MEM_TYPE_ALIGN);
}
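

/* A sketch of the bUnmapInfo encoding as this template uses it (inferred
   from the assignments in these map functions; treat it as an assumption,
   not a spec): the low nibble holds a mapping slot number plus one, with
   zero reserved for the TLB-hit case where nothing needs unmapping, and the
   high nibble carries the IEM_ACCESS_TYPE_* flags needed by the
   commit/unmap step:

       uint8_t const iSlot   = (bUnmapInfo & 0x0f) - 1;  // mapping slot, if any
       uint8_t const fAccess = bUnmapInfo >> 4;          // IEM_ACCESS_TYPE_* mask
*/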


/**
 * Maps a data buffer for read+write direct access (or via a bounce buffer),
 * longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif
    Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
                                         IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN);
}


/**
 * Maps a data buffer for write-only direct access (or via a bounce buffer),
 * longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif
    Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
                                         IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
}


/**
 * Maps a data buffer for read-only direct access (or via a bounce buffer),
 * longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE const *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif
    Log4(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
                                         IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
}

#endif /* IEM_WITH_SETJMP */


#ifdef TMPL_MEM_WITH_STACK

/**
 * Pops a general purpose register off the stack.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   iGReg       The GREG to load the popped value into.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPopGReg,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iGReg) RT_NOEXCEPT
{
    Assert(iGReg < 16);

    /* Increment the stack pointer. */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Load the value the lazy way. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        TMPL_MEM_TYPE const uValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the register and new RSP values. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            if (sizeof(TMPL_MEM_TYPE) != sizeof(uint16_t))
                pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
            else
                pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
            return VINF_SUCCESS;
        }
    }
    return rc;
}
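

/* Why the sizeof(uint16_t) special case above: x86 register write semantics.
   A 16-bit pop must leave the upper bits of the destination GREG untouched,
   while a 32-bit write zero-extends to 64 bits and a 64-bit write replaces
   the whole register; assigning the full .u member gives both of the latter
   behaviors.  A minimal sketch with hypothetical values:

       // POP AX  (16-bit): rax = (rax & ~(uint64_t)0xffff) | uValue16;
       // POP EAX (32-bit): rax = uValue32;  // implicit zero-extension
       // POP RAX (64-bit): rax = uValue64;
*/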


/**
 * Pushes an item onto the stack, regular version.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   uValue      The value to push.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPush,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) RT_NOEXCEPT
{
    /* Decrement the stack pointer (prep). */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the value the lazy way. */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puDst = uValue;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value unless an access handler made trouble. */
        if (rc == VINF_SUCCESS)
        {
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            return VINF_SUCCESS;
        }
    }

    return rc;
}


/**
 * Pops a generic item off the stack, regular version.
 *
 * This is used by C-implementation code.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   puValue     Where to store the popped value.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPop,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue) RT_NOEXCEPT
{
    /* Increment the stack pointer. */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Read the value the lazy way. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, *puValue));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}


/**
 * Pushes an item onto the stack, using a temporary stack pointer.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   uValue      The value to push.
 * @param   pTmpRsp     Pointer to the temporary stack pointer.
 */
VBOXSTRICTRC RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Decrement the stack pointer (prep). */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Write the value the lazy way. */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puDst = uValue;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value unless an access handler made trouble. */
        if (rc == VINF_SUCCESS)
        {
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, uValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}


/**
 * Pops an item off the stack, using a temporary stack pointer.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   puValue     Where to store the popped value.
 * @param   pTmpRsp     Pointer to the temporary stack pointer.
 */
VBOXSTRICTRC
RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Increment the stack pointer. */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Read the value the lazy way. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, *puValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}


# ifdef IEM_WITH_SETJMP

/**
 * Safe/fallback stack store function that longjmps on error.
 */
void RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
                                                           TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
#  endif

    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrMem,
                                                         IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    *puDst = uValue;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
}


#  ifdef TMPL_WITH_PUSH_SREG
/**
 * Safe/fallback stack SREG store function that longjmps on error.
 */
void RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
                                                               TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
#   if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
#   endif

    /* bs3-cpu-weird-1 explores this instruction.  An AMD 3990X does it by the
       book, with a zero extended DWORD write.  An Intel 10980XE, however, goes
       all weird in real mode, where it writes a DWORD with the top word of
       EFLAGS in the top half; in all other modes it does a WORD access. */

    /** @todo Docs indicate the behavior changed maybe in Pentium or Pentium Pro.
     *        Check ancient hardware when it actually did change. */
    uint8_t bUnmapInfo;
    if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    {
        if (!IEM_IS_REAL_MODE(pVCpu))
        {
            /* WORD per intel specs. */
            uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrMem,
                                                       IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
            *puDst = (uint16_t)uValue;
            iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
            Log12(("IEM WR 'word' SS|%RGv: %#06x [sreg/i]\n", GCPtrMem, (uint16_t)uValue));
        }
        else
        {
            /* DWORD real mode weirdness observed on a 10980XE. */
            /** @todo Check this on other intel CPUs and when pushing registers other
             *        than FS (which is all that bs3-cpu-weird-1 does atm).  (Maybe this is
             *        something for the CPU profile... Hope not.) */
            uint32_t *puDst = (uint32_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
                                                       IEM_ACCESS_STACK_W, sizeof(uint32_t) - 1);
            *puDst = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
            iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg/ir]\n", GCPtrMem, uValue));
        }
    }
    else
    {
        /* DWORD per spec. */
        uint32_t *puDst = (uint32_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
                                                   IEM_ACCESS_STACK_W, sizeof(uint32_t) - 1);
        *puDst = uValue;
        iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
        Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg]\n", GCPtrMem, uValue));
    }
}
#  endif /* TMPL_WITH_PUSH_SREG */


/**
 * Safe/fallback stack fetch function that longjmps on error.
 */
TMPL_MEM_TYPE RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
#  endif

    /* Read the data. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
                                                                     GCPtrMem, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    TMPL_MEM_TYPE const uValue = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Log and return the value. */
    Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
    return uValue;
}


/**
 * Safe/fallback stack push function that longjmps on error.
 */
void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
#  endif

    /* Decrement the stack pointer (prep). */
    uint64_t      uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the data. */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                                         IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    *puDst = uValue;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the RSP change. */
    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}


/**
 * Safe/fallback stack pop greg function that longjmps on error.
 */
void RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
{
#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
#  endif

    /* Increment the stack pointer. */
    uint64_t      uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Read the data. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
                                                                     GCPtrTop, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    TMPL_MEM_TYPE const uValue = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the register and RSP values. */
    Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
    if (sizeof(TMPL_MEM_TYPE) != sizeof(uint16_t))
        pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
    else
        pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
}

#  ifdef TMPL_WITH_PUSH_SREG
/**
 * Safe/fallback stack push function that longjmps on error.
 */
void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
#   if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
#   endif

    /* Decrement the stack pointer (prep). */
    uint64_t      uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the data. */
    /* The Intel docs talk about zero extending the selector register value;
       the Intel CPU tested here may well be zero extending the value
       internally, but it still only writes the lower word... */
    /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
     *        happens when crossing a page boundary: is the high word checked for
     *        write accessibility or not?  Probably it is.  What about segment limits?
     *        It appears this behavior is also shared with trap error codes.
     *
     *        Docs indicate the behavior changed maybe in Pentium or Pentium Pro.
     *        Check ancient hardware when it actually did change. */
    uint8_t   bUnmapInfo;
    uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrTop,
                                               IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
    *puDst = (uint16_t)uValue;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the RSP change. */
    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}
#  endif /* TMPL_WITH_PUSH_SREG */

# endif /* IEM_WITH_SETJMP */

#endif /* TMPL_MEM_WITH_STACK */

/* clean up */
#undef TMPL_MEM_TYPE
#undef TMPL_MEM_TYPE_ALIGN
#undef TMPL_MEM_FN_SUFF
#undef TMPL_MEM_FMT_TYPE
#undef TMPL_MEM_FMT_DESC
#undef TMPL_WITH_PUSH_SREG