/* $Id: IEMAllInstCommonBodyMacros.h 104018 2024-03-24 00:14:18Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Common Body Macros.
 *
 * This is placed in its own file without anything else in it, so that it can
 * be digested by SimpleParser in IEMAllInstPython.py prior to processing any
 * of the other IEMAllInstruction*.cpp.h files.  For instance,
 * IEMAllInstCommon.cpp.h wouldn't do, as it defines several invalid
 * instructions and such that could confuse the parser.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/**
 * Special case body for word/dword/qword instructions like SUB and XOR that
 * can be used to zero a register.
 *
 * Since the result is always zero, no flag computation is needed: the status
 * bits are simply cleared and ZF and PF are set.
 *
 * This can be used both for the rv_rm and rm_rv forms since it's working on
 * the same register.
 */
#define IEMOP_BODY_BINARY_rv_SAME_REG_ZERO(a_bRm) \
    if (   (a_bRm >> X86_MODRM_REG_SHIFT) == ((a_bRm & X86_MODRM_RM_MASK) | (X86_MOD_REG << X86_MODRM_REG_SHIFT)) \
        && pVCpu->iem.s.uRexReg == pVCpu->iem.s.uRexB) \
    { \
        switch (pVCpu->iem.s.enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_STORE_GREG_U16_CONST(IEM_GET_MODRM_RM(pVCpu, a_bRm), 0); \
                IEM_MC_LOCAL_EFLAGS(fEFlags); \
                IEM_MC_AND_LOCAL_U32(fEFlags, ~(uint32_t)X86_EFL_STATUS_BITS); \
                IEM_MC_OR_LOCAL_U32(fEFlags, X86_EFL_PF | X86_EFL_ZF); \
                IEM_MC_COMMIT_EFLAGS(fEFlags); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_STORE_GREG_U32_CONST(IEM_GET_MODRM_RM(pVCpu, a_bRm), 0); \
                IEM_MC_LOCAL_EFLAGS(fEFlags); \
                IEM_MC_AND_LOCAL_U32(fEFlags, ~(uint32_t)X86_EFL_STATUS_BITS); \
                IEM_MC_OR_LOCAL_U32(fEFlags, X86_EFL_PF | X86_EFL_ZF); \
                IEM_MC_COMMIT_EFLAGS(fEFlags); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_STORE_GREG_U64_CONST(IEM_GET_MODRM_RM(pVCpu, a_bRm), 0); \
                IEM_MC_LOCAL_EFLAGS(fEFlags); \
                IEM_MC_AND_LOCAL_U32(fEFlags, ~(uint32_t)X86_EFL_STATUS_BITS); \
                IEM_MC_OR_LOCAL_U32(fEFlags, X86_EFL_PF | X86_EFL_ZF); \
                IEM_MC_COMMIT_EFLAGS(fEFlags); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } ((void)0)
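
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * decoder for 'xor Gv,Ev' can try this zeroing special case first and fall
 * back to the generic body defined below.  The iemAImpl_xor_* helpers and
 * the RT_ARCH_VAL_* constants are real IEM/IPRT names, but the flag and
 * architecture arguments chosen here are assumptions; the actual caller
 * lives in IEMAllInstOneByte.cpp.h and may differ in detail.
 *
 * @code
 *      uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      IEMOP_BODY_BINARY_rv_SAME_REG_ZERO(bRm);    // finishes 'xor reg,reg'
 *      IEMOP_BODY_BINARY_rv_rm(bRm, iemAImpl_xor_u16, iemAImpl_xor_u32,
 *                              iemAImpl_xor_u64, 0, xor,
 *                              RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64);
 * @endcode
 */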

/**
 * Body for word/dword/qword instructions like ADD, AND, OR, ++ with a
 * register as the destination.
 *
 * @note Used both in OneByte and TwoByte0f.
 */
#define IEMOP_BODY_BINARY_rv_rm(a_bRm, a_fnNormalU16, a_fnNormalU32, a_fnNormalU64, a_f16BitMcFlag, a_EmitterBasename, a_fNativeArchs) \
    /* \
     * If rm is denoting a register, no more instruction bytes. \
     */ \
    if (IEM_IS_MODRM_REG_MODE(a_bRm)) \
    { \
        switch (pVCpu->iem.s.enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(a_f16BitMcFlag, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint16_t, u16Src, 1); \
                IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint16_t, u16Dst); \
                    IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u16Dst, u16Src, uEFlags, 16); \
                    IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_REG(pVCpu, a_bRm), u16Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint16_t *, pu16Dst, 0); \
                    IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU16, pu16Dst, u16Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint32_t, u32Src, 1); \
                IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint32_t, u32Dst); \
                    IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u32Dst, u32Src, uEFlags, 32); \
                    IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_REG(pVCpu, a_bRm), u32Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
                    IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU32, pu32Dst, u32Src, pEFlags); \
                    IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint64_t, u64Src, 1); \
                IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint64_t, u64Dst); \
                    IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u64Dst, u64Src, uEFlags, 64); \
                    IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm), u64Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
                    IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU64, pu64Dst, u64Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    else \
    { \
        /* \
         * We're accessing memory. \
         */ \
        switch (pVCpu->iem.s.enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(a_f16BitMcFlag, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint16_t, u16Src, 1); \
                IEM_MC_FETCH_MEM_U16(u16Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint16_t, u16Dst); \
                    IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u16Dst, u16Src, uEFlags, 16); \
                    IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_REG(pVCpu, a_bRm), u16Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint16_t *, pu16Dst, 0); \
                    IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU16, pu16Dst, u16Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint32_t, u32Src, 1); \
                IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint32_t, u32Dst); \
                    IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u32Dst, u32Src, uEFlags, 32); \
                    IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_REG(pVCpu, a_bRm), u32Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU32, pu32Dst, u32Src, pEFlags); \
                    IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint64_t, u64Src, 1); \
                IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint64_t, u64Dst); \
                    IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_EmitterBasename,_r_r_efl), u64Dst, u64Src, uEFlags, 64); \
                    IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_REG(pVCpu, a_bRm), u64Dst); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU64, pu64Dst, u64Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    (void)0
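
/*
 * Usage sketch (editor's illustration): a_EmitterBasename is token-pasted
 * into iemNativeEmit_<name>_r_r_efl for the native recompiler path, while
 * the a_fnNormalU16/32/64 helpers serve the fallback path.  A hypothetical
 * 'add Gv,Ev' decoder might invoke the body like this; the flag and
 * architecture arguments are assumptions, the real caller is in
 * IEMAllInstOneByte.cpp.h.
 *
 * @code
 *      uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      IEMOP_BODY_BINARY_rv_rm(bRm, iemAImpl_add_u16, iemAImpl_add_u32,
 *                              iemAImpl_add_u64, 0, add,
 *                              RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64);
 * @endcode
 */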

/**
 * Body for word/dword/qword read-only instructions like CMP, ++ with a
 * register as the destination (only the flags are updated, the destination
 * register is left unmodified).
 *
 * @note Used both in OneByte and TwoByte0f.
 */
#define IEMOP_BODY_BINARY_rv_rm_RO(a_bRm, a_InsNm, a_fNativeArchs) \
    /* \
     * If rm is denoting a register, no more instruction bytes. \
     */ \
    if (IEM_IS_MODRM_REG_MODE(a_bRm)) \
    { \
        switch (pVCpu->iem.s.enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint16_t, u16Src, 1); \
                IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint16_t, u16Dst); \
                    IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_r_efl), u16Dst, u16Src, uEFlags, 16); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint16_t *, pu16Dst, 0); \
                    IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u16), pu16Dst, u16Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint32_t, u32Src, 1); \
                IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint32_t, u32Dst); \
                    IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_r_efl), u32Dst, u32Src, uEFlags, 32); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
                    IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u32), pu32Dst, u32Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint64_t, u64Src, 1); \
                IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_RM(pVCpu, a_bRm)); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint64_t, u64Dst); \
                    IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_r_efl), u64Dst, u64Src, uEFlags, 64); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
                    IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u64), pu64Dst, u64Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    else \
    { \
        /* \
         * We're accessing memory. \
         */ \
        switch (pVCpu->iem.s.enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint16_t, u16Src, 1); \
                IEM_MC_FETCH_MEM_U16(u16Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint16_t, u16Dst); \
                    IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_r_efl), u16Dst, u16Src, uEFlags, 16); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint16_t *, pu16Dst, 0); \
                    IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u16), pu16Dst, u16Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint32_t, u32Src, 1); \
                IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint32_t, u32Dst); \
                    IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_r_efl), u32Dst, u32Src, uEFlags, 32); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u32), pu32Dst, u32Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
                IEM_MC_ARG(uint64_t, u64Src, 1); \
                IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
                IEM_MC_NATIVE_IF(a_fNativeArchs) { \
                    IEM_MC_LOCAL(uint64_t, u64Dst); \
                    IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_LOCAL_EFLAGS(uEFlags); \
                    IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_r_efl), u64Dst, u64Src, uEFlags, 64); \
                    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
                } IEM_MC_NATIVE_ELSE() { \
                    IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
                    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
                    IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, a_bRm)); \
                    IEM_MC_REF_EFLAGS(pEFlags); \
                    IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u64), pu64Dst, u64Src, pEFlags); \
                } IEM_MC_NATIVE_ENDIF(); \
                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
                IEM_MC_END(); \
                break; \
            \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    (void)0
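
/*
 * Usage sketch (editor's illustration): the read-only variant needs just the
 * instruction name, since RT_CONCAT3 pastes it into both the
 * iemAImpl_<name>_u<bits> fallback helpers and the
 * iemNativeEmit_<name>_r_r_efl native emitters.  A 'cmp Gv,Ev' decoder might
 * use it as follows; the architecture mask is an assumption.
 *
 * @code
 *      uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      IEMOP_BODY_BINARY_rv_rm_RO(bRm, cmp, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64);
 * @endcode
 */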