Changeset 104099 in vbox

- Timestamp: Mar 28, 2024 1:42:59 AM (6 months ago)
- Location: trunk
- Files: 12 edited
  - include/VBox/err.h (modified) (1 diff)
  - src/VBox/VMM/VMMAll/IEMAllAImpl.asm (modified) (3 diffs)
  - src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h (modified) (4 diffs)
  - src/VBox/VMM/VMMAll/IEMAllInstPython.py (modified) (4 diffs)
  - src/VBox/VMM/VMMAll/IEMAllN8veLiveness.cpp (modified) (1 diff)
  - src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h (modified) (1 diff)
  - src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp (modified) (4 diffs)
  - src/VBox/VMM/VMMAll/IEMAllThrdPython.py (modified) (4 diffs)
  - src/VBox/VMM/VMMAll/target-x86/IEMAllN8veEmit-x86.h (modified) (2 diffs)
  - src/VBox/VMM/include/IEMMc.h (modified) (2 diffs)
  - src/VBox/VMM/include/IEMN8veRecompilerEmit.h (modified) (1 diff)
  - src/VBox/VMM/testcase/tstIEMCheckMc.cpp (modified) (1 diff)
Legend (for the diffs below):
- Unmodified: no prefix
- Added: '+'
- Removed: '-'
trunk/include/VBox/err.h
r103860 → r104099

 /** Recompiler: Fixed jump is out of range. */
 #define VERR_IEM_EMIT_FIXED_JUMP_OUT_OF_RANGE       (-5383)
+/** Recompiler: Unexpected register assignment. */
+#define VERR_IEM_EMIT_UNEXPECTED_VAR_REGISTER       (-5384)

 /** Restart the current instruction. For testing only. */
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm
r104076 → r104099

 ;* Defined Constants And Macros *
 ;*********************************************************************************************************************************
+
+;;
+; This is handy for generating absolutly correct EFLAGS.
+%define IEM_AIMPL_WITH_LOAD_AND_SAVE_ALL_STATUS_FLAGS
+

 ;;
…
 %endif

-
-;;
-; This is handy for generating absolutly correct EFLAGS.
-;%define IEM_AIMPL_WITH_LOAD_AND_SAVE_ALL_STATUS_FLAGS
-

 ;;
…
 ; @param 4 The mask of flags that are zeroed (and thus doesn't require loading, just clearing)
 ;
-%macro IEM_SAVE_FLAGS 3-4 0
+%macro IEM_SAVE_FLAGS 4 0
 %if (%2 | %3 | %4) != 0
        mov     T1_32, [%1]                 ; flags
trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h
r104018 → r104099

     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
     IEM_MC_ARG(uint8_t, cShiftArg, 1); \
-    IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
-    /*IEM_MC_NATIVE_IF(a_fRegNativeArchs) { \
-        IEM_MC_LOCAL(uint16_t, u16Dst); \
+    IEM_MC_NATIVE_IF(a_fRegNativeArchs) { \
+        IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(cShiftArg, X86_GREG_xCX); \
+        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* we modify this on arm64 */ \
+        IEM_MC_LOCAL(uint16_t, u16Dst); \
         IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-    } IEM_MC_NATIVE_ELSE() { */ \
-        IEM_MC_ARG(uint16_t *, pu16Dst, 0); \
-        IEM_MC_ARG(uint32_t *, pEFlags, 2); \
+        IEM_MC_LOCAL_EFLAGS(fEFlags); \
+        IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_Ins,_r_CL_efl), u16Dst, cShiftArg, fEFlags, 16); \
+        IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_RM(pVCpu, bRm), u16Dst); \
+        IEM_MC_COMMIT_EFLAGS(fEFlags); /** @todo IEM_MC_COMMIT_EFLAGS_OPT */ \
+    } IEM_MC_NATIVE_ELSE() { \
+        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
+        IEM_MC_ARG(uint16_t *, pu16Dst, 0); \
         IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
+        IEM_MC_ARG(uint32_t *, pEFlags, 2); \
         IEM_MC_REF_EFLAGS(pEFlags); \
         IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags); \
-    /*}*/ \
+    } IEM_MC_NATIVE_ENDIF(); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
     IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
     IEM_MC_ARG(uint8_t, cShiftArg, 1); \
-    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
-    IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
-    IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-    IEM_MC_REF_EFLAGS(pEFlags); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); \
-    IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_RM(pVCpu, bRm)); \
+    IEM_MC_NATIVE_IF(a_fRegNativeArchs) { \
+        IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(cShiftArg, X86_GREG_xCX); \
+        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* we modify this on arm64 */ \
+        IEM_MC_LOCAL(uint32_t, u32Dst); \
+        IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
+        IEM_MC_LOCAL_EFLAGS(fEFlags); \
+        IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_Ins,_r_CL_efl), u32Dst, cShiftArg, fEFlags, 32); \
+        IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_RM(pVCpu, bRm), u32Dst); \
+        IEM_MC_COMMIT_EFLAGS(fEFlags); /** @todo IEM_MC_COMMIT_EFLAGS_OPT */ \
+    } IEM_MC_NATIVE_ELSE() { \
+        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
+        IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
+        IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
+        IEM_MC_ARG(uint32_t *, pEFlags, 2); \
+        IEM_MC_REF_EFLAGS(pEFlags); \
+        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); \
+        IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_RM(pVCpu, bRm)); \
+    } IEM_MC_NATIVE_ENDIF(); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
     IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
     IEM_MC_ARG(uint8_t, cShiftArg, 1); \
-    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
-    IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
-    IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-    IEM_MC_REF_EFLAGS(pEFlags); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); \
+    IEM_MC_NATIVE_IF(a_fRegNativeArchs) { \
+        IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(cShiftArg, X86_GREG_xCX); \
+        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* we modify this on arm64 */ \
+        IEM_MC_LOCAL(uint64_t, u64Dst); \
+        IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
+        IEM_MC_LOCAL_EFLAGS(fEFlags); \
+        IEM_MC_NATIVE_EMIT_4(RT_CONCAT3(iemNativeEmit_,a_Ins,_r_CL_efl), u64Dst, cShiftArg, fEFlags, 64); \
+        IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_RM(pVCpu, bRm), u64Dst); \
+        IEM_MC_COMMIT_EFLAGS(fEFlags); /** @todo IEM_MC_COMMIT_EFLAGS_OPT */ \
+    } IEM_MC_NATIVE_ELSE() { \
+        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
+        IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
+        IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
+        IEM_MC_ARG(uint32_t *, pEFlags, 2); \
+        IEM_MC_REF_EFLAGS(pEFlags); \
+        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); \
+    } IEM_MC_NATIVE_ENDIF(); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
 {
     IEMOP_MNEMONIC2EX(shl_Ev_CL, "shl Ev,CL", M_CL, SHL, shl, Ev, REG_CL, DISOPTYPE_HARMLESS, 0);
-    GRP2_BODY_Ev_CL(shl, IEMTARGETCPU_EFL_BEHAVIOR_SELECT(g_iemAImpl_shl_eflags), 0, 0);
+    GRP2_BODY_Ev_CL(shl, IEMTARGETCPU_EFL_BEHAVIOR_SELECT(g_iemAImpl_shl_eflags), RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64, 0);
 }
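For reference, the behaviour the new native `shl Ev,CL` path has to reproduce (and which the `pfnNormalU16/U32/U64` fallback helpers implement) can be sketched in plain C. This is a minimal illustration, not VirtualBox code: the flag constants mirror the X86_EFL_* bit positions, the count is masked the same way the emitted NOP check does (0x1f, or 0x3f for 64-bit operands), a masked count of zero leaves EFLAGS untouched, and the AF/OF handling follows the Intel-vs-AMD split described in the IEMAllN8veEmit-x86.h hunk further down.

```c
#include <stdint.h>

#define F_CF (1u << 0)
#define F_PF (1u << 2)
#define F_AF (1u << 4)
#define F_ZF (1u << 6)
#define F_SF (1u << 7)
#define F_OF (1u << 11)
#define F_STATUS (F_CF | F_PF | F_AF | F_ZF | F_SF | F_OF)

/* 32-bit SHL reference; fAmd selects the AMD flag flavour. */
static uint32_t shl32_ref(uint32_t uDst, uint8_t cl, uint32_t fEfl, int fAmd)
{
    unsigned const cShift = cl & 0x1f;                 /* the CPU masks the count first ... */
    if (!cShift)
        return fEfl;                                   /* ... and a zero count changes nothing, EFLAGS included */

    uint32_t const uResult = uDst << cShift;
    fEfl &= ~F_STATUS;
    fEfl |= ((uDst >> (32 - cShift)) & 1) ? F_CF : 0;  /* CF = last bit shifted out */
    fEfl |= uResult == 0 ? F_ZF : 0;
    fEfl |= (uResult & UINT32_C(0x80000000)) ? F_SF : 0;
    fEfl |= (__builtin_popcount(uResult & 0xff) & 1) ? 0 : F_PF; /* PF = even parity of low byte (GCC/Clang builtin) */
    if (fAmd)
        fEfl |= F_AF                                                  /* AMD always sets AF ...            */
              | ((((uResult >> 31) ^ (fEfl & F_CF)) & 1) ? F_OF : 0); /* ... and takes OF from the last shift */
    else
        fEfl |= (((uDst ^ (uDst << 1)) >> 31) & 1) ? F_OF : 0;        /* Intel: OF from the first shift, AF left clear */
    return fEfl;
}
```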
trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py
r104092 → r104099

 'IEM_MC_NATIVE_ELSE':                       (McBlock.parseMcGenericCond,       False, False, True,  ),
 'IEM_MC_NATIVE_ENDIF':                      (McBlock.parseMcGenericCond,       False, False, True,  ),
+'IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL': (McBlock.parseMcGeneric,          False, False, True,  ),
 'IEM_MC_OR_2LOCS_U32':                      (McBlock.parseMcGeneric,           False, False, False, ),
 'IEM_MC_OR_GREG_U16':                       (McBlock.parseMcGeneric,           True,  True,  True,  ),
…
 },
 'rotate_count': {  # rol and ror w/o fixed 1 shift count
-    'asFlTest':   [],
+    'asFlTest':   [ 'cf', 'of', ],   # If the count is zero, nothing changes.
     'asFlModify': [ 'cf', 'of', ],
     'asFlClear':  [],
…
 },
 'rotate_carry_count': {  # rcl and rcr w/o fixed 1 shift count
-    'asFlTest':   [ 'cf', ],
+    'asFlTest':   [ 'cf', 'of', ],   # If the count is zero, nothing changes, so 'of' is also input.
     'asFlModify': [ 'cf', 'of', ],
     'asFlClear':  [],
…
 },
 'shift_count': {  # shl, shr or sar w/o fixed 1 shift count
-    'asFlTest':   [],
+    'asFlTest':   [ 'cf', 'pf', 'af', 'zf', 'sf', 'of', ],   # If the count is zero, nothing is changed.
     'asFlModify': [ 'cf', 'pf', 'af', 'zf', 'sf', 'of', ],
     'asFlClear':  [],
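The flags move into 'asFlTest' because a shift or rotate whose masked count is zero leaves EFLAGS exactly as they were, so the incoming values can flow through to whoever reads the flags next. A minimal C illustration of why that turns the flags into a liveness input as well as an output (not IEM code):

```c
#include <stdint.h>

/* If cShift masks to zero, the old flags are returned unchanged - so a liveness
   pass must treat the incoming EFLAGS bits as consumed here, not merely overwritten. */
static uint32_t shiftedFlags(uint32_t fEflOld, uint32_t fEflNew, uint8_t cShift)
{
    return (cShift & 0x1f) ? fEflNew : fEflOld;
}
```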
trunk/src/VBox/VMM/VMMAll/IEMAllN8veLiveness.cpp
r104076 → r104099

 #define IEM_MC_NATIVE_EMIT_8(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6, a7) NOP()

+#define IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(a_VarNm, a_idxHostReg) NOP()
+
 #define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) NOP()
 #define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) NOP()
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h
r104092 → r104099

 #define IEM_MC_NATIVE_EMIT_8(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6, a7) \
     off = a_fnEmitter(pReNative, off, (a0), (a1), (a2), (a3), (a4), (a5), (a6), (a7))
+
+
+#ifndef RT_ARCH_AMD64
+# define IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(a_VarNm, a_idxHostReg) ((void)0)
+#else
+/** @note This is a naive approach that ASSUMES that the register isn't
+ *        allocated, so it only works safely for the first allocation(s) in
+ *        a MC block. */
+# define IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(a_VarNm, a_idxHostReg) \
+    off = iemNativeVarSetAmd64HostRegisterForLocal(pReNative, off, a_VarNm, a_idxHostReg)
+
+DECL_INLINE_THROW(uint8_t) iemNativeVarRegisterSet(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint8_t idxReg, uint32_t off);
+
+DECL_INLINE_THROW(uint32_t)
+iemNativeVarSetAmd64HostRegisterForLocal(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar, uint8_t idxHstReg)
+{
+    Log12(("iemNativeVarSetAmd64HostRegisterForLocal: idxVar=%#x idxHstReg=%s (%#x) off=%#x\n", idxVar, g_apszIemNativeHstRegNames[idxHstReg], idxHstReg, off));
+    Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs));
+    Assert(!(pReNative->Core.bmHstRegs & RT_BIT_32(idxHstReg))); /* iemNativeVarRegisterSet does a throw/longjmp on this */
+
+# ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
+    /* Must flush the register if it hold pending writes. */
+    if (   (pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxHstReg))
+        && (pReNative->Core.bmGstRegShadowDirty & pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows) )
+        off = iemNativeRegFlushDirtyGuest(pReNative, off, pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows);
+# endif
+
+    iemNativeVarRegisterSet(pReNative, idxVar, idxHstReg, off);
+    return off;
+}
+
+#endif /* RT_ARCH_AMD64 */

trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
r104098 → r104099

     Assert(   idxHstReg != UINT8_MAX
            && pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(enmGstReg));
-    Log12(("iemNativeRegFlushPendingWrite: Clearing guest register %s shadowed by host %s\n",
-           g_aGstShadowInfo[enmGstReg].pszName, g_apszIemNativeHstRegNames[idxHstReg]));
+    Log12(("iemNativeRegFlushPendingWrite: Clearing guest register %s shadowed by host %s (off=%#x)\n",
+           g_aGstShadowInfo[enmGstReg].pszName, g_apszIemNativeHstRegNames[idxHstReg], off));

     off = iemNativeEmitStoreGprWithGstShadowReg(pReNative, off, enmGstReg, idxHstReg);
…
 iemNativeRegFlushDirtyGuest(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint64_t fFlushGstReg /*= UINT64_MAX*/)
 {
-    if (pReNative->Core.bmGstRegShadowDirty & fFlushGstReg)
+    uint64_t bmGstRegShadowDirty = pReNative->Core.bmGstRegShadowDirty & fFlushGstReg;
+    if (bmGstRegShadowDirty)
     {
 # ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
         iemNativeDbgInfoAddNativeOffset(pReNative, off);
-        iemNativeDbgInfoAddGuestRegWriteback(pReNative, false /*fSimdReg*/, pReNative->Core.bmGstRegShadowDirty & fFlushGstReg);
+        iemNativeDbgInfoAddGuestRegWriteback(pReNative, false /*fSimdReg*/, bmGstRegShadowDirty);
 # endif
-        uint64_t bmGstRegShadowDirty = pReNative->Core.bmGstRegShadowDirty & fFlushGstReg;
-        uint32_t idxGstReg = 0;
-
         do
         {
-            if (bmGstRegShadowDirty & 0x1)
-            {
-                off = iemNativeRegFlushPendingWrite(pReNative, off, (IEMNATIVEGSTREG)idxGstReg);
-                Assert(!(pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(idxGstReg)));
-            }
-            idxGstReg++;
-            bmGstRegShadowDirty >>= 1;
+            unsigned const idxGstReg = ASMBitFirstSetU64(bmGstRegShadowDirty) - 1;
+            bmGstRegShadowDirty &= ~RT_BIT_64(idxGstReg);
+            off = iemNativeRegFlushPendingWrite(pReNative, off, (IEMNATIVEGSTREG)idxGstReg);
+            Assert(!(pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(idxGstReg)));
         } while (bmGstRegShadowDirty);
     }
…
     iemNativeDbgInfoAddGuestRegWriteback(pReNative, false /*fSimdReg*/, pReNative->Core.bmGstRegShadowDirty & fGstRegShadows);
 # endif
+    /** @todo r=bird: This is a crap way of enumerating a bitmask where we're
+     *        likely to only have a single bit set. It'll be in the 0..15 range,
+     *        but still it's 15 unnecessary loops for the last guest register. */

     uint64_t bmGstRegShadowDirty = pReNative->Core.bmGstRegShadowDirty & fGstRegShadows;
…
         case kIemTbDbgEntryType_NativeOffset:
             offDbgNativeNext = pDbgInfo->aEntries[iDbgEntry].NativeOffset.offNative;
-            Assert(offDbgNativeNext > offNative);
+            Assert(offDbgNativeNext >= offNative);
             break;
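The rewritten flush loop only visits the set bits of the dirty mask instead of shifting the whole mask one bit at a time (the @todo in the following hunk flags the remaining old-style loop). The same pattern in stand-alone C, with a compiler builtin standing in for IPRT's ASMBitFirstSetU64, which returns a 1-based bit index (hence the `- 1` in the diff):

```c
#include <stdint.h>

typedef void (*PFNFLUSHREG)(unsigned idxGstReg);

static void flushDirtyGuestRegs(uint64_t bmDirty, PFNFLUSHREG pfnFlush)
{
    while (bmDirty)
    {
        /* __builtin_ctzll gives the 0-based index of the lowest set bit,
           i.e. the same value as ASMBitFirstSetU64(bmDirty) - 1. */
        unsigned const idxGstReg = (unsigned)__builtin_ctzll(bmDirty);
        bmDirty &= ~(UINT64_C(1) << idxGstReg);
        pfnFlush(idxGstReg);
    }
}
```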
trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py
r104081 → r104099

     };

-    def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
+    def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams, uNoRefLevel = 0):
         """
         Outputs code for unpacking parameters.
…
         for asVar in sorted(aasVars):
             oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
+
+        if uNoRefLevel > 0 and aasVars:
+            if uNoRefLevel > 1:
+                # level 2: Everything. This is used by liveness.
+                oOut.write('   ');
+                for asVar in sorted(aasVars):
+                    oOut.write(' RT_NOREF_PV(%s);' % (asVar[2],));
+                oOut.write('\n');
+            else:
+                # level 1: Only pfnXxxx variables. This is used by native.
+                for asVar in sorted(aasVars):
+                    if asVar[2].startswith('pfn'):
+                        oOut.write('    RT_NOREF_PV(%s);\n' % (asVar[2],));
         return True;
…
                                                  ('pCallEntry->auParams[0]',
                                                   'pCallEntry->auParams[1]',
-                                                  'pCallEntry->auParams[2]',));
+                                                  'pCallEntry->auParams[2]',),
+                                                 uNoRefLevel = 1);

         # Now for the actual statements.
…
                                                  ('pCallEntry->auParams[0]',
                                                   'pCallEntry->auParams[1]',
-                                                  'pCallEntry->auParams[2]',));
-        asNoRefs = []; #[ 'RT_NOREF_PV(pReNative);', ];
-        for aoRefs in oVariation.dParamRefs.values():
-            asNoRefs.append('RT_NOREF_PV(%s);' % (aoRefs[0].sNewName,));
-        oOut.write('        %s\n' % (' '.join(asNoRefs),));
+                                                  'pCallEntry->auParams[2]',),
+                                                 uNoRefLevel = 2);

         # Now for the actual statements.
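The new uNoRefLevel argument only controls how many RT_NOREF_PV() statements get appended to the generated parameter-unpacking code: level 1 (native recompiler functions) covers just the pfnXxxx parameters, level 2 (liveness functions) covers every unpacked parameter. RT_NOREF_PV is IPRT's unused-variable marker; assuming the usual cast-to-void definition, the effect boils down to this sketch (names here are made up, not generator output):

```c
/* Stand-in for IPRT's RT_NOREF_PV(); the real macro lives in iprt/cdefs.h. */
#define MY_NOREF_PV(var) ((void)(var))

/* Hypothetical generated body: pfnAImpl is unpacked but unused by this
   variant, so the generator now silences the unused-variable warning. */
static int exampleGeneratedBody(unsigned long pfnAImpl, unsigned uImm)
{
    MY_NOREF_PV(pfnAImpl);   /* what a uNoRefLevel >= 1 build appends */
    return (int)(uImm + 1);
}
```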
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllN8veEmit-x86.h
r104029 → r104099


+
+/*********************************************************************************************************************************
+*   ADD, ADC, SUB, SBB, CMP                                                                                                      *
+*********************************************************************************************************************************/
+
 /**
  * The ADD instruction will set all status flags.
…


+
+
+/*********************************************************************************************************************************
+*   Shifting and Rotating.                                                                                                       *
+*********************************************************************************************************************************/
+
+
+typedef enum
+{
+    kIemNativeEmitEFlagsForShiftType_Left,
+    kIemNativeEmitEFlagsForShiftType_Right,
+    kIemNativeEmitEFlagsForShiftType_SignedRight
+} IEMNATIVEEMITEFLAGSFORSHIFTTYPE;
+
+/**
+ * This is used by SHL, SHR and SAR emulation.
+ *
+ * It takes liveness stuff into account.
+ */
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitEFlagsForShift(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxRegEfl, uint8_t idxRegResult,
+                            uint8_t idxRegSrc, uint8_t idxRegCount, uint8_t cOpBits, IEMNATIVEEMITEFLAGSFORSHIFTTYPE enmType,
+                            uint8_t idxRegTmp)
+{
+    RT_NOREF(pReNative, off, idxRegEfl, idxRegResult, idxRegSrc, idxRegCount, cOpBits, enmType);
+#if 0 //def IEMNATIVE_WITH_EFLAGS_SKIPPING
+    /*
+     * See if we can skip this wholesale.
+     */
+    PCIEMLIVENESSENTRY const pLivenessEntry = &pReNative->paLivenessEntries[pReNative->idxCurCall];
+    if (IEMLIVENESS_STATE_ARE_STATUS_EFL_TO_BE_CLOBBERED(pLivenessEntry))
+    {
+        STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeEflSkippedLogical);
+# ifdef IEMNATIVE_STRICT_EFLAGS_SKIPPING
+        off = iemNativeEmitOrImmIntoVCpuU32(pReNative, off, X86_EFL_STATUS_BITS, RT_UOFFSETOF(VMCPU, iem.s.fSkippingEFlags));
+# endif
+    }
+    else
+#endif
+    {
+        /*
+         * The difference between Intel and AMD flags for SHL are:
+         *  - Intel always clears AF while AMD always sets it.
+         *  - Intel sets OF for the first shift, while AMD for the last shift.
+         *
+         */
+
+#ifdef RT_ARCH_AMD64
+        /*
+         * We capture flags and does the additional OF and AF calculations as needed.
+         */
+        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 64);
+        /** @todo kIemNativeEmitEFlagsForShiftType_SignedRight: we could alternatively
+         *        use SAHF here when host rax is free since, OF is cleared. */
+        /* pushf */
+        pCodeBuf[off++] = 0x9c;
+        /* pop  tmp */
+        if (idxRegTmp >= 8)
+            pCodeBuf[off++] = X86_OP_REX_B;
+        pCodeBuf[off++] = 0x58 + (idxRegTmp & 7);
+        /* Clear the status bits in EFLs. */
+        off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, idxRegEfl, ~X86_EFL_STATUS_BITS);
+        uint8_t const idxTargetCpuEflFlavour = pReNative->pVCpu->iem.s.aidxTargetCpuEflFlavour[1];
+        if (idxTargetCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_NATIVE)
+            off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, idxRegTmp, X86_EFL_STATUS_BITS);
+        else
+        {
+            /* and  tmp, X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_CF */
+            off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, idxRegTmp, X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_CF);
+            if (idxTargetCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD)
+                off = iemNativeEmitOrGpr32ByImmEx(pCodeBuf, off, idxRegTmp, X86_EFL_AF);
+            /* OR in the flags we collected. */
+            off = iemNativeEmitOrGpr32ByGprEx(pCodeBuf, off, idxRegEfl, idxRegTmp);
+
+            /* Calculate OF */
+            if (idxTargetCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD)
+            {
+                /* AMD last bit shifted: fEfl |= ((uResult >> (cOpBits - 1)) ^ fCarry) << X86_EFL_OF_BIT; */
+                /* bt   idxRegResult, (cOpBits - 1) => CF=result-sign-bit */
+                off = iemNativeEmitAmd64TwoByteModRmInstrRREx(pCodeBuf, off, 0x0f, 0x0b /*ud2*/, 0xba,
+                                                              RT_MAX(cOpBits, 16), 4, idxRegResult);
+                pCodeBuf[off++] = cOpBits - 1;
+                /* setc idxRegTmp */
+                off = iemNativeEmitAmd64TwoByteModRmInstrRREx(pCodeBuf, off, 0x0f, 0x92, 0x0b /*ud2*/, 8, 0, idxRegTmp);
+                /* xor  idxRegTmp, idxRegEfl */
+                off = iemNativeEmitXorGpr32ByGpr32Ex(pCodeBuf, off, idxRegTmp, idxRegEfl);
+                /* and  idxRegTmp, 1 */
+                off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, idxRegTmp, 1);
+                /* shl  idxRegTmp, X86_EFL_OF_BIT */
+                off = iemNativeEmitShiftGpr32LeftEx(pCodeBuf, off, idxRegTmp, X86_EFL_OF_BIT);
+            }
+            else
+            {
+                /* Intel first bit shifted: fEfl |= X86_EFL_GET_OF_ ## cOpBits(uDst ^ (uDst << 1)); */
+                if (cOpBits <= 32)
+                {
+                    /* mov idxRegTmp, idxRegSrc */
+                    off = iemNativeEmitLoadGprFromGpr32Ex(pCodeBuf, off, idxRegTmp, idxRegSrc);
+                    /* shl idxRegTmp, 1 */
+                    off = iemNativeEmitShiftGpr32LeftEx(pCodeBuf, off, idxRegTmp, 1);
+                    /* xor idxRegTmp, idxRegSrc */
+                    off = iemNativeEmitXorGprByGprEx(pCodeBuf, off, idxRegTmp, idxRegSrc);
+                    /* shr idxRegTmp, cOpBits - X86_EFL_OF_BIT - 1  or  shl idxRegTmp, X86_EFL_OF_BIT - cOpBits + 1 */
+                    if (cOpBits >= X86_EFL_OF_BIT)
+                        off = iemNativeEmitShiftGpr32RightEx(pCodeBuf, off, idxRegTmp, cOpBits - X86_EFL_OF_BIT - 1);
+                    else
+                        off = iemNativeEmitShiftGpr32LeftEx(pCodeBuf, off, idxRegTmp, X86_EFL_OF_BIT - cOpBits + 1);
+                }
+                else
+                {
+                    /* same as above but with 64-bit grps*/
+                    off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, idxRegTmp, idxRegSrc);
+                    off = iemNativeEmitShiftGprLeftEx(pCodeBuf, off, idxRegTmp, 1);
+                    off = iemNativeEmitXorGprByGprEx(pCodeBuf, off, idxRegTmp, idxRegSrc);
+                    off = iemNativeEmitShiftGprRightEx(pCodeBuf, off, idxRegTmp, cOpBits - X86_EFL_OF_BIT - 1);
+                }
+                /* and idxRegTmp, X86_EFL_OF */
+                off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, idxRegTmp, X86_EFL_OF);
+            }
+        }
+        /* Or in the collected flag(s) */
+        off = iemNativeEmitOrGpr32ByGprEx(pCodeBuf, off, idxRegEfl, idxRegTmp);
+
+#elif defined(RT_ARCH_ARM64)
+        /*
+         * Calculate flags.
+         */
+        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20);
+
+        /* Clear the status bits. ~0x8D5 (or ~0x8FD) can't be AND immediate, so use idxRegTmp for constant. */
+        off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, idxRegTmp, ~X86_EFL_STATUS_BITS);
+        off = iemNativeEmitAndGpr32ByGpr32Ex(pCodeBuf, off, idxRegEfl, idxRegTmp);
+
+        /* N,Z -> SF,ZF */
+        if (cOpBits < 32)
+            pCodeBuf[off++] = Armv8A64MkInstrSetF8SetF16(idxRegResult, cOpBits > 8); /* sets NZ */
+        else
+            pCodeBuf[off++] = Armv8A64MkInstrAnds(ARMV8_A64_REG_XZR, idxRegResult, idxRegResult, cOpBits > 32 /*f64Bit*/);
+        pCodeBuf[off++] = Armv8A64MkInstrMrs(idxRegTmp, ARMV8_AARCH64_SYSREG_NZCV); /* Bits: 31=N; 30=Z; 29=C; 28=V; */
+        pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegTmp, idxRegTmp, 30);
+        pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxRegTmp, X86_EFL_ZF_BIT, 2, false /*f64Bit*/);
+        AssertCompile(X86_EFL_ZF_BIT + 1 == X86_EFL_SF_BIT);
+
+        /* Calculate 8-bit parity of the result. */
+        pCodeBuf[off++] = Armv8A64MkInstrEor(idxRegTmp, idxRegResult, idxRegResult, false /*f64Bit*/,
+                                             4 /*offShift6*/, kArmv8A64InstrShift_Lsr);
+        pCodeBuf[off++] = Armv8A64MkInstrEor(idxRegTmp, idxRegTmp, idxRegTmp, false /*f64Bit*/,
+                                             2 /*offShift6*/, kArmv8A64InstrShift_Lsr);
+        pCodeBuf[off++] = Armv8A64MkInstrEor(idxRegTmp, idxRegTmp, idxRegTmp, false /*f64Bit*/,
+                                             1 /*offShift6*/, kArmv8A64InstrShift_Lsr);
+        Assert(Armv8A64ConvertImmRImmS2Mask32(0, 0) == 1);
+        pCodeBuf[off++] = Armv8A64MkInstrEorImm(idxRegTmp, idxRegTmp, 0, 0, false /*f64Bit*/);
+        pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxRegTmp, X86_EFL_PF_BIT, 1, false /*f64Bit*/);
+
+        /* Calculate carry - the last bit shifted out of the input value. */
+        if (enmType == kIemNativeEmitEFlagsForShiftType_Left)
+        {
+            /* CF = (idxRegSrc >> (cOpBits - idxRegCount))) & 1 */
+            pCodeBuf[off++] = Armv8A64MkInstrMovZ(idxRegTmp, cOpBits);
+            pCodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegTmp, idxRegCount, false /*f64Bit*/, cOpBits < 32 /*fSetFlags*/);
+            if (cOpBits < 32)
+                pCodeBuf[off++] = Armv8A64MkInstrBCond(kArmv8InstrCond_Cc, 3); /* 16 or 8 bit: CF is clear if all shifted out */
+            pCodeBuf[off++] = Armv8A64MkInstrLsrv(idxRegTmp, idxRegSrc, idxRegTmp, cOpBits > 32);
+        }
+        else
+        {
+            /* CF = (idxRegSrc >> (idxRegCount - 1)) & 1 */
+            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegTmp, idxRegCount, 1, false /*f64Bit*/);
+            pCodeBuf[off++] = Armv8A64MkInstrLsrv(idxRegTmp, idxRegSrc, idxRegTmp, cOpBits > 32);
+        }
+        pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxRegTmp, X86_EFL_CF_BIT, 1, false /*f64Bit*/);
+
+        uint8_t const idxTargetCpuEflFlavour = pReNative->pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
+        if (idxTargetCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_AMD)
+        {
+            /* Intel: OF = first bit shifted: fEfl |= X86_EFL_GET_OF_ ## cOpBits(uDst ^ (uDst << 1)); */
+            pCodeBuf[off++] = Armv8A64MkInstrEor(idxRegTmp, idxRegSrc, idxRegSrc, cOpBits > 32, 1 /*left shift count*/);
+            pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegTmp, idxRegTmp, cOpBits - 1, cOpBits > 32);
+            pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxRegTmp, X86_EFL_OF_BIT, 1, false /*f64Bit*/);
+        }
+        else
+        {
+            /* AMD: OF = last bit shifted: fEfl |= ((uResult >> (cOpBits - 1)) ^ fCarry) << X86_EFL_OF_BIT; */
+            AssertCompile(X86_EFL_CF_BIT == 0);
+            pCodeBuf[off++] = Armv8A64MkInstrEor(idxRegTmp, idxRegEfl, idxRegResult, cOpBits > 32, /* ASSUMES CF calculated! */
+                                                 cOpBits - 1, kArmv8A64InstrShift_Lsr);
+            pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxRegTmp, X86_EFL_OF_BIT, 1, false /*f64Bit*/);
+
+            /* AMD unconditionally clears AF. */
+            Assert(Armv8A64ConvertImmRImmS2Mask32(0, 32 - X86_EFL_AF_BIT) == X86_EFL_AF);
+            pCodeBuf[off++] = Armv8A64MkInstrOrrImm(idxRegEfl, idxRegEfl, 0, 32 - X86_EFL_AF_BIT, false /*f64Bit*/);
+        }
+#else
+# error "port me"
+#endif
+        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+
+# ifdef IEMNATIVE_STRICT_EFLAGS_SKIPPING
+        off = iemNativeEmitStoreImmToVCpuU32(pReNative, off, 0, RT_UOFFSETOF(VMCPU, iem.s.fSkippingEFlags));
+# endif
+    }
+    return off;
+}
+
+
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_shl_r_CL_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                           uint8_t idxVarDst, uint8_t idxVarCount, uint8_t idxVarEfl, uint8_t cOpBits)
+{
+    /* Note! Since we're doing some branching here, we need to allocate all
+             registers we need before the jump or we may end up with invalid
+             register state if the branch is taken. */
+    uint8_t const idxRegTmp   = iemNativeRegAllocTmp(pReNative, &off); /* Do this first in hope we'll get EAX. */
+    uint8_t const idxRegCount = iemNativeVarRegisterAcquire(pReNative, idxVarCount, &off, true /*fInitialized*/); /* modified on arm64 */
+    uint8_t const idxRegDst   = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
+    uint8_t const idxRegEfl   = iemNativeVarRegisterAcquire(pReNative, idxVarEfl, &off, true /*fInitialized*/);
+
+#ifdef RT_ARCH_AMD64
+    /* Make sure IEM_MC_NATIVE_AMD64_HOST_REG_FOR_LOCAL was used. */
+    AssertStmt(idxRegCount == X86_GREG_xCX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_EMIT_UNEXPECTED_VAR_REGISTER));
+
+    /* We only need a copy of the input value if the target CPU differs from the host CPU. */
+    uint8_t const idxRegDstIn = pReNative->pVCpu->iem.s.aidxTargetCpuEflFlavour[1] == IEMTARGETCPU_EFL_BEHAVIOR_NATIVE
+                              ? UINT8_MAX : iemNativeRegAllocTmp(pReNative, &off);
+    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4+2+3+4);
+
+    /* Check if it's NOP before we do anything. */
+    off = iemNativeEmitTestAnyBitsInGpr8Ex(pCodeBuf, off, idxRegCount, cOpBits <= 32 ? 0x1f : 0x3f);
+    uint32_t const offFixup = off;
+    off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off /*8-bit should be enough */, kIemNativeInstrCond_z);
+
+    if (idxRegDstIn != UINT8_MAX)
+        off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, idxRegDstIn, idxRegDst);
+    off = iemNativeEmitAmd64OneByteModRmInstrRREx(pCodeBuf, off, 0xd2, 0xd3, cOpBits, 4, idxRegDst);
+
+#elif defined(RT_ARCH_ARM64)
+    /* We always (except we can skip EFLAGS calcs) a copy of the input value. */
+    uint8_t const idxRegDstIn = iemNativeRegAllocTmp(pReNative, &off);
+    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
+
+    /* Check if it's NOP before we do anything. We MODIFY idxRegCount here! */
+    Assert(Armv8A64ConvertImmRImmS2Mask32(4, 0) == 0x1f);
+    Assert(Armv8A64ConvertImmRImmS2Mask32(5, 0) == 0x3f);
+    pCodeBuf[off++] = Armv8A64MkInstrAndsImm(idxRegCount, idxRegCount, cOpBits > 32 ? 5 : 4, 0, false /*f64Bit*/);
+    uint32_t const offFixup = off;
+    off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off, kArmv8InstrCond_Eq);
+
+    pCodeBuf[off++] = Armv8A64MkInstrMov(idxRegDstIn, idxRegDst);
+    pCodeBuf[off++] = Armv8A64MkInstrLslv(idxRegDst, idxRegDst, idxRegCount, cOpBits > 32 /*f64Bit*/);
+    if (cOpBits < 32)
+    {
+        Assert(Armv8A64ConvertImmRImmS2Mask32(7, 0) == 0xff);
+        Assert(Armv8A64ConvertImmRImmS2Mask32(15, 0) == 0xffff);
+        pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegDst, idxRegDst, cOpBits - 1, 0, false /*f64Bit*/);
+    }
+
+#else
+# error "port me"
+#endif
+
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+    off = iemNativeEmitEFlagsForShift(pReNative, off, idxRegEfl, idxRegDst, idxRegDstIn, idxRegCount,
+                                      cOpBits, kIemNativeEmitEFlagsForShiftType_Left, idxRegTmp);
+
+    /* fixup the jump */
+    iemNativeFixupFixedJump(pReNative, offFixup, off);
+
+#ifdef RT_ARCH_AMD64
+    if (idxRegDstIn != UINT8_MAX)
+#endif
+        iemNativeRegFreeTmp(pReNative, idxRegDstIn);
+    iemNativeVarRegisterRelease(pReNative, idxVarEfl);
+    iemNativeVarRegisterRelease(pReNative, idxVarDst);
+    iemNativeVarRegisterRelease(pReNative, idxVarCount);
+    iemNativeRegFreeTmp(pReNative, idxRegTmp);
+    return off;
+}
+
+
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_shr_r_CL_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                           uint8_t idxVarDst, uint8_t idxVarCount, uint8_t idxVarEfl, uint8_t cOpBits)
+{
+    RT_NOREF(idxVarDst, idxVarCount, idxVarEfl, cOpBits);
+    AssertFailed();
+    return iemNativeEmitBrk(pReNative, off, 0x666);
+}
+
+
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_sar_r_CL_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                           uint8_t idxVarDst, uint8_t idxVarCount, uint8_t idxVarEfl, uint8_t cOpBits)
+{
+    RT_NOREF(idxVarDst, idxVarCount, idxVarEfl, cOpBits);
+    AssertFailed();
+    return iemNativeEmitBrk(pReNative, off, 0x666);
+}
+
+
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_rol_r_CL_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                           uint8_t idxVarDst, uint8_t idxVarCount, uint8_t idxVarEfl, uint8_t cOpBits)
+{
+    RT_NOREF(idxVarDst, idxVarCount, idxVarEfl, cOpBits);
+    AssertFailed();
+    return iemNativeEmitBrk(pReNative, off, 0x666);
+}
+
+
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_ror_r_CL_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                           uint8_t idxVarDst, uint8_t idxVarCount, uint8_t idxVarEfl, uint8_t cOpBits)
+{
+    RT_NOREF(idxVarDst, idxVarCount, idxVarEfl, cOpBits);
+    AssertFailed();
+    return iemNativeEmitBrk(pReNative, off, 0x666);
+}
+
+
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_rcl_r_CL_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                           uint8_t idxVarDst, uint8_t idxVarCount, uint8_t idxVarEfl, uint8_t cOpBits)
+{
+    RT_NOREF(idxVarDst, idxVarCount, idxVarEfl, cOpBits);
+    AssertFailed();
+    return iemNativeEmitBrk(pReNative, off, 0x666);
+}
+
+
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_rcr_r_CL_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                           uint8_t idxVarDst, uint8_t idxVarCount, uint8_t idxVarEfl, uint8_t cOpBits)
+{
+    RT_NOREF(idxVarDst, idxVarCount, idxVarEfl, cOpBits);
+    AssertFailed();
+    return iemNativeEmitBrk(pReNative, off, 0x666);
+}
+
+
 #endif /* !VMM_INCLUDED_SRC_VMMAll_target_x86_IEMAllN8veEmit_x86_h */
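The ARM64 path computes PF by xor-folding the result (eor with the result shifted right by 4, 2 and 1, then inverting bit 0 and inserting it at the PF position). The same folding trick in plain C, as a sketch of what those four emitted instructions do:

```c
#include <stdint.h>

/* x86 PF is set when the low 8 bits of the result contain an even number of
   set bits; folding with shifts of 4, 2 and 1 leaves that parity in bit 0. */
static uint32_t parityFlagFromResult(uint32_t uResult)
{
    uint32_t u = uResult;
    u ^= u >> 4;
    u ^= u >> 2;
    u ^= u >> 1;
    return (~u) & 1;    /* 1 = even parity = PF set */
}
```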
trunk/src/VBox/VMM/include/IEMMc.h
r104076 → r104099


-/** The @a a_fS pportedHosts mask are ORed together RT_ARCH_VAL_XXX values. */
+/** The @a a_fSupportedHosts mask are ORed together RT_ARCH_VAL_XXX values. */
 #define IEM_MC_NATIVE_IF(a_fSupportedHosts) if (false) {
 #define IEM_MC_NATIVE_ELSE() } else {
…
 #define IEM_MC_NATIVE_EMIT_7(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6)
 #define IEM_MC_NATIVE_EMIT_8(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6, a7) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6), (void)(a7)
+
+/** This can be used to direct the register allocator when dealing with
+ * x86/AMD64 instructions (like SHL reg,CL) that takes fixed registers. */
+#define IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(a_VarNm, a_idxHostReg) ((void)0)

trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h
r104088 → r104099

     kIemNativeInstrCond_nc,
     kIemNativeInstrCond_e,
+    kIemNativeInstrCond_z  = kIemNativeInstrCond_e,
     kIemNativeInstrCond_ne,
+    kIemNativeInstrCond_nz = kIemNativeInstrCond_ne,
     kIemNativeInstrCond_be,
     kIemNativeInstrCond_nbe,
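The new _z/_nz names cost nothing: JE/JZ and JNE/JNZ are the same x86 condition codes, so the enum simply gives the existing values a second name. A minimal stand-alone illustration of the aliasing (not the VBox enum):

```c
/* Two mnemonics, one encoding: aliased enumerators share the value and the
   next plain enumerator continues counting from it. */
typedef enum
{
    MYCOND_E,                   /* 0 */
    MYCOND_Z  = MYCOND_E,       /* 0 - same condition as 'e' */
    MYCOND_NE,                  /* 1 */
    MYCOND_NZ = MYCOND_NE       /* 1 */
} MYCONDEXAMPLE;
```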
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
r104076 → r104099

 #define IEM_MC_NATIVE_EMIT_8(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6, a7) do { (void)fMcBegin; (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6), (void)(a7); } while (0)

+#define IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(a_VarNm, a_idxHostReg) do { (void)fMcBegin; CHK_VAR(a_VarNm); AssertCompile(a_idxHostReg <= X86_GREG_COUNT); } while (0)
+
 #define IEM_MC_NO_NATIVE_RECOMPILE() ((void)0)
