- Changeset: r102577 (diff against r102572)
- Timestamp: Dec 11, 2023 9:48:23 PM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 4 edited
  - IEMAllInstOneByte.cpp.h (modified) (1 diff)
  - IEMAllInstPython.py (modified) (1 diff)
  - IEMAllN8vePython.py (modified) (1 diff)
  - IEMAllN8veRecompiler.cpp (modified) (4 diffs)
trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h
 {
     IEMOP_MNEMONIC(push_rSP, "push rSP");
-    if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_8086)
-    {
-        IEM_MC_BEGIN(0, 1, IEM_MC_F_ONLY_8086, 0);
-        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-        IEM_MC_LOCAL(uint16_t, u16Value);
-        IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xSP);
-        IEM_MC_SUB_LOCAL_U16(u16Value, 2);
-        IEM_MC_PUSH_U16(u16Value);
-        IEM_MC_ADVANCE_RIP_AND_FINISH();
-        IEM_MC_END();
-    }
-    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSP);
+    if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_8086)
+        return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSP);
+
+    /* 8086 works differently wrt to 'push sp' compared to 80186 and later. */
+    IEM_MC_BEGIN(0, 1, IEM_MC_F_ONLY_8086, 0);
+    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+    IEM_MC_LOCAL(uint16_t, u16Value);
+    IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xSP);
+    IEM_MC_SUB_LOCAL_U16(u16Value, 2);
+    IEM_MC_PUSH_U16(u16Value);
+    IEM_MC_ADVANCE_RIP_AND_FINISH();
+    IEM_MC_END();
 }
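For context: on the 8086, PUSH SP stores the value SP has after the decrement, while the 80186 and later store the value SP had before the push, which is why the 8086-only path above fetches SP, subtracts 2 and pushes the result. A minimal standalone C sketch of the two behaviours (illustrative only, not VBox code):

    #include <stdint.h>
    #include <stdio.h>

    /* Value that ends up on the stack for 'push sp' when SP == uSp beforehand. */
    static uint16_t pushedSp8086(uint16_t uSp)  { return (uint16_t)(uSp - 2); } /* the new, decremented SP */
    static uint16_t pushedSp80186(uint16_t uSp) { return uSp; }                 /* the original SP */

    int main(void)
    {
        uint16_t const uSp = 0x1000;
        printf("8086:  pushes %#06x\n", (unsigned)pushedSp8086(uSp));  /* 0x0ffe */
        printf("80186: pushes %#06x\n", (unsigned)pushedSp80186(uSp)); /* 0x1000 */
        return 0;
    }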
trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py
     'IEM_MC_OR_LOCAL_U32':      (McBlock.parseMcGeneric, False, False, ),
     'IEM_MC_OR_LOCAL_U8':       (McBlock.parseMcGeneric, False, False, ),
-    'IEM_MC_POP_GREG_U16':      (McBlock.parseMcGeneric, True,  False, ),
-    'IEM_MC_POP_GREG_U32':      (McBlock.parseMcGeneric, True,  False, ),
-    'IEM_MC_POP_GREG_U64':      (McBlock.parseMcGeneric, True,  False, ),
+    'IEM_MC_POP_GREG_U16':      (McBlock.parseMcGeneric, True,  True,  ),
+    'IEM_MC_POP_GREG_U32':      (McBlock.parseMcGeneric, True,  True,  ),
+    'IEM_MC_POP_GREG_U64':      (McBlock.parseMcGeneric, True,  True,  ),
     'IEM_MC_PREPARE_AVX_USAGE': (McBlock.parseMcGeneric, False, True),
     'IEM_MC_PREPARE_FPU_USAGE': (McBlock.parseMcGeneric, False, True),
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py
     'IEM_MC_FLAT64_PUSH_U16':     (None, True,  False, ),
     'IEM_MC_FLAT64_PUSH_U64':     (None, True,  False, ),
-    'IEM_MC_FLAT64_POP_GREG_U16': (None, True,  False, ),
-    'IEM_MC_FLAT64_POP_GREG_U64': (None, True,  False, ),
+    'IEM_MC_FLAT64_POP_GREG_U16': (None, True,  True,  ),
+    'IEM_MC_FLAT64_POP_GREG_U64': (None, True,  True,  ),
     'IEM_MC_FLAT32_PUSH_U16':     (None, True,  False, ),
     'IEM_MC_FLAT32_PUSH_U32':     (None, True,  False, ),
-    'IEM_MC_FLAT32_POP_GREG_U16': (None, True,  False, ),
-    'IEM_MC_FLAT32_POP_GREG_U32': (None, True,  False, ),
+    'IEM_MC_FLAT32_POP_GREG_U16': (None, True,  True,  ),
+    'IEM_MC_FLAT32_POP_GREG_U32': (None, True,  True,  ),
 };
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
 
 
+/**
+ * Used by TB code to push unsigned 16-bit value onto a generic stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackPushU16,(PVMCPUCC pVCpu, uint16_t u16Value))
+{
+    iemMemStackPushU16Jmp(pVCpu, u16Value); /** @todo iemMemStackPushU16SafeJmp */
+}
+
+
+/**
+ * Used by TB code to push unsigned 32-bit value onto a generic stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackPushU32,(PVMCPUCC pVCpu, uint32_t u32Value))
+{
+    iemMemStackPushU32Jmp(pVCpu, u32Value); /** @todo iemMemStackPushU32SafeJmp */
+}
+
+
+/**
+ * Used by TB code to push 32-bit selector value onto a generic stack.
+ *
+ * Intel CPUs doesn't do write a whole dword, thus the special function.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackPushU32SReg,(PVMCPUCC pVCpu, uint32_t u32Value))
+{
+    iemMemStackPushU32SRegJmp(pVCpu, u32Value); /** @todo iemMemStackPushU32SRegSafeJmp */
+}
+
+
+/**
+ * Used by TB code to push unsigned 64-bit value onto a generic stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackPushU64,(PVMCPUCC pVCpu, uint64_t u64Value))
+{
+    iemMemStackPushU64Jmp(pVCpu, u64Value); /** @todo iemMemStackPushU64SafeJmp */
+}
+
+
+/**
+ * Used by TB code to pop a 16-bit general purpose register off a generic stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackPopGRegU16,(PVMCPUCC pVCpu, uint8_t iGReg))
+{
+    iemMemStackPopGRegU16Jmp(pVCpu, iGReg); /** @todo iemMemStackPopGRegU16SafeJmp */
+}
+
+
+/**
+ * Used by TB code to pop a 32-bit general purpose register off a generic stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackPopGRegU32,(PVMCPUCC pVCpu, uint8_t iGReg))
+{
+    iemMemStackPopGRegU32Jmp(pVCpu, iGReg); /** @todo iemMemStackPopGRegU32SafeJmp */
+}
+
+
+/**
+ * Used by TB code to pop a 64-bit general purpose register off a generic stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackPopGRegU64,(PVMCPUCC pVCpu, uint8_t iGReg))
+{
+    iemMemStackPopGRegU64Jmp(pVCpu, iGReg); /** @todo iemMemStackPopGRegU64SafeJmp */
+}
+
+
+
 /*********************************************************************************************************************************
 *   Helpers: Flat memory fetches and stores.                                                                                    *
…
     iemMemFlatStoreDataU64Jmp(pVCpu, GCPtrMem, u64Value); /** @todo use iemMemStoreDataU64SafeJmp */
 }
+
+
+
+/**
+ * Used by TB code to push unsigned 16-bit value onto a flat 32-bit stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat32PushU16,(PVMCPUCC pVCpu, uint16_t u16Value))
+{
+    iemMemFlat32StackPushU16Jmp(pVCpu, u16Value); /** @todo iemMemFlat32StackPushU16SafeJmp */
+}
+
+
+/**
+ * Used by TB code to push unsigned 32-bit value onto a flat 32-bit stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat32PushU32,(PVMCPUCC pVCpu, uint32_t u32Value))
+{
+    iemMemFlat32StackPushU32Jmp(pVCpu, u32Value); /** @todo iemMemFlat32StackPushU32SafeJmp */
+}
+
+
+/**
+ * Used by TB code to push segment selector value onto a flat 32-bit stack.
+ *
+ * Intel CPUs doesn't do write a whole dword, thus the special function.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat32PushU32SReg,(PVMCPUCC pVCpu, uint32_t u32Value))
+{
+    iemMemFlat32StackPushU32SRegJmp(pVCpu, u32Value); /** @todo iemMemFlat32StackPushU32SRegSafeJmp */
+}
+
+
+/**
+ * Used by TB code to pop a 16-bit general purpose register off a flat 32-bit stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat32PopGRegU16,(PVMCPUCC pVCpu, uint8_t iGReg))
+{
+    iemMemFlat32StackPopGRegU16Jmp(pVCpu, iGReg); /** @todo iemMemFlat32StackPopGRegU16SafeJmp */
+}
+
+
+/**
+ * Used by TB code to pop a 64-bit general purpose register off a flat 32-bit stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat32PopGRegU32,(PVMCPUCC pVCpu, uint8_t iGReg))
+{
+    iemMemFlat32StackPopGRegU32Jmp(pVCpu, iGReg); /** @todo iemMemFlat32StackPopGRegU32SafeJmp */
+}
+
+
+
+/**
+ * Used by TB code to push unsigned 16-bit value onto a flat 64-bit stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat64PushU16,(PVMCPUCC pVCpu, uint16_t u16Value))
+{
+    iemMemFlat64StackPushU16Jmp(pVCpu, u16Value); /** @todo iemMemFlat64StackPushU16SafeJmp */
+}
+
+
+/**
+ * Used by TB code to push unsigned 64-bit value onto a flat 64-bit stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat64PushU64,(PVMCPUCC pVCpu, uint64_t u64Value))
+{
+    iemMemFlat64StackPushU64Jmp(pVCpu, u64Value); /** @todo iemMemFlat64StackPushU64SafeJmp */
+}
+
+
+/**
+ * Used by TB code to pop a 16-bit general purpose register off a flat 64-bit stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat64PopGRegU16,(PVMCPUCC pVCpu, uint8_t iGReg))
+{
+    iemMemFlat64StackPopGRegU16Jmp(pVCpu, iGReg); /** @todo iemMemFlat64StackPopGRegU16SafeJmp */
+}
+
+
+/**
+ * Used by TB code to pop a 64-bit general purpose register off a flat 64-bit stack.
+ */
+IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat64PopGRegU64,(PVMCPUCC pVCpu, uint8_t iGReg))
+{
+    iemMemFlat64StackPopGRegU64Jmp(pVCpu, iGReg); /** @todo iemMemFlat64StackPopGRegU64SafeJmp */
+}
+
 
 
…
 
 /*********************************************************************************************************************************
+*   Stack Accesses.                                                                                                              *
+*********************************************************************************************************************************/
+/* RT_MAKE_U32_FROM_U8(cBitsVar, cBitsFlat, fSReg, 0) */
+#define IEM_MC_PUSH_U16(a_u16Value) \
+    off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16,  0, 0, 0), (uintptr_t)iemNativeHlpStackPushU16)
+#define IEM_MC_PUSH_U32(a_u32Value) \
+    off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32,  0, 0, 0), (uintptr_t)iemNativeHlpStackPushU32)
+#define IEM_MC_PUSH_U32_SREG(a_uSegVal) \
+    off = iemNativeEmitStackPush(pReNative, off, a_uSegVal,  RT_MAKE_U32_FROM_U8(32,  0, 1, 0), (uintptr_t)iemNativeHlpStackPushU32SReg)
+#define IEM_MC_PUSH_U64(a_u64Value) \
+    off = iemNativeEmitStackPush(pReNative, off, a_u64Value, RT_MAKE_U32_FROM_U8(64,  0, 0, 0), (uintptr_t)iemNativeHlpStackPushU64)
+
+#define IEM_MC_FLAT32_PUSH_U16(a_u16Value) \
+    off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16, 32, 0, 0), (uintptr_t)iemNativeHlpStackFlat32PushU16)
+#define IEM_MC_FLAT32_PUSH_U32(a_u32Value) \
+    off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32, 32, 0, 0), (uintptr_t)iemNativeHlpStackFlat32PushU32)
+#define IEM_MC_FLAT32_PUSH_U32_SREG(a_u32Value) \
+    off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32, 32, 1, 0), (uintptr_t)iemNativeHlpStackFlat32PushU32SReg)
+
+#define IEM_MC_FLAT64_PUSH_U16(a_u16Value) \
+    off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16, 64, 0, 0), (uintptr_t)iemNativeHlpStackFlat64PushU16)
+#define IEM_MC_FLAT64_PUSH_U64(a_u64Value) \
+    off = iemNativeEmitStackPush(pReNative, off, a_u64Value, RT_MAKE_U32_FROM_U8(64, 64, 0, 0), (uintptr_t)iemNativeHlpStackFlat64PushU64)
+
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitStackPush(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarValue,
+                       uint32_t cBitsVarAndFlat, uintptr_t pfnFunction)
+{
+    RT_NOREF(pReNative, off, idxVarValue, cBitsVarAndFlat, pfnFunction);
+    AssertReleaseFailed();
+    return off;
+}
+
+
+
+/* RT_MAKE_U32_FROM_U8(cBitsVar, cBitsFlat, 0, 0) */
+#define IEM_MC_POP_GREG_U16(a_iGReg) \
+    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(16,  0, 0, 0), \
+                                    (uintptr_t)iemNativeHlpStackPopGRegU16, pCallEntry->idxInstr)
+#define IEM_MC_POP_GREG_U32(a_iGReg) \
+    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(32,  0, 0, 0), \
+                                    (uintptr_t)iemNativeHlpStackPopGRegU32, pCallEntry->idxInstr)
+#define IEM_MC_POP_GREG_U64(a_iGReg) \
+    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(64,  0, 0, 0), \
+                                    (uintptr_t)iemNativeHlpStackPopGRegU64, pCallEntry->idxInstr)
+
+#define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg) \
+    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(16, 32, 0, 0), \
+                                    (uintptr_t)iemNativeHlpStackFlat32PopGRegU16, pCallEntry->idxInstr)
+#define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg) \
+    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(32, 32, 0, 0), \
+                                    (uintptr_t)iemNativeHlpStackFlat32PopGRegU32, pCallEntry->idxInstr)
+
+#define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg) \
+    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(16, 64, 0, 0), \
+                                    (uintptr_t)iemNativeHlpStackFlat64PopGRegU16, pCallEntry->idxInstr)
+#define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg) \
+    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(64, 64, 0, 0), \
+                                    (uintptr_t)iemNativeHlpStackFlat64PopGRegU64, pCallEntry->idxInstr)
+
+/** IEM_MC[|_FLAT32|_FLAT64]_POP_GREG_U16/32/64 */
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitStackPopGReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxGReg,
+                          uint32_t cBitsVarAndFlat, uintptr_t pfnFunction, uint8_t idxInstr)
+{
+    /*
+     * Assert sanity.
+     */
+    Assert(idxGReg < 16);
+#ifdef VBOX_STRICT
+    if (RT_BYTE2(cBitsVarAndFlat) != 0)
+    {
+        Assert(   (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
+               || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
+               || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT);
+        Assert(   pfnFunction
+               == (  cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat32PopGRegU16
+                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat32PopGRegU32
+                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 16, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat64PopGRegU16
+                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 64, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat64PopGRegU64
+                   : UINT64_C(0xc000b000a0009000) ));
+    }
+    else
+        Assert(   pfnFunction
+               == (  cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackPopGRegU16
+                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackPopGRegU32
+                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackPopGRegU64
+                   : UINT64_C(0xc000b000a0009000) ));
+#endif
+
+#ifdef VBOX_STRICT
+    /*
+     * Check that the fExec flags we've got make sense.
+     */
+    off = iemNativeEmitExecFlagsCheck(pReNative, off, pReNative->fExec);
+#endif
+
+    /*
+     * To keep things simple we have to commit any pending writes first as we
+     * may end up making calls.
+     */
+    /** @todo we could postpone this till we make the call and reload the
+     * registers after returning from the call. Not sure if that's sensible or
+     * not, though. */
+    off = iemNativeRegFlushPendingWrites(pReNative, off);
+
+    /*
+     * Move/spill/flush stuff out of call-volatile registers.
+     * This is the easy way out. We could contain this to the tlb-miss branch
+     * by saving and restoring active stuff here.
+     */
+    /** @todo save+restore active registers and maybe guest shadows in tlb-miss. */
+    off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 0 /* vacate all non-volatile regs */);
+
+    /* For now, flush the any shadow copy of the guest register that is about
+       to be popped and the xSP register. */
+    iemNativeRegFlushGuestShadows(pReNative, RT_BIT_64(IEMNATIVEGSTREG_GPR(idxGReg)) | RT_BIT_64(X86_GREG_xSP));
+
+    /*
+     * Define labels and allocate the result register (trying for the return
+     * register if we can).
+     */
+    uint16_t const uTlbSeqNo       = pReNative->uTlbSeqNo++;
+    uint32_t const idxLabelTlbMiss = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbMiss, UINT32_MAX, uTlbSeqNo);
+    uint32_t const idxLabelTlbDone = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbDone, UINT32_MAX, uTlbSeqNo);
+
+    /*
+     * First we try to go via the TLB.
+     */
+    //pReNative->pInstrBuf[off++] = 0xcc;
+    /** @todo later. */
+    RT_NOREF(cBitsVarAndFlat);
+
+    /*
+     * Call helper to do the popping.
+     */
+    iemNativeLabelDefine(pReNative, idxLabelTlbMiss, off);
+
+#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
+    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
+#else
+    RT_NOREF(idxInstr);
+#endif
+
+    /* IEMNATIVE_CALL_ARG1_GREG = iGReg */
+    off = iemNativeEmitLoadGpr8Imm(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxGReg);
+
+    /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
+    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
+
+    /* Done setting up parameters, make the call. */
+    off = iemNativeEmitCallImm(pReNative, off, pfnFunction);
+
+    iemNativeLabelDefine(pReNative, idxLabelTlbDone, off);
+
+    return off;
+}
+
+
+
+/*********************************************************************************************************************************
 *   Memory mapping (IEM_MEM_MAP_XXX, IEM_MEM_FLAT_MAP_XXX).                                                                     *
 *********************************************************************************************************************************/
…
 }
 
-
-
-/*********************************************************************************************************************************
-*   Stack Accesses.                                                                                                              *
-*********************************************************************************************************************************/
-#define IEM_MC_PUSH_U16(a_u16Value)         iemMemStackPushU16Jmp(pVCpu, (a_u16Value))
-#define IEM_MC_PUSH_U32(a_u32Value)         iemMemStackPushU32Jmp(pVCpu, (a_u32Value))
-#define IEM_MC_PUSH_U32_SREG(a_uSegVal)     iemMemStackPushU32SRegJmp(pVCpu, (a_uSegVal))
-#define IEM_MC_PUSH_U64(a_u64Value)         iemMemStackPushU64Jmp(pVCpu, (a_u64Value))
-#define IEM_MC_FLAT32_PUSH_U16(a_u16Value)
-#define IEM_MC_FLAT32_PUSH_U32(a_u32Value)
-#define IEM_MC_FLAT64_PUSH_U16(a_u16Value)
-#define IEM_MC_FLAT64_PUSH_U64(a_u64Value)
-
-#define IEM_MC_POP_GREG_U16(a_pu16Value)    (*(a_pu16Value) = iemMemStackPopU16Jmp(pVCpu))
-#define IEM_MC_POP_U32(a_pu32Value)         (*(a_pu32Value) = iemMemStackPopU32Jmp(pVCpu))
-#define IEM_MC_POP_U64(a_pu64Value)         (*(a_pu64Value) = iemMemStackPopU64Jmp(pVCpu))
-#define IEM_MC_FLAT64_POP_U16(a_pu16Value)
-#define IEM_MC_FLAT64_POP_U64(a_pu32Value)
-#define IEM_MC_FLAT32_POP_U16(a_pu16Value)
-#define IEM_MC_FLAT32_POP_U32(a_pu64Value)
 
 
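A note on the RT_MAKE_U32_FROM_U8(cBitsVar, cBitsFlat, fSReg, 0) packing the new push/pop macros use: the operand width goes in the least significant byte, the flat-stack width in the second byte (which is what the RT_BYTE2(cBitsVarAndFlat) check in iemNativeEmitStackPopGReg keys off to tell flat mode from the generic segmented case), and the segment-register flag in the third. A small standalone sketch of the same byte layout, using plain shifts instead of the IPRT macros (the helper name is illustrative, not a VBox API):

    #include <assert.h>
    #include <stdint.h>

    /* Same byte order as RT_MAKE_U32_FROM_U8(b0, b1, b2, b3): b0 is the least
       significant byte.  Here b0 = cBitsVar, b1 = cBitsFlat, b2 = fSReg. */
    static uint32_t makeVarAndFlat(uint8_t cBitsVar, uint8_t cBitsFlat, uint8_t fSReg)
    {
        return (uint32_t)cBitsVar | ((uint32_t)cBitsFlat << 8) | ((uint32_t)fSReg << 16);
    }

    int main(void)
    {
        /* IEM_MC_FLAT64_POP_GREG_U16: 16-bit pop off a 64-bit flat stack. */
        uint32_t const u = makeVarAndFlat(16, 64, 0);
        assert(( u        & 0xff) == 16); /* cBitsVar: operand width             */
        assert(((u >>  8) & 0xff) == 64); /* cBitsFlat: what RT_BYTE2() extracts */
        assert(((u >> 16) & 0xff) ==  0); /* fSReg: only set for selector pushes */
        return 0;
    }

So IEM_MC_FLAT64_POP_GREG_U16, for instance, passes RT_MAKE_U32_FROM_U8(16, 64, 0, 0) together with iemNativeHlpStackFlat64PopGRegU16, while a zero cBitsFlat byte routes to the generic iemNativeHlpStackPopGRegU16/U32/U64 helpers.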