VirtualBox

Changeset 103318 in vbox for trunk


Ignore:
Timestamp:
Feb 12, 2024 4:24:58 PM (8 months ago)
Author:
vboxsync
Message:

VMM/IEM: Liveness analysis, part 10: Debugging, asserting liveness state sanity, major fixes, new storage format. bugref:10372

Location:
trunk/src/VBox/VMM
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veLiveness.cpp

    r103257 r103318  
    5151 * BEGIN & END as well as internal workers.
    5252 */
    53 #define IEM_MC_BEGIN(a_cArgs, a_cLocals, a_fMcFlags, a_fCImplFlags) \
     53#ifdef IEMLIVENESS_OLD_LAYOUT
     54# define IEM_MC_BEGIN(a_cArgs, a_cLocals, a_fMcFlags, a_fCImplFlags) \
    5455    { \
    5556        /* Define local variables that we use to accumulate the liveness state changes in. */ \
     
    5960        IEMLIVENESSPART2 LiveMaskPart2   = { 0 }; \
    6061        bool             fDoneXpctOrCall = false
    61 
    62 
    63 #define IEM_LIVENESS_MARK_XCPT_OR_CALL() do { \
     62#else
     63# define IEM_MC_BEGIN(a_cArgs, a_cLocals, a_fMcFlags, a_fCImplFlags) \
     64    { \
     65        /* Define local variables that we use to accumulate the liveness state changes in. */ \
     66        IEMLIVENESSBIT  LiveStateBit0   = { 0 }; \
     67        IEMLIVENESSBIT  LiveStateBit1   = { 0 }; \
     68        IEMLIVENESSBIT  LiveMask        = { 0 }; \
     69        bool            fDoneXpctOrCall = false
     70#endif
     71
     72AssertCompile(IEMLIVENESS_STATE_INPUT == IEMLIVENESS_STATE_MASK);
     73#ifdef IEMLIVENESS_OLD_LAYOUT
     74# define IEM_LIVENESS_MARK_XCPT_OR_CALL() do { \
    6475            if (!fDoneXpctOrCall) \
    6576            { \
     
    7788            } \
    7889        } while (0)
    79 
    80 
     90#else
     91AssertCompile(IEMLIVENESSBIT0_XCPT_OR_CALL == 0 && IEMLIVENESSBIT1_XCPT_OR_CALL != 0);
     92# define IEM_LIVENESS_MARK_XCPT_OR_CALL() do { \
     93            if (!fDoneXpctOrCall) \
     94            { \
     95                LiveStateBit0.bm64 |= pIncoming->Bit0.bm64 & pIncoming->Bit1.bm64 & ~LiveMask.bm64; \
     96                LiveStateBit1.bm64 |= IEMLIVENESSBIT1_XCPT_OR_CALL; \
     97                \
     98                LiveMask.bm64   |= IEMLIVENESSBIT_MASK; /* could also use UINT64_MAX here, but makes little no(?) difference */ \
     99                fDoneXpctOrCall  = true;                /* when compiling with gcc and cl.exe on x86 - may on arm, though. */ \
     100            } \
     101        } while (0)
     102#endif
     103
     104
     105AssertCompile(IEMLIVENESS_STATE_CLOBBERED == 0);
     106#ifdef IEMLIVENESS_OLD_LAYOUT
     107# define IEM_LIVENESS_ALL_EFLAGS_CLOBBERED() do { \
     108            LiveMaskPart2.bm64  |= IEMLIVENESSPART2_ALL_EFL_MASK; \
     109        } while (0)
    81110AssertCompile(IEMLIVENESSPART1_ALL_EFL_MASK == 0);
    82 #define IEM_LIVENESS_ALL_EFLAGS_CLOBBERED() do { \
    83             LiveMaskPart2.bm64  |= IEMLIVENESSPART2_ALL_EFL_MASK; \
    84         } while (0)
    85 #define IEM_LIVENESS_ALL_EFLAGS_INPUT() do { \
     111# define IEM_LIVENESS_ALL_EFLAGS_INPUT() do { \
    86112            LiveMaskPart2.bm64  |= IEMLIVENESSPART2_ALL_EFL_MASK; \
    87113            LiveStatePart2.bm64 |= IEMLIVENESSPART2_ALL_EFL_INPUT; \
    88114        } while (0)
    89 
    90 
    91 AssertCompile(IEMLIVENESS_STATE_CLOBBERED == 0);
    92 #define IEM_LIVENESS_ONE_EFLAG_CLOBBERED(a_Name) do { \
     115#else
     116# define IEM_LIVENESS_ALL_EFLAGS_CLOBBERED() do { \
     117            LiveMask.bm64       |= IEMLIVENESSBIT_ALL_EFL_MASK; \
     118        } while (0)
     119AssertCompile(IEMLIVENESS_STATE_INPUT == IEMLIVENESS_STATE_MASK);
     120# define IEM_LIVENESS_ALL_EFLAGS_INPUT() do { \
     121            LiveStateBit0.bm64  |= IEMLIVENESSBIT_ALL_EFL_MASK; \
     122            LiveStateBit1.bm64  |= IEMLIVENESSBIT_ALL_EFL_MASK; \
     123            LiveMask.bm64       |= IEMLIVENESSBIT_ALL_EFL_MASK; \
     124        } while (0)
     125#endif
     126
     127
     128#ifdef IEMLIVENESS_OLD_LAYOUT
     129# define IEM_LIVENESS_ONE_EFLAG_CLOBBERED(a_Name) do { \
    93130            LiveMaskPart2.a_Name  |= IEMLIVENESS_STATE_MASK; \
    94131        } while (0)
    95 #define IEM_LIVENESS_ONE_EFLAG_INPUT(a_Name) do { \
     132# define IEM_LIVENESS_ONE_EFLAG_INPUT(a_Name) do { \
    96133            LiveMaskPart2.a_Name  |= IEMLIVENESS_STATE_MASK; \
    97134            LiveStatePart2.a_Name |= IEMLIVENESS_STATE_INPUT; \
    98135        } while (0)
    99 
    100 
    101 #define IEM_LIVENESS_GPR_CLOBBERED(a_idxGpr) do { \
    102             LiveMaskPart1.bmGprs  |= (uint32_t)IEMLIVENESS_STATE_MASK  << ((a_idxGpr) * IEMLIVENESS_STATE_BIT_COUNT); \
    103         } while (0)
    104 #define IEM_LIVENESS_GPR_INPUT(a_idxGpr) do { \
    105             LiveMaskPart1.bmGprs  |= (uint32_t)IEMLIVENESS_STATE_MASK  << ((a_idxGpr) * IEMLIVENESS_STATE_BIT_COUNT); \
    106             LiveStatePart1.bmGprs |= (uint32_t)IEMLIVENESS_STATE_INPUT << ((a_idxGpr) * IEMLIVENESS_STATE_BIT_COUNT); \
    107         } while (0)
    108 
    109 
    110 #define IEM_LIVENESS_SEG_BASE_CLOBBERED(a_iSeg) do { \
    111             LiveMaskPart1.bmSegBase    |= (uint32_t)IEMLIVENESS_STATE_MASK  << ((a_iSeg) * IEMLIVENESS_STATE_BIT_COUNT); \
    112         } while (0)
    113 #define IEM_LIVENESS_SEG_BASE_INPUT(a_iSeg) do { \
    114             LiveMaskPart1.bmSegBase    |= (uint32_t)IEMLIVENESS_STATE_MASK  << ((a_iSeg) * IEMLIVENESS_STATE_BIT_COUNT); \
    115             LiveStatePart1.bmSegBase   |= (uint32_t)IEMLIVENESS_STATE_INPUT << ((a_iSeg) * IEMLIVENESS_STATE_BIT_COUNT); \
    116         } while (0)
    117 
    118 
    119 #define IEM_LIVENESS_SEG_ATTRIB_CLOBBERED(a_iSeg) do { \
    120             LiveMaskPart1.bmSegAttrib  |= (uint32_t)IEMLIVENESS_STATE_MASK  << ((a_iSeg) * IEMLIVENESS_STATE_BIT_COUNT); \
    121         } while (0)
    122 #define IEM_LIVENESS_SEG_ATTRIB_INPUT(a_iSeg) do { \
    123             LiveMaskPart1.bmSegAttrib  |= (uint32_t)IEMLIVENESS_STATE_MASK  << ((a_iSeg) * IEMLIVENESS_STATE_BIT_COUNT); \
    124             LiveStatePart1.bmSegAttrib |= (uint32_t)IEMLIVENESS_STATE_INPUT << ((a_iSeg) * IEMLIVENESS_STATE_BIT_COUNT); \
    125         } while (0)
    126 
    127 
    128 #define IEM_LIVENESS_SEG_LIMIT_CLOBBERED(a_iSeg) do { \
    129             LiveMaskPart2.bmSegLimit   |= (uint32_t)IEMLIVENESS_STATE_MASK  << ((a_iSeg) * IEMLIVENESS_STATE_BIT_COUNT); \
    130         } while (0)
    131 #define IEM_LIVENESS_SEG_LIMIT_INPUT(a_iSeg) do { \
    132             LiveMaskPart2.bmSegLimit   |= (uint32_t)IEMLIVENESS_STATE_MASK  << ((a_iSeg) * IEMLIVENESS_STATE_BIT_COUNT); \
    133             LiveStatePart2.bmSegLimit  |= (uint32_t)IEMLIVENESS_STATE_INPUT << ((a_iSeg) * IEMLIVENESS_STATE_BIT_COUNT); \
    134         } while (0)
    135 
    136 
    137 #define IEM_LIVENESS_SEG_SEL_CLOBBERED(a_iSeg) do { \
    138             LiveMaskPart2.bmSegSel     |= (uint32_t)IEMLIVENESS_STATE_MASK  << ((a_iSeg) * IEMLIVENESS_STATE_BIT_COUNT); \
    139         } while (0)
    140 #define IEM_LIVENESS_SEG_SEL_INPUT(a_iSeg) do { \
    141             LiveMaskPart2.bmSegSel     |= (uint32_t)IEMLIVENESS_STATE_MASK  << ((a_iSeg) * IEMLIVENESS_STATE_BIT_COUNT); \
    142             LiveStatePart2.bmSegSel    |= (uint32_t)IEMLIVENESS_STATE_INPUT << ((a_iSeg) * IEMLIVENESS_STATE_BIT_COUNT); \
    143         } while (0)
     136#else
     137# define IEM_LIVENESS_ONE_EFLAG_CLOBBERED(a_Name) do { \
     138            LiveMask.a_Name       |= 1; \
     139        } while (0)
     140# define IEM_LIVENESS_ONE_EFLAG_INPUT(a_Name) do { \
     141            LiveStateBit0.a_Name  |= 1; \
     142            LiveStateBit1.a_Name  |= 1; \
     143            LiveMask.a_Name       |= 1; \
     144        } while (0)
     145#endif
     146
     147
     148/* Generic bitmap (bmGpr, bmSegBase, ++) setters. */
     149#ifdef IEMLIVENESS_OLD_LAYOUT
     150# define IEM_LIVENESS_BITMAP_MEMBER_CLOBBERED(a_Part, a_bmMember, a_iElement) do { \
     151            LiveMaskPart##a_Part.a_bmMember  |= (uint32_t)IEMLIVENESS_STATE_MASK  << ((a_iElement) * IEMLIVENESS_STATE_BIT_COUNT); \
     152        } while (0)
     153# define IEM_LIVENESS_BITMAP_MEMBER_INPUT(a_Part, a_bmMember, a_iElement) do { \
     154            LiveMaskPart##a_Part.a_bmMember  |= (uint32_t)IEMLIVENESS_STATE_MASK  << ((a_iElement) * IEMLIVENESS_STATE_BIT_COUNT); \
     155            LiveStatePart##a_Part.a_bmMember |= (uint32_t)IEMLIVENESS_STATE_INPUT << ((a_iElement) * IEMLIVENESS_STATE_BIT_COUNT); \
     156        } while (0)
     157#else
     158# define IEM_LIVENESS_BITMAP_MEMBER_CLOBBERED(a_Part, a_bmMember, a_iElement) do { \
     159            LiveMask.a_bmMember  |= RT_BIT_64(a_iElement); \
     160        } while (0)
     161# define IEM_LIVENESS_BITMAP_MEMBER_INPUT(a_Part, a_bmMember, a_iElement) do { \
     162            LiveStateBit0.a_bmMember  |= RT_BIT_64(a_iElement); \
     163            LiveStateBit1.a_bmMember  |= RT_BIT_64(a_iElement); \
     164            LiveMask.a_bmMember       |= RT_BIT_64(a_iElement); \
     165        } while (0)
     166#endif
     167
     168
     169#define IEM_LIVENESS_GPR_CLOBBERED(a_idxGpr)        IEM_LIVENESS_BITMAP_MEMBER_CLOBBERED(1, bmGprs, a_idxGpr)
     170#define IEM_LIVENESS_GPR_INPUT(a_idxGpr)            IEM_LIVENESS_BITMAP_MEMBER_INPUT(    1, bmGprs, a_idxGpr)
     171
     172
     173#define IEM_LIVENESS_SEG_BASE_CLOBBERED(a_iSeg)     IEM_LIVENESS_BITMAP_MEMBER_CLOBBERED(1, bmSegBase, a_iSeg)
     174#define IEM_LIVENESS_SEG_BASE_INPUT(a_iSeg)         IEM_LIVENESS_BITMAP_MEMBER_INPUT(    1, bmSegBase, a_iSeg)
     175
     176
     177#define IEM_LIVENESS_SEG_ATTRIB_CLOBBERED(a_iSeg)   IEM_LIVENESS_BITMAP_MEMBER_CLOBBERED(1, bmSegAttrib, a_iSeg)
     178#define IEM_LIVENESS_SEG_ATTRIB_INPUT(a_iSeg)       IEM_LIVENESS_BITMAP_MEMBER_INPUT(    1, bmSegAttrib, a_iSeg)
     179
     180
     181#define IEM_LIVENESS_SEG_LIMIT_CLOBBERED(a_iSeg)    IEM_LIVENESS_BITMAP_MEMBER_CLOBBERED(2, bmSegLimit, a_iSeg)
     182#define IEM_LIVENESS_SEG_LIMIT_INPUT(a_iSeg)        IEM_LIVENESS_BITMAP_MEMBER_INPUT(    2, bmSegLimit, a_iSeg)
     183
     184
     185#define IEM_LIVENESS_SEG_SEL_CLOBBERED(a_iSeg)      IEM_LIVENESS_BITMAP_MEMBER_CLOBBERED(2, bmSegSel, a_iSeg)
     186#define IEM_LIVENESS_SEG_SEL_INPUT(a_iSeg)          IEM_LIVENESS_BITMAP_MEMBER_INPUT(    2, bmSegSel, a_iSeg)
    144187
    145188
     
    154197
    155198#define IEM_LIVENESS_STACK() do { \
    156         IEM_LIVENESS_MEM(X86_GREG_xSP); \
     199        IEM_LIVENESS_MEM(X86_SREG_SS); \
    157200        IEM_LIVENESS_GPR_INPUT(X86_GREG_xSP); \
    158201    } while (0)
     
    164207
    165208
    166 #define IEM_LIVENESS_PC_NO_FLAGS() NOP()
    167 
    168 #define IEM_LIVENESS_PC_WITH_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL()
    169 
    170 
    171 #define IEM_MC_END() \
     209#define IEM_LIVENESS_PC_NO_FLAGS()          NOP()
     210#define IEM_LIVENESS_PC_WITH_FLAGS()        IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther)
     211#define IEM_LIVENESS_PC16_JMP_NO_FLAGS()    IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS)
     212#define IEM_LIVENESS_PC32_JMP_NO_FLAGS()    IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS)
     213#define IEM_LIVENESS_PC64_JMP_NO_FLAGS()    IEM_LIVENESS_MARK_XCPT_OR_CALL()
     214#define IEM_LIVENESS_PC16_JMP_WITH_FLAGS()  IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS)
     215#define IEM_LIVENESS_PC32_JMP_WITH_FLAGS()  IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS)
     216#define IEM_LIVENESS_PC64_JMP_WITH_FLAGS()  IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther)
     217
     218#ifdef IEMLIVENESS_OLD_LAYOUT
     219# define IEM_MC_END() \
    172220        /* Combine the incoming state with what we've accumulated in this block. */ \
    173221        /* We can help the compiler by skipping OR'ing when having applied XPCT_OR_CALL, */ \
     
    184232        } \
    185233    }
     234#else
     235# define IEM_MC_END() \
     236        /* Combine the incoming state with what we've accumulated in this block. */ \
     237        /* We can help the compiler by skipping OR'ing when having applied XPCT_OR_CALL, */ \
     238        /* since that already imports all the incoming state. Saves a lot with cl.exe. */ \
     239        if (!fDoneXpctOrCall) \
     240        { \
     241            pOutgoing->Bit0.bm64 = LiveStateBit0.bm64 | (~LiveMask.bm64 & pIncoming->Bit0.bm64); \
     242            pOutgoing->Bit1.bm64 = LiveStateBit1.bm64 | (~LiveMask.bm64 & pIncoming->Bit1.bm64); \
     243        } \
     244        else \
     245        { \
     246            pOutgoing->Bit0.bm64 = LiveStateBit0.bm64; \
     247            pOutgoing->Bit1.bm64 = LiveStateBit1.bm64; \
     248        } \
     249    }
     250#endif
    186251
    187252/*
     
    197262
    198263/* We don't track RIP (PC) liveness. */
    199 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16(a_cbInstr, a_rcNormal)                                  IEM_LIVENESS_PC_NO_FLAGS()
    200 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32(a_cbInstr, a_rcNormal)                                  IEM_LIVENESS_PC_NO_FLAGS()
    201 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(a_cbInstr, a_rcNormal)                                  IEM_LIVENESS_PC_NO_FLAGS()
    202 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbInstr, a_rcNormal)                       IEM_LIVENESS_PC_WITH_FLAGS()
    203 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_cbInstr, a_rcNormal)                       IEM_LIVENESS_PC_WITH_FLAGS()
    204 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_cbInstr, a_rcNormal)                       IEM_LIVENESS_PC_WITH_FLAGS()
    205 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16(a_i8, a_cbInstr, a_rcNormal)                             IEM_LIVENESS_PC_NO_FLAGS()
    206 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)             IEM_LIVENESS_PC_NO_FLAGS()
    207 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)             IEM_LIVENESS_PC_NO_FLAGS()
    208 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i8, a_cbInstr, a_rcNormal)                  IEM_LIVENESS_PC_WITH_FLAGS()
    209 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)  IEM_LIVENESS_PC_WITH_FLAGS()
    210 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)  IEM_LIVENESS_PC_WITH_FLAGS()
    211 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC_NO_FLAGS()
    212 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC_NO_FLAGS()
    213 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC_NO_FLAGS()
    214 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC_WITH_FLAGS()
    215 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC_WITH_FLAGS()
    216 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC_WITH_FLAGS()
    217 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC_NO_FLAGS()
    218 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC_NO_FLAGS()
    219 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC_NO_FLAGS()
    220 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC_WITH_FLAGS()
    221 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC_WITH_FLAGS()
    222 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC_WITH_FLAGS()
    223 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16(a_u16NewIP)                                             IEM_LIVENESS_PC_NO_FLAGS()
    224 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32(a_u16NewIP)                                             IEM_LIVENESS_PC_NO_FLAGS()
    225 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64(a_u16NewIP)                                             IEM_LIVENESS_PC_NO_FLAGS()
    226 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP)                                  IEM_LIVENESS_PC_WITH_FLAGS()
    227 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP)                                  IEM_LIVENESS_PC_WITH_FLAGS()
    228 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP)                                  IEM_LIVENESS_PC_WITH_FLAGS()
    229 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP)                                            IEM_LIVENESS_PC_NO_FLAGS()
    230 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP)                                            IEM_LIVENESS_PC_NO_FLAGS()
    231 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP)                                 IEM_LIVENESS_PC_WITH_FLAGS()
    232 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP)                                 IEM_LIVENESS_PC_WITH_FLAGS()
    233 #define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64(a_u32NewEIP)                                            IEM_LIVENESS_PC_NO_FLAGS()
    234 #define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP)                                 IEM_LIVENESS_PC_WITH_FLAGS()
     264#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16(a_cbInstr, a_rcNormal)              IEM_LIVENESS_PC_NO_FLAGS()
     265#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32(a_cbInstr, a_rcNormal)              IEM_LIVENESS_PC_NO_FLAGS()
     266#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(a_cbInstr, a_rcNormal)              IEM_LIVENESS_PC_NO_FLAGS()
     267#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbInstr, a_rcNormal)   IEM_LIVENESS_PC_WITH_FLAGS()
     268#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_cbInstr, a_rcNormal)   IEM_LIVENESS_PC_WITH_FLAGS()
     269#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_cbInstr, a_rcNormal)   IEM_LIVENESS_PC_WITH_FLAGS()
     270
     271#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16(a_i8, a_cbInstr, a_rcNormal)                             IEM_LIVENESS_PC16_JMP_NO_FLAGS()
     272#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)             IEM_LIVENESS_PC32_JMP_NO_FLAGS()
     273#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)             IEM_LIVENESS_PC64_JMP_NO_FLAGS()
     274#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i8, a_cbInstr, a_rcNormal)                  IEM_LIVENESS_PC16_JMP_WITH_FLAGS()
     275#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)  IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
     276#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)  IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
     277#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC16_JMP_NO_FLAGS()
     278#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC32_JMP_NO_FLAGS()
     279#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC64_JMP_NO_FLAGS()
     280#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC16_JMP_WITH_FLAGS()
     281#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
     282#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
     283#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC16_JMP_NO_FLAGS()
     284#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC32_JMP_NO_FLAGS()
     285#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC64_JMP_NO_FLAGS()
     286#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC16_JMP_WITH_FLAGS()
     287#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
     288#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
     289#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16(a_u16NewIP)                                             IEM_LIVENESS_PC16_JMP_NO_FLAGS()
     290#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32(a_u16NewIP)                                             IEM_LIVENESS_PC32_JMP_NO_FLAGS()
     291#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64(a_u16NewIP)                                             IEM_LIVENESS_PC64_JMP_NO_FLAGS()
     292#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP)                                  IEM_LIVENESS_PC16_JMP_WITH_FLAGS()
     293#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP)                                  IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
     294#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP)                                  IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
     295#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP)                                            IEM_LIVENESS_PC32_JMP_NO_FLAGS()
     296#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP)                                            IEM_LIVENESS_PC64_JMP_NO_FLAGS()
     297#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP)                                 IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
     298#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP)                                 IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
     299#define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64(a_u32NewEIP)                                            IEM_LIVENESS_PC64_JMP_NO_FLAGS()
     300#define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP)                                 IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
    235301
    236302/* Effective address stuff is rather complicated... */
     
    369435#undef  IEM_MC_COMMIT_EFLAGS /* unused here */
    370436#define IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) do { \
    371         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_CF, u2EflCf); \
    372         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_PF, u2EflPf); \
    373         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_AF, u2EflAf); \
    374         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_ZF, u2EflZf); \
    375         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_SF, u2EflSf); \
    376         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_OF, u2EflOf); \
    377         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, ~X86_EFL_STATUS_BITS, u2EflOther); \
     437        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_CF, fEflCf); \
     438        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_PF, fEflPf); \
     439        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_AF, fEflAf); \
     440        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_ZF, fEflZf); \
     441        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_SF, fEflSf); \
     442        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_OF, fEflOf); \
     443        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, ~X86_EFL_STATUS_BITS, fEflOther); \
    378444        Assert(!(  ((a_fEflInput) | (a_fEflOutput)) \
    379445                 & ~(uint32_t)(X86_EFL_STATUS_BITS | X86_EFL_DF | X86_EFL_VM | X86_EFL_VIF | X86_EFL_IOPL))); \
     
    405471        /* IEM_MC_COMMIT_EFLAGS_EX doesn't cover input-only situations.  This OTOH, leads \
    406472           to duplication in many cases, but the compiler's optimizers should help with that. */ \
    407         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_CF, u2EflCf); \
    408         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_PF, u2EflPf); \
    409         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_AF, u2EflAf); \
    410         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_ZF, u2EflZf); \
    411         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_SF, u2EflSf); \
    412         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_OF, u2EflOf); \
    413         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, ~X86_EFL_STATUS_BITS, u2EflOther); \
     473        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_CF, fEflCf); \
     474        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_PF, fEflPf); \
     475        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_AF, fEflAf); \
     476        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_ZF, fEflZf); \
     477        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_SF, fEflSf); \
     478        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_OF, fEflOf); \
     479        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, ~X86_EFL_STATUS_BITS, fEflOther); \
    414480        Assert(!(  ((a_fEflInput) | (a_fEflOutput)) \
    415481                 & ~(uint32_t)(X86_EFL_STATUS_BITS | X86_EFL_DF | X86_EFL_VM | X86_EFL_VIF | X86_EFL_IOPL))); \
     
    426492#define IEM_MC_FETCH_FCW(a_u16Fcw)                                  NOP()
    427493
    428 #define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value)                  IEM_LIVENESS_GPR_CLOBBERED(a_iGReg)
     494#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value)                  IEM_LIVENESS_GPR_INPUT(a_iGReg)
    429495#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value)                  IEM_LIVENESS_GPR_CLOBBERED(a_iGReg)
    430496#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value)                  IEM_LIVENESS_GPR_CLOBBERED(a_iGReg)
    431497#define IEM_MC_STORE_GREG_I64(a_iGReg, a_i64Value)                  IEM_LIVENESS_GPR_CLOBBERED(a_iGReg)
    432 #define IEM_MC_STORE_GREG_U16_CONST(a_iGReg, a_u16Const)            IEM_LIVENESS_GPR_CLOBBERED(a_iGReg)
     498#define IEM_MC_STORE_GREG_U16_CONST(a_iGReg, a_u16Const)            IEM_LIVENESS_GPR_INPUT(a_iGReg)
    433499#define IEM_MC_STORE_GREG_U32_CONST(a_iGReg, a_u32Const)            IEM_LIVENESS_GPR_CLOBBERED(a_iGReg)
    434500#define IEM_MC_STORE_GREG_U64_CONST(a_iGReg, a_u32Const)            IEM_LIVENESS_GPR_CLOBBERED(a_iGReg)
     
    437503#define IEM_MC_STORE_GREG_PAIR_U64(a_iGRegLo, a_iGRegHi, a_u128Value) \
    438504    do { IEM_LIVENESS_GPR_CLOBBERED(a_iGRegLo); IEM_LIVENESS_GPR_CLOBBERED(a_iGRegHi); } while(0)
    439 #define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg)                         NOP() /* ASSUMES it's never used w/o associated modifications that we flag instead */
     505#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg)                         IEM_LIVENESS_GPR_INPUT(a_iGReg) /** @todo This isn't always the case... */
    440506
    441507#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value)             IEM_LIVENESS_SEG_BASE_CLOBBERED(a_iSReg)
     
    459525        else if ((a_fEflOutput) & (a_fEfl)) IEM_LIVENESS_ONE_EFLAG_CLOBBERED(a_Member)
    460526#define IEM_MC_REF_EFLAGS_EX(a_pEFlags, a_fEflInput, a_fEflOutput) do { \
    461         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_CF, u2EflCf); \
    462         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_PF, u2EflPf); \
    463         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_AF, u2EflAf); \
    464         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_ZF, u2EflZf); \
    465         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_SF, u2EflSf); \
    466         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_OF, u2EflOf); \
    467         IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, ~X86_EFL_STATUS_BITS, u2EflOther); \
     527        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_CF, fEflCf); \
     528        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_PF, fEflPf); \
     529        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_AF, fEflAf); \
     530        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_ZF, fEflZf); \
     531        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_SF, fEflSf); \
     532        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, X86_EFL_OF, fEflOf); \
     533        IEMLIVENESS_EFL_HLP(a_fEflInput, a_fEflOutput, ~X86_EFL_STATUS_BITS, fEflOther); \
    468534        Assert(!(  ((a_fEflInput) | (a_fEflOutput)) \
    469535                 & ~(uint32_t)(X86_EFL_STATUS_BITS | X86_EFL_DF | X86_EFL_VM | X86_EFL_VIF | X86_EFL_IOPL))); \
     
    529595
    530596#define IEM_MC_SET_EFL_BIT(a_fBit) do { \
    531         if ((a_fBit) == X86_EFL_CF)      IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflCf); \
    532         else if ((a_fBit) == X86_EFL_DF) IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflOther); \
     597        if ((a_fBit) == X86_EFL_CF)      IEM_LIVENESS_ONE_EFLAG_INPUT(fEflCf); \
     598        else if ((a_fBit) == X86_EFL_DF) IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther); \
    533599        else { AssertFailed();           IEM_LIVENESS_ALL_EFLAG_INPUT(); } \
    534600    } while (0)
    535601#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { \
    536         if ((a_fBit) == X86_EFL_CF)      IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflCf); \
    537         else if ((a_fBit) == X86_EFL_DF) IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflOther); \
     602        if ((a_fBit) == X86_EFL_CF)      IEM_LIVENESS_ONE_EFLAG_INPUT(fEflCf); \
     603        else if ((a_fBit) == X86_EFL_DF) IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther); \
    538604        else { AssertFailed();           IEM_LIVENESS_ALL_EFLAG_INPUT(); } \
    539605    } while (0)
    540606#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { \
    541         if ((a_fBit) == X86_EFL_CF)      IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflCf); \
     607        if ((a_fBit) == X86_EFL_CF)      IEM_LIVENESS_ONE_EFLAG_INPUT(fEflCf); \
    542608        else { AssertFailed();           IEM_LIVENESS_ALL_EFLAG_INPUT(); } \
    543609    } while (0)
     
    810876#define IEM_MC_PUSH_U64(a_u64Value)                  IEM_LIVENESS_STACK()
    811877
    812 #define IEM_MC_POP_GREG_U16(a_iGReg)            do { IEM_LIVENESS_STACK();  IEM_LIVENESS_GPR_CLOBBERED(a_iGReg); } while (0)
     878#define IEM_MC_POP_GREG_U16(a_iGReg)            do { IEM_LIVENESS_STACK();  IEM_LIVENESS_GPR_INPUT(a_iGReg); } while (0)
    813879#define IEM_MC_POP_GREG_U32(a_iGReg)            do { IEM_LIVENESS_STACK();  IEM_LIVENESS_GPR_CLOBBERED(a_iGReg); } while (0)
    814880#define IEM_MC_POP_GREG_U64(a_iGReg)            do { IEM_LIVENESS_STACK();  IEM_LIVENESS_GPR_CLOBBERED(a_iGReg); } while (0)
     
    819885#define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal)       IEM_LIVENESS_STACK_FLAT()
    820886
    821 #define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg)     do { IEM_LIVENESS_STACK_FLAT(); IEM_LIVENESS_GPR_CLOBBERED(a_iGReg); } while (0)
     887#define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg)     do { IEM_LIVENESS_STACK_FLAT(); IEM_LIVENESS_GPR_INPUT(a_iGReg); } while (0)
    822888#define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg)     do { IEM_LIVENESS_STACK_FLAT(); IEM_LIVENESS_GPR_CLOBBERED(a_iGReg); } while (0)
    823889
     
    826892#define IEM_MC_FLAT64_PUSH_U64(a_u64Value)           IEM_LIVENESS_STACK_FLAT()
    827893
    828 #define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg)     do { IEM_LIVENESS_STACK_FLAT(); IEM_LIVENESS_GPR_CLOBBERED(a_iGReg); } while (0)
     894#define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg)     do { IEM_LIVENESS_STACK_FLAT(); IEM_LIVENESS_GPR_INPUT(a_iGReg); } while (0)
    829895#define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg)     do { IEM_LIVENESS_STACK_FLAT(); IEM_LIVENESS_GPR_CLOBBERED(a_iGReg); } while (0)
    830896
     
    9641030
    9651031#define IEM_LIVENESS_ONE_STATUS_EFLAG_INPUT(a_fBit) \
    966     do { if (     (a_fBit) == X86_EFL_CF) IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflCf); \
    967          else if ((a_fBit) == X86_EFL_PF) IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflPf); \
    968          else if ((a_fBit) == X86_EFL_AF) IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflAf); \
    969          else if ((a_fBit) == X86_EFL_ZF) IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflZf); \
    970          else if ((a_fBit) == X86_EFL_SF) IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflSf); \
    971          else if ((a_fBit) == X86_EFL_OF) IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflOf); \
    972          else if ((a_fBit) == X86_EFL_DF) IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflOther); /* loadsb and friends */ \
     1032    do { if (     (a_fBit) == X86_EFL_CF) IEM_LIVENESS_ONE_EFLAG_INPUT(fEflCf); \
     1033         else if ((a_fBit) == X86_EFL_PF) IEM_LIVENESS_ONE_EFLAG_INPUT(fEflPf); \
     1034         else if ((a_fBit) == X86_EFL_AF) IEM_LIVENESS_ONE_EFLAG_INPUT(fEflAf); \
     1035         else if ((a_fBit) == X86_EFL_ZF) IEM_LIVENESS_ONE_EFLAG_INPUT(fEflZf); \
     1036         else if ((a_fBit) == X86_EFL_SF) IEM_LIVENESS_ONE_EFLAG_INPUT(fEflSf); \
     1037         else if ((a_fBit) == X86_EFL_OF) IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOf); \
     1038         else if ((a_fBit) == X86_EFL_DF) IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther); /* loadsb and friends */ \
    9731039         else { AssertMsgFailed(("#s (%#x)\n", #a_fBit, (a_fBit)));  IEM_LIVENESS_ALL_EFLAGS_INPUT(); } \
    9741040    } while (0)
     
    9781044#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) \
    9791045    do { if ((a_fBits) == (X86_EFL_CF | X86_EFL_ZF)) \
    980          { IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflCf); IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflZf); } \
     1046         { IEM_LIVENESS_ONE_EFLAG_INPUT(fEflCf); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflZf); } \
    9811047         else { AssertMsgFailed(("#s (%#x)\n", #a_fBits, (a_fBits)));  IEM_LIVENESS_ALL_EFLAGS_INPUT(); } \
    9821048    } while (0);                                        {
    9831049#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) \
    9841050    do { if ((a_fBits) == (X86_EFL_CF | X86_EFL_ZF)) \
    985          { IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflCf); IEM_LIVENESS_ONE_EFLAG_INPUT(u2EflZf); } \
     1051         { IEM_LIVENESS_ONE_EFLAG_INPUT(fEflCf); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflZf); } \
    9861052         else { AssertMsgFailed(("#s (%#x)\n", #a_fBits, (a_fBits)));  IEM_LIVENESS_ALL_EFLAGS_INPUT(); } \
    9871053    } while (0);                                        {
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp

    r103181 r103318  
    282282{
    283283    IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
     284    IEM_LIVENESS_RAW_EFLAGS_ONE_INPUT(pOutgoing, fEflOther);
    284285    RT_NOREF(pCallEntry);
    285286}
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r103236 r103318  
    35443544
    35453545
     3546#if 0 /* unused */
    35463547/**
    35473548 * Tries to locate a suitable register in the given register mask.
     
    35733574    return UINT8_MAX;
    35743575}
     3576#endif /* unused */
    35753577
    35763578
     
    35923594                                         uint32_t fRegMask = IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK)
    35933595{
     3596    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFree);
    35943597    Assert(!(fRegMask & ~IEMNATIVE_HST_GREG_MASK));
    35953598    Assert(!(fRegMask & IEMNATIVE_REG_FIXED_MASK));
    35963599
    35973600    /*
    3598      * Try a freed register that's shadowing a guest register
     3601     * Try a freed register that's shadowing a guest register.
    35993602     */
    36003603    uint32_t fRegs = ~pReNative->Core.bmHstRegs & fRegMask;
    36013604    if (fRegs)
    36023605    {
     3606        STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeNoVar);
     3607
     3608#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
     3609        /*
      3610         * When we have liveness information, we use it to kick out all shadowed
      3611         * guest registers that will not be needed any more in this TB.  If we're
     3612         * lucky, this may prevent us from ending up here again.
     3613         *
     3614         * Note! We must consider the previous entry here so we don't free
     3615         *       anything that the current threaded function requires (current
     3616         *       entry is produced by the next threaded function).
     3617         */
     3618        uint32_t const idxCurCall = pReNative->idxCurCall;
     3619        if (idxCurCall > 0)
     3620        {
     3621            PCIEMLIVENESSENTRY const pLivenessEntry = &pReNative->paLivenessEntries[idxCurCall - 1];
     3622
     3623            /* Construct a mask of the guest registers in the UNUSED and XCPT_OR_CALL state. */
     3624# ifdef IEMLIVENESS_OLD_LAYOUT
     3625            AssertCompile(IEMLIVENESS_STATE_UNUSED == 1 && IEMLIVENESS_STATE_XCPT_OR_CALL == 2);
     3626            uint64_t fToFreeMask = 0;
     3627            uint64_t fTmp = pLivenessEntry->s1.bm64;
     3628            fTmp ^= fTmp >> 1;
     3629            for (unsigned iReg = 0; i < 32; i++)
     3630                fToFreeMask = ((fTmp >> (iReg * IEMLIVENESS_STATE_BIT_COUNT)) & 1) << iReg;
     3631
     3632            IEMLIVENESSPART2 Part2 = pLivenessEntry->s2;
     3633            Part2.fEflOther &= Part2.fEflCf;      /** @todo optimize this */
     3634            Part2.fEflOther &= Part2.fEflPf;
     3635            Part2.fEflOther &= Part2.fEflAf;
     3636            Part2.fEflOther &= Part2.fEflZf;
     3637            Part2.fEflOther &= Part2.fEflSf;
     3638            Part2.fEflOther &= Part2.fEflOf;
     3639            fTmp = pLivenessEntry->s2.bm64;
     3640            fTmp ^= fTmp >> 1;
     3641            for (unsigned iReg = 0; i < IEMLIVENESSPART2_REG_COUNT - 6; i++)
     3642                fToFreeMask = ((fTmp >> (iReg * IEMLIVENESS_STATE_BIT_COUNT)) & 1) << (iReg + 32);
     3643# else
     3644            AssertCompile(IEMLIVENESS_STATE_UNUSED == 1 && IEMLIVENESS_STATE_XCPT_OR_CALL == 2);
     3645#  if 0
     3646            IEMLIVENESSBIT Tmp = { pLivenessEntry->Bit0.bm64 ^ pLivenessEntry->Bit1.bm64 }; /* mask of regs in either UNUSED */
     3647            Tmp.fEflOther &= Tmp.fEflCf; /** @todo optimize this (pair of 3 (status), pair of 4 (in other), pair of 2, pair of 1). */
     3648            Tmp.fEflOther &= Tmp.fEflPf;
     3649            Tmp.fEflOther &= Tmp.fEflAf;
     3650            Tmp.fEflOther &= Tmp.fEflZf;
     3651            Tmp.fEflOther &= Tmp.fEflSf;
     3652            Tmp.fEflOther &= Tmp.fEflOf;
     3653            Tmp.fEflCf     = 0; /* not necessary, but better safe. */
     3654            Tmp.fEflPf     = 0;
     3655            Tmp.fEflAf     = 0;
     3656            Tmp.fEflZf     = 0;
     3657            Tmp.fEflSf     = 0;
     3658            Tmp.fEflOf     = 0;
     3659            uint64_t fToFreeMask = Tmp.bm64;
     3660#  else
     3661            uint64_t fToFreeMask = pLivenessEntry->Bit0.bm64 ^ pLivenessEntry->Bit1.bm64; /* mask of regs in either UNUSED */
     3662            uint64_t fTmp = fToFreeMask & (fToFreeMask >> 3);   /* AF2,PF2,CF2,Other2 = AF,PF,CF,Other & OF,SF,ZF,AF */
     3663            fTmp &= fTmp >> 2;                                  /*         CF3,Other3 = AF2,PF2 & CF2,Other2  */
     3664            fTmp &= fTmp >> 1;                                  /*             Other4 = CF3 & Other3 */
     3665            fToFreeMask &= RT_BIT_64(kIemNativeGstReg_EFlags) - 1;
     3666            fToFreeMask |= fTmp & RT_BIT_64(kIemNativeGstReg_EFlags);
     3667#  endif
     3668# endif
     3669
     3670            /* If it matches any shadowed registers. */
     3671            if (pReNative->Core.bmGstRegShadows & fToFreeMask)
     3672            {
     3673                STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed);
     3674                iemNativeRegFlushGuestShadows(pReNative, fToFreeMask);
     3675                Assert(fRegs == (~pReNative->Core.bmHstRegs & fRegMask)); /* this shall not change. */
     3676
     3677                /* See if we've got any unshadowed registers we can return now. */
     3678                uint32_t const fUnshadowedRegs = fRegs & ~pReNative->Core.bmHstRegsWithGstShadow;
     3679                if (fUnshadowedRegs)
     3680                {
     3681                    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped);
     3682                    return (fPreferVolatile
     3683                            ? ASMBitFirstSetU32(fUnshadowedRegs)
     3684                            : ASMBitLastSetU32(  fUnshadowedRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
     3685                                               ? fUnshadowedRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fUnshadowedRegs))
     3686                         - 1;
     3687                }
     3688            }
     3689        }
     3690#endif /* IEMNATIVE_WITH_LIVENESS_ANALYSIS */
     3691
    36033692        unsigned const idxReg = (fPreferVolatile
    36043693                                 ? ASMBitFirstSetU32(fRegs)
    36053694                                 : ASMBitLastSetU32(  fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
    3606                                                     ? fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK: fRegs))
     3695                                                    ? fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs))
    36073696                              - 1;
    36083697
     
    36243713     * saved on the stack, then in the second round move things to the stack.
    36253714     */
     3715    STAM_REL_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeVar);
    36263716    for (uint32_t iLoop = 0; iLoop < 2; iLoop++)
    36273717    {
     
    37493839    /** @todo Implement basic variable liveness analysis (python) so variables
    37503840     * can be freed immediately once no longer used.  This has the potential to
    3751      * be trashing registers and stack for dead variables. */
     3841     * be trashing registers and stack for dead variables.
     3842     * Update: This is mostly done. (Not IEMNATIVE_WITH_LIVENESS_ANALYSIS.) */
    37523843
    37533844    /*
     
    38183909        Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0);
    38193910        Assert(!(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg)));
     3911        Log12(("iemNativeRegAllocTmp: %s\n", g_apszIemNativeHstRegNames[idxReg]));
    38203912    }
    38213913    else
     
    38233915        idxReg = iemNativeRegAllocFindFree(pReNative, poff, fPreferVolatile);
    38243916        AssertStmt(idxReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_TMP));
     3917        Log12(("iemNativeRegAllocTmp: %s (slow)\n", g_apszIemNativeHstRegNames[idxReg]));
    38253918    }
    38263919    return iemNativeRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Tmp);
     
    38673960        Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0);
    38683961        Assert(!(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg)));
     3962        Log12(("iemNativeRegAllocTmpEx: %s\n", g_apszIemNativeHstRegNames[idxReg]));
    38693963    }
    38703964    else
     
    38723966        idxReg = iemNativeRegAllocFindFree(pReNative, poff, fPreferVolatile, fRegMask);
    38733967        AssertStmt(idxReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_TMP));
     3968        Log12(("iemNativeRegAllocTmpEx: %s (slow)\n", g_apszIemNativeHstRegNames[idxReg]));
    38743969    }
    38753970    return iemNativeRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Tmp);
     
    39084003
    39094004/**
     4005 * Helper for iemNativeLivenessGetStateByGstReg.
     4006 *
     4007 * @returns IEMLIVENESS_STATE_XXX
     4008 * @param   fMergedStateExp2    This is the RT_BIT_32() of each sub-state
     4009 *                              ORed together.
     4010 */
     4011DECL_FORCE_INLINE(uint32_t)
     4012iemNativeLivenessMergeExpandedEFlagsState(uint32_t fMergedStateExp2)
     4013{
     4014    /* INPUT trumps anything else. */
     4015    if (fMergedStateExp2 & RT_BIT_32(IEMLIVENESS_STATE_INPUT))
     4016        return IEMLIVENESS_STATE_INPUT;
     4017
     4018    /* CLOBBERED trumps XCPT_OR_CALL and UNUSED. */
     4019    if (fMergedStateExp2 & RT_BIT_32(IEMLIVENESS_STATE_CLOBBERED))
     4020    {
     4021        /* If not all sub-fields are clobbered they must be considered INPUT. */
     4022        if (fMergedStateExp2 & (RT_BIT_32(IEMLIVENESS_STATE_UNUSED) | RT_BIT_32(IEMLIVENESS_STATE_XCPT_OR_CALL)))
     4023            return IEMLIVENESS_STATE_INPUT;
     4024        return IEMLIVENESS_STATE_CLOBBERED;
     4025    }
     4026
     4027    /* XCPT_OR_CALL trumps UNUSED. */
     4028    if (fMergedStateExp2 & RT_BIT_32(IEMLIVENESS_STATE_XCPT_OR_CALL))
     4029        return IEMLIVENESS_STATE_XCPT_OR_CALL;
     4030
     4031    return IEMLIVENESS_STATE_UNUSED;
     4032}
     4033
     4034#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
     4035
     4036DECL_FORCE_INLINE(uint32_t)
     4037iemNativeLivenessGetStateByGstRegEx(PCIEMLIVENESSENTRY pLivenessEntry, unsigned enmGstRegEx)
     4038{
     4039#ifdef IEMLIVENESS_OLD_LAYOUT
     4040    if ((unsigned)enmGstRegEx < 32)
     4041        return (pLivenessEntry->s1.bm64 >> (enmGstRegEx * IEMLIVENESS_STATE_BIT_COUNT)) & IEMLIVENESS_STATE_MASK;
     4042    return (pLivenessEntry->s2.bm64 >> ((enmGstRegEx - 32) * IEMLIVENESS_STATE_BIT_COUNT)) & IEMLIVENESS_STATE_MASK;
     4043#else
     4044    return ((pLivenessEntry->Bit0.bm64 >> enmGstRegEx) & 1)
     4045         | (((pLivenessEntry->Bit1.bm64 >> enmGstRegEx) << 1) & 2);
     4046#endif
     4047}
     4048
     4049
     4050DECL_FORCE_INLINE(uint32_t)
     4051iemNativeLivenessGetStateByGstReg(PCIEMLIVENESSENTRY pLivenessEntry, IEMNATIVEGSTREG enmGstReg)
     4052{
     4053#ifdef IEMLIVENESS_OLD_LAYOUT
     4054    uint32_t uRet;
     4055    if ((unsigned)enmGstReg < 32)
     4056        uRet = (pLivenessEntry->s1.bm64 >> ((unsigned)enmGstReg * IEMLIVENESS_STATE_BIT_COUNT)) & IEMLIVENESS_STATE_MASK;
     4057    else
     4058    {
     4059        uRet = (pLivenessEntry->s2.bm64 >> (((unsigned)enmGstReg - 32) * IEMLIVENESS_STATE_BIT_COUNT)) & IEMLIVENESS_STATE_MASK;
     4060        if (enmGstReg == kIemNativeGstReg_EFlags)
     4061        {
     4062            /* Merge the eflags states to one. */
     4063            uRet  = RT_BIT_32(uRet);
     4064            uRet |= RT_BIT_32(pLivenessEntry->s2.fEflCf);
     4065            uRet |= RT_BIT_32(pLivenessEntry->s2.fEflPf);
     4066            uRet |= RT_BIT_32(pLivenessEntry->s2.fEflAf);
     4067            uRet |= RT_BIT_32(pLivenessEntry->s2.fEflZf);
     4068            uRet |= RT_BIT_32(pLivenessEntry->s2.fEflSf);
     4069            uRet |= RT_BIT_32(pLivenessEntry->s2.fEflOf);
     4070            uRet  = iemNativeLivenessMergeExpandedEFlagsState(uRet);
     4071        }
     4072    }
     4073#else
     4074    uint32_t uRet = ((pLivenessEntry->Bit0.bm64 >> (unsigned)enmGstReg) & 1)
     4075                  | (((pLivenessEntry->Bit1.bm64 >> (unsigned)enmGstReg) << 1) & 2);
     4076    if (enmGstReg == kIemNativeGstReg_EFlags)
     4077    {
     4078        /* Merge the eflags states to one. */
     4079        uRet  = RT_BIT_32(uRet);
     4080        uRet |= RT_BIT_32(pLivenessEntry->Bit0.fEflCf | (pLivenessEntry->Bit1.fEflCf << 1));
     4081        uRet |= RT_BIT_32(pLivenessEntry->Bit0.fEflPf | (pLivenessEntry->Bit1.fEflPf << 1));
     4082        uRet |= RT_BIT_32(pLivenessEntry->Bit0.fEflAf | (pLivenessEntry->Bit1.fEflAf << 1));
     4083        uRet |= RT_BIT_32(pLivenessEntry->Bit0.fEflZf | (pLivenessEntry->Bit1.fEflZf << 1));
     4084        uRet |= RT_BIT_32(pLivenessEntry->Bit0.fEflSf | (pLivenessEntry->Bit1.fEflSf << 1));
     4085        uRet |= RT_BIT_32(pLivenessEntry->Bit0.fEflOf | (pLivenessEntry->Bit1.fEflOf << 1));
     4086        uRet  = iemNativeLivenessMergeExpandedEFlagsState(uRet);
     4087    }
     4088#endif
     4089    return uRet;
     4090}
     4091
     4092
     4093# ifdef VBOX_STRICT
     4094/** For assertions only, user checks that idxCurCall isn't zerow. */
     4095DECL_FORCE_INLINE(uint32_t)
     4096iemNativeLivenessGetPrevStateByGstReg(PIEMRECOMPILERSTATE pReNative, IEMNATIVEGSTREG enmGstReg)
     4097{
     4098    return iemNativeLivenessGetStateByGstReg(&pReNative->paLivenessEntries[pReNative->idxCurCall - 1], enmGstReg);
     4099}
     4100# endif /* VBOX_STRICT */
     4101
     4102#endif /* IEMNATIVE_WITH_LIVENESS_ANALYSIS */
     4103
     4104/**
    39104105 * Marks host register @a idxHstReg as containing a shadow copy of guest
    39114106 * register @a enmGstReg.
     
    40014196
    40024197
     4198#if 0 /* unused */
    40034199/**
    40044200 * Clear any guest register shadow claim for @a enmGstReg.
     
    40144210    }
    40154211}
     4212#endif
    40164213
    40174214
     
    40194216 * Clear any guest register shadow claim for @a enmGstReg and mark @a idxHstRegNew
    40204217 * as the new shadow of it.
     4218 *
     4219 * Unlike the other guest reg shadow helpers, this does the logging for you.
      4220 * However, the liveness state is not asserted here; the caller must do
      4221 * that.
    40214222 */
    40224223DECL_FORCE_INLINE(void)
     
    40274228    if (pReNative->Core.bmGstRegShadows & RT_BIT_64(enmGstReg))
    40284229    {
    4029         Assert(pReNative->Core.aidxGstRegShadows[enmGstReg] < RT_ELEMENTS(pReNative->Core.aHstRegs));
    4030         if (pReNative->Core.aidxGstRegShadows[enmGstReg] == idxHstRegNew)
     4230        uint8_t const idxHstRegOld = pReNative->Core.aidxGstRegShadows[enmGstReg];
     4231        Assert(idxHstRegOld < RT_ELEMENTS(pReNative->Core.aHstRegs));
     4232        if (idxHstRegOld == idxHstRegNew)
    40314233            return;
     4234        Log12(("iemNativeRegClearAndMarkAsGstRegShadow: %s for guest %s (from %s)\n", g_apszIemNativeHstRegNames[idxHstRegNew],
     4235               g_aGstShadowInfo[enmGstReg].pszName, g_apszIemNativeHstRegNames[idxHstRegOld]));
    40324236        iemNativeRegClearGstRegShadowingOne(pReNative, pReNative->Core.aidxGstRegShadows[enmGstReg], enmGstReg, off);
    40334237    }
     4238    else
     4239        Log12(("iemNativeRegClearAndMarkAsGstRegShadow: %s for guest %s\n", g_apszIemNativeHstRegNames[idxHstRegNew],
     4240               g_aGstShadowInfo[enmGstReg].pszName));
    40344241    iemNativeRegMarkAsGstRegShadow(pReNative, idxHstRegNew, enmGstReg, off);
    40354242}
     
    40924299 *                          registers, so this is only applied if we allocate a
    40934300 *                          new register.
     4301 * @param   fSkipLivenessAssert     Hack for liveness input validation of EFLAGS.
    40944302 * @sa      iemNativeRegAllocTmpForGuestRegIfAlreadyPresent
    40954303 */
     
    40974305iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg,
    40984306                                IEMNATIVEGSTREGUSE enmIntendedUse /*= kIemNativeGstRegUse_ReadOnly*/,
    4099                                 bool fNoVolatileRegs /*= false*/)
     4307                                bool fNoVolatileRegs /*= false*/, bool fSkipLivenessAssert /*= false*/)
    41004308{
    41014309    Assert(enmGstReg < kIemNativeGstReg_End && g_aGstShadowInfo[enmGstReg].cb != 0);
     4310#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
     4311    AssertMsg(   fSkipLivenessAssert
     4312              || pReNative->idxCurCall == 0
     4313              || enmGstReg == kIemNativeGstReg_Pc
     4314              || (enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
     4315                  ? IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
     4316                  : IEMLIVENESS_STATE_IS_ACCESS_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)) ),
     4317              ("%s - %u\n", g_aGstShadowInfo[enmGstReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)));
     4318#endif
     4319    RT_NOREF(fSkipLivenessAssert);
    41024320#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
    41034321    static const char * const s_pszIntendedUse[] = { "fetch", "update", "full write", "destructive calc" };
     
    41654383            {
    41664384                Assert(fNoVolatileRegs);
    4167                 uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask,
     4385                uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask & ~RT_BIT_32(idxReg),
    41684386                                                                    !fNoVolatileRegs
    41694387                                                                 && enmIntendedUse == kIemNativeGstRegUse_Calculation);
     
    42694487{
    42704488    Assert(enmGstReg < kIemNativeGstReg_End && g_aGstShadowInfo[enmGstReg].cb != 0);
     4489#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
     4490    AssertMsg(   pReNative->idxCurCall == 0
     4491              || IEMLIVENESS_STATE_IS_ACCESS_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
     4492              || enmGstReg == kIemNativeGstReg_Pc,
     4493              ("%s - %u\n", g_aGstShadowInfo[enmGstReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)));
     4494#endif
    42714495
    42724496    /*
     
    52835507#endif
    52845508
    5285     /** @todo implement expand down/whatnot checking */
    52865509    AssertStmt(idxSegReg == X86_SREG_CS, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_EMIT_CASE_NOT_IMPLEMENTED_1));
    52875510
     
    59376160    AssertCompile(   (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)
    59386161                  <= UINT32_MAX);
     6162#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
     6163    AssertMsg(   pReNative->idxCurCall == 0
     6164              || IEMLIVENESS_STATE_IS_ACCESS_EXPECTED(iemNativeLivenessGetStateByGstRegEx(&pReNative->paLivenessEntries[pReNative->idxCurCall - 1], kIemNativeGstReg_EFlags/*_Other*/)),
     6165              ("Efl_Other - %u\n", iemNativeLivenessGetStateByGstRegEx(&pReNative->paLivenessEntries[pReNative->idxCurCall - 1], kIemNativeGstReg_EFlags/*_Other*/)));
     6166#endif
     6167
    59396168    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
    5940                                                              kIemNativeGstRegUse_ForUpdate);
     6169                                                              kIemNativeGstRegUse_ForUpdate, false /*fNoVolatileRegs*/,
     6170                                                              true /*fSkipLivenessAssert*/);
    59416171    off = iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfAnySet(pReNative, off, idxEflReg,
    59426172                                                             X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK,
     
    61856415
    61866416    /* Perform limit checking, potentially raising #GP(0) and exit the TB. */
     6417/** @todo we can skip this in 32-bit FLAT mode. */
    61876418    off = iemNativeEmitCheckGpr32AgainstSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, X86_SREG_CS, idxInstr);
    61886419
     
    63396570    /* Check limit (may #GP(0) + exit TB). */
    63406571    if (!f64Bit)
     6572/** @todo we can skip this test in FLAT 32-bit mode. */
    63416573        off = iemNativeEmitCheckGpr32AgainstSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, X86_SREG_CS, idxInstr);
    63426574    /* Check that the address is canonical, raising #GP(0) + exit TB if it isn't. */
     
    95979829    if (fEflOutput)
    95989830    {
     9831        PVMCPUCC const pVCpu = pReNative->pVCpu;
     9832# ifdef IEMLIVENESS_OLD_LAYOUT
    95999833        IEMLIVENESSPART2 const LivenessInfo2 = pReNative->paLivenessEntries[pReNative->idxCurCall].s2;
    9600         PVMCPUCC const pVCpu = pReNative->pVCpu;
    9601 # define CHECK_FLAG_AND_UPDATE_STATS(a_fEfl, a_u2LivenessMember, a_CoreStatName) \
     9834#  define CHECK_FLAG_AND_UPDATE_STATS(a_fEfl, a_fLivenessMember, a_CoreStatName) \
    96029835            if (fEflOutput & (a_fEfl)) \
    96039836            { \
    9604                 if (LivenessInfo2.a_u2LivenessMember != IEMLIVENESS_STATE_CLOBBERED) \
     9837                if (LivenessInfo2.a_fLivenessMember != IEMLIVENESS_STATE_CLOBBERED) \
    96059838                    STAM_COUNTER_INC(&pVCpu->iem.s. a_CoreStatName ## Required); \
    96069839                else \
    96079840                    STAM_COUNTER_INC(&pVCpu->iem.s. a_CoreStatName ## Skippable); \
    96089841            } else do { } while (0)
    9609         CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_CF, u2EflCf, StatNativeLivenessEflCf);
    9610         CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_PF, u2EflPf, StatNativeLivenessEflPf);
    9611         CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_AF, u2EflAf, StatNativeLivenessEflAf);
    9612         CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_ZF, u2EflZf, StatNativeLivenessEflZf);
    9613         CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_SF, u2EflSf, StatNativeLivenessEflSf);
    9614         CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_OF, u2EflOf, StatNativeLivenessEflOf);
    9615         CHECK_FLAG_AND_UPDATE_STATS(~X86_EFL_STATUS_BITS, u2EflOther, StatNativeLivenessEflOther);
     9842# else
     9843        IEMLIVENESSBIT const LivenessBit0 = pReNative->paLivenessEntries[pReNative->idxCurCall].Bit0;
     9844        IEMLIVENESSBIT const LivenessBit1 = pReNative->paLivenessEntries[pReNative->idxCurCall].Bit1;
     9845        AssertCompile(IEMLIVENESS_STATE_CLOBBERED == 0);
     9846#  define CHECK_FLAG_AND_UPDATE_STATS(a_fEfl, a_fLivenessMember, a_CoreStatName) \
     9847            if (fEflOutput & (a_fEfl)) \
     9848            { \
     9849                if (LivenessBit0.a_fLivenessMember | LivenessBit1.a_fLivenessMember) \
     9850                    STAM_COUNTER_INC(&pVCpu->iem.s. a_CoreStatName ## Required); \
     9851                else \
     9852                    STAM_COUNTER_INC(&pVCpu->iem.s. a_CoreStatName ## Skippable); \
     9853            } else do { } while (0)
     9854# endif
     9855        CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_CF, fEflCf, StatNativeLivenessEflCf);
     9856        CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_PF, fEflPf, StatNativeLivenessEflPf);
     9857        CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_AF, fEflAf, StatNativeLivenessEflAf);
     9858        CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_ZF, fEflZf, StatNativeLivenessEflZf);
     9859        CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_SF, fEflSf, StatNativeLivenessEflSf);
     9860        CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_OF, fEflOf, StatNativeLivenessEflOf);
     9861        CHECK_FLAG_AND_UPDATE_STATS(~X86_EFL_STATUS_BITS, fEflOther, StatNativeLivenessEflOther);
    96169862# undef CHECK_FLAG_AND_UPDATE_STATS
    96179863    }
     
    96229868#undef  IEM_MC_FETCH_EFLAGS /* should not be used */
    96239869#define IEM_MC_FETCH_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) \
    9624     off = iemNativeEmitFetchEFlags(pReNative, off, a_EFlags)
     9870    off = iemNativeEmitFetchEFlags(pReNative, off, a_EFlags, a_fEflInput, a_fEflOutput)
    96259871
    96269872/** Handles IEM_MC_FETCH_EFLAGS_EX. */
    96279873DECL_INLINE_THROW(uint32_t)
    9628 iemNativeEmitFetchEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarEFlags)
     9874iemNativeEmitFetchEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarEFlags,
     9875                         uint32_t fEflInput, uint32_t fEflOutput)
    96299876{
    96309877    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarEFlags);
    96319878    Assert(pReNative->Core.aVars[idxVarEFlags].cbVar == sizeof(uint32_t));
    9632 
     9879    RT_NOREF(fEflInput, fEflOutput);
     9880
     9881#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
     9882# ifdef VBOX_STRICT
     9883    if (   pReNative->idxCurCall != 0
     9884        && (fEflInput != 0 || fEflOutput != 0) /* for NOT these are both zero for now. */)
     9885    {
     9886        PCIEMLIVENESSENTRY const pLivenessEntry = &pReNative->paLivenessEntries[pReNative->idxCurCall - 1];
     9887        uint32_t const           fBoth          = fEflInput | fEflOutput;
     9888# define ASSERT_ONE_EFL(a_fElfConst, a_offField) \
     9889            AssertMsg(   !(fBoth & (a_fElfConst)) \
     9890                      || (!(fEflInput & (a_fElfConst)) \
     9891                          ? IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, kIemNativeGstReg_EFlags + (a_offField))) \
     9892                          : IEMLIVENESS_STATE_IS_ACCESS_EXPECTED( iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, kIemNativeGstReg_EFlags + (a_offField))) ), \
     9893                      ("%s - %u\n", #a_fElfConst, iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, kIemNativeGstReg_EFlags + (a_offField))))
     9894        ASSERT_ONE_EFL(~(uint32_t)X86_EFL_STATUS_BITS, 0);
     9895        ASSERT_ONE_EFL(X86_EFL_CF, 1);
     9896        ASSERT_ONE_EFL(X86_EFL_PF, 2);
     9897        ASSERT_ONE_EFL(X86_EFL_AF, 3);
     9898        ASSERT_ONE_EFL(X86_EFL_ZF, 4);
     9899        ASSERT_ONE_EFL(X86_EFL_SF, 5);
     9900        ASSERT_ONE_EFL(X86_EFL_OF, 6);
     9901# undef ASSERT_ONE_EFL
     9902    }
     9903# endif
     9904#endif
     9905
      9906    /** @todo this is suboptimal. EFLAGS is probably shadowed and we should use
      9907     *        the existing shadow copy. */
    96339908    uint8_t const idxReg = iemNativeVarRegisterAcquire(pReNative, idxVarEFlags, &off, false /*fInitialized*/);
    96349909    iemNativeRegClearAndMarkAsGstRegShadow(pReNative, idxReg, kIemNativeGstReg_EFlags, off);
     
    96469921#define IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) \
    96479922    IEMNATIVE_EFLAGS_OPTIMIZATION_STATS(a_fEflInput, a_fEflOutput); \
    9648     off = iemNativeEmitCommitEFlags(pReNative, off, a_EFlags)
     9923    off = iemNativeEmitCommitEFlags(pReNative, off, a_EFlags, a_fEflOutput)
    96499924
    96509925/** Handles IEM_MC_COMMIT_EFLAGS_EX. */
    96519926DECL_INLINE_THROW(uint32_t)
    9652 iemNativeEmitCommitEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarEFlags)
     9927iemNativeEmitCommitEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarEFlags, uint32_t fEflOutput)
    96539928{
    96549929    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarEFlags);
    96559930    Assert(pReNative->Core.aVars[idxVarEFlags].cbVar == sizeof(uint32_t));
     9931    RT_NOREF(fEflOutput);
    96569932
    96579933    uint8_t const idxReg = iemNativeVarRegisterAcquire(pReNative, idxVarEFlags, &off, true /*fInitialized*/);
     
    96699945    off = iemNativeEmitBrk(pReNative, off, UINT32_C(0x2002));
    96709946    iemNativeFixupFixedJump(pReNative, offFixup, off);
     9947
      9948    /** @todo validate that only bits in the fEflOutput mask changed. */
    96719949#endif
    96729950
     
    1205412332        if (cbMem >= sizeof(uint32_t))
    1205512333        {
     12334#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
     12335            AssertMsg(   pReNative->idxCurCall == 0
     12336                      || IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, IEMNATIVEGSTREG_GPR(idxGReg))),
     12337                      ("%s - %u\n", g_aGstShadowInfo[idxGReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, IEMNATIVEGSTREG_GPR(idxGReg))));
     12338#endif
    1205612339            iemNativeRegClearAndMarkAsGstRegShadow(pReNative, idxRegMemResult,  IEMNATIVEGSTREG_GPR(idxGReg), off);
    1205712340            off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegMemResult,
     
    1343713720        /* The initial (final) entry. */
    1343813721        idxCall--;
     13722# ifdef IEMLIVENESS_OLD_LAYOUT
    1343913723        paLivenessEntries[idxCall].s1.bm64 = IEMLIVENESSPART1_ALL_UNUSED;
    1344013724        paLivenessEntries[idxCall].s2.bm64 = IEMLIVENESSPART2_ALL_UNUSED;
     13725# else
     13726        paLivenessEntries[idxCall].Bit0.bm64 = IEMLIVENESSBIT0_ALL_UNUSED;
     13727        paLivenessEntries[idxCall].Bit1.bm64 = IEMLIVENESSBIT1_ALL_UNUSED;
     13728# endif
    1344113729
    1344213730        /* Loop backwards thru the calls and fill in the other entries. */
     
    1344613734            PFNIEMNATIVELIVENESSFUNC const pfnLiveness = g_apfnIemNativeLivenessFunctions[pCallEntry->enmFunction];
    1344713735            if (pfnLiveness)
    13448                 pfnLiveness(pCallEntry, &paLivenessEntries[idxCall - 1], &paLivenessEntries[idxCall]);
     13736                pfnLiveness(pCallEntry, &paLivenessEntries[idxCall], &paLivenessEntries[idxCall - 1]);
    1344913737            else
    1345013738                IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(&paLivenessEntries[idxCall - 1], &paLivenessEntries[idxCall]);
     
    1354513833            Assert(pReNative->cCondDepth == 0);
    1354613834
     13835#if defined(LOG_ENABLED) && defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS)
     13836            if (LogIs2Enabled())
     13837            {
     13838                PCIEMLIVENESSENTRY pLivenessEntry = &pReNative->paLivenessEntries[idxCurCall];
     13839                static const char s_achState[] = "CUXI";
     13840
     13841                char szGpr[17];
     13842                for (unsigned i = 0; i < 16; i++)
     13843                    szGpr[i] = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_GprFirst)];
     13844                szGpr[16] = '\0';
     13845
     13846                char szSegBase[X86_SREG_COUNT + 1];
     13847                char szSegLimit[X86_SREG_COUNT + 1];
     13848                char szSegAttrib[X86_SREG_COUNT + 1];
     13849                char szSegSel[X86_SREG_COUNT + 1];
     13850                for (unsigned i = 0; i < X86_SREG_COUNT; i++)
     13851                {
     13852                    szSegBase[i]   = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_SegBaseFirst)];
     13853                    szSegAttrib[i] = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_SegAttribFirst)];
     13854                    szSegLimit[i]  = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_SegLimitFirst)];
     13855                    szSegSel[i]    = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_SegSelFirst)];
     13856                }
     13857                szSegBase[X86_SREG_COUNT] = szSegAttrib[X86_SREG_COUNT] = szSegLimit[X86_SREG_COUNT]
     13858                    = szSegSel[X86_SREG_COUNT] = '\0';
     13859
     13860                char szEFlags[8];
     13861                for (unsigned i = 0; i < 7; i++)
     13862                    szEFlags[i] = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_EFlags)];
     13863                szEFlags[7] = '\0';
     13864
     13865                Log2(("liveness: grp=%s segbase=%s segattr=%s seglim=%s segsel=%s efl=%s\n",
     13866                      szGpr, szSegBase, szSegAttrib, szSegLimit, szSegSel, szEFlags));
     13867            }
     13868#endif
     13869
    1354713870            /*
    1354813871             * Advance.
  • trunk/src/VBox/VMM/VMMR3/IEMR3.cpp

    r103234 r103318  
    435435# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
    436436#  ifdef VBOX_WITH_STATISTICS
     437        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     438                        "Number of calls to iemNativeRegAllocFindFree.",
     439                        "/IEM/CPU%u/re/NativeRegFindFree", idCpu);
     440#  endif
     441        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     442                        "Number of times iemNativeRegAllocFindFree needed to free a variable.",
     443                        "/IEM/CPU%u/re/NativeRegFindFreeVar", idCpu);
     444#  ifdef VBOX_WITH_STATISTICS
     445        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
      446                        "Number of times iemNativeRegAllocFindFree did not need to free any variables.",
     447                        "/IEM/CPU%u/re/NativeRegFindFreeNoVar", idCpu);
     448        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
      449                        "Times liveness info freed up shadowed guest registers in iemNativeRegAllocFindFree.",
     450                        "/IEM/CPU%u/re/NativeRegFindFreeLivenessUnshadowed", idCpu);
     451        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped,    STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     452                        "Times liveness info helped finding the return register in iemNativeRegAllocFindFree.",
     453                        "/IEM/CPU%u/re/NativeRegFindFreeLivenessHelped", idCpu);
     454
    437455        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfSkippable,    STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.CF updating",    "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippable", idCpu);
    438456        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfSkippable,    STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.PF updating",    "/IEM/CPU%u/re/NativeLivenessEFlagsPfSkippable", idCpu);
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r103267 r103318  
    17541754    STAMCOUNTER             StatNativeCodeTlbHitsForNewPageWithOffset;
    17551755
     1756    /** Native recompiler: Number of calls to iemNativeRegAllocFindFree. */
     1757    STAMCOUNTER             StatNativeRegFindFree;
     1758    /** Native recompiler: Number of times iemNativeRegAllocFindFree needed
     1759     *  to free a variable. */
     1760    STAMCOUNTER             StatNativeRegFindFreeVar;
     1761    /** Native recompiler: Number of times iemNativeRegAllocFindFree did
     1762     *  not need to free any variables. */
     1763    STAMCOUNTER             StatNativeRegFindFreeNoVar;
     1764    /** Native recompiler: Liveness info freed shadowed guest registers in
     1765     * iemNativeRegAllocFindFree. */
     1766    STAMCOUNTER             StatNativeRegFindFreeLivenessUnshadowed;
     1767    /** Native recompiler: Liveness info helped with the allocation in
     1768     *  iemNativeRegAllocFindFree. */
     1769    STAMCOUNTER             StatNativeRegFindFreeLivenessHelped;
     1770
    17561771    /** Native recompiler: Number of opportunities to skip EFLAGS.CF updating. */
    17571772    STAMCOUNTER             StatNativeLivenessEflCfSkippable;
     
    17831798    STAMCOUNTER             StatNativeLivenessEflOtherRequired;
    17841799
    1785     uint64_t                au64Padding[5];
     1800    //uint64_t                au64Padding[3];
    17861801    /** @} */
    17871802
  • trunk/src/VBox/VMM/include/IEMN8veRecompiler.h

    r103181 r103318  
    381381typedef IEMNATIVEFIXUP *PIEMNATIVEFIXUP;
    382382
     383//#define IEMLIVENESS_OLD_LAYOUT
     384#ifdef IEMLIVENESS_OLD_LAYOUT
    383385
    384386typedef union IEMLIVENESSPART1
     
    388390    {                                     /*   bit no */
    389391        uint64_t    bmGprs      : 32;   /**< 0x00 /  0: The 16 general purpose registers. */
    390         uint64_t    u2UnusedPc  : 2;    /**< 0x20 / 32: (PC in ) */
     392        uint64_t    fUnusedPc   : 2;    /**< 0x20 / 32: (PC in ) */
    391393        uint64_t    u6Padding   : 6;    /**< 0x22 / 34: */
    392394        uint64_t    bmSegBase   : 12;   /**< 0x28 / 40: */
     
    403405        uint64_t    bmSegLimit  : 12;   /**< 0x40 / 64: */
    404406        uint64_t    bmSegSel    : 12;   /**< 0x4c / 76: */
    405         uint64_t    u2EflOther  : 2;    /**< 0x58 / 88: Other EFLAGS bits   (~X86_EFL_STATUS_BITS & X86_EFL_LIVE_MASK). */
    406         uint64_t    u2EflCf     : 2;    /**< 0x5a / 90: Carry flag          (X86_EFL_CF / 0). */
    407         uint64_t    u2EflPf     : 2;    /**< 0x5c / 92: Parity flag         (X86_EFL_PF / 2). */
    408         uint64_t    u2EflAf     : 2;    /**< 0x5e / 94: Auxilary carry flag (X86_EFL_AF / 4). */
    409         uint64_t    u2EflZf     : 2;    /**< 0x60 / 96: Zero flag           (X86_EFL_ZF / 6). */
    410         uint64_t    u2EflSf     : 2;    /**< 0x62 / 98: Signed flag         (X86_EFL_SF / 7). */
    411         uint64_t    u2EflOf     : 2;    /**< 0x64 /100: Overflow flag       (X86_EFL_OF / 12). */
    412         uint64_t    u24Unused   : 24;     /* 0x66 /102 -> 0x80/128 */
     407        uint64_t    fEflOther   : 2;    /**< 0x58 / 88: Other EFLAGS bits   (~X86_EFL_STATUS_BITS & X86_EFL_LIVE_MASK). First! */
     408        uint64_t    fEflCf      : 2;    /**< 0x5a / 90: Carry flag          (X86_EFL_CF / 0). */
     409        uint64_t    fEflPf      : 2;    /**< 0x5c / 92: Parity flag         (X86_EFL_PF / 2). */
      410        uint64_t    fEflAf      : 2;    /**< 0x5e / 94: Auxiliary carry flag (X86_EFL_AF / 4). */
     411        uint64_t    fEflZf      : 2;    /**< 0x60 / 96: Zero flag           (X86_EFL_ZF / 6). */
     412        uint64_t    fEflSf      : 2;    /**< 0x62 / 98: Signed flag         (X86_EFL_SF / 7). */
     413        uint64_t    fEflOf      : 2;    /**< 0x64 /100: Overflow flag       (X86_EFL_OF / 12). */
     414        uint64_t    u24Unused   : 26;     /* 0x66 /102 -> 0x80/128 */
    413415    };
    414416} IEMLIVENESSPART2;
    415417AssertCompileSize(IEMLIVENESSPART2, 8);
     418# define IEMLIVENESSPART2_REG_COUNT 19
     419
     420#else
     421
     422/**
     423 * One bit of the state.
     424 *
     425 * Each register state takes up two bits.  We keep the two bits in two separate
     426 * 64-bit words to simplify applying them to the guest shadow register mask in
     427 * the register allocator.
     428 */
     429typedef union IEMLIVENESSBIT
     430{
     431    uint64_t        bm64;
     432    RT_GCC_EXTENSION struct
     433    {                                     /*   bit no */
     434        uint64_t    bmGprs      : 16;   /**< 0x00 /  0: The 16 general purpose registers. */
     435        uint64_t    fUnusedPc   :  1;   /**< 0x10 / 16: (PC in ) */
     436        uint64_t    uPadding1   :  3;   /**< 0x11 / 17: */
     437        uint64_t    bmSegBase   :  6;   /**< 0x14 / 20: */
     438        uint64_t    bmSegAttrib :  6;   /**< 0x1a / 26: */
     439        uint64_t    bmSegLimit  :  6;   /**< 0x20 / 32: */
     440        uint64_t    bmSegSel    :  6;   /**< 0x26 / 38: */
     441        uint64_t    fEflOther   :  1;   /**< 0x2c / 44: Other EFLAGS bits   (~X86_EFL_STATUS_BITS & X86_EFL_LIVE_MASK). First! */
     442        uint64_t    fEflCf      :  1;   /**< 0x2d / 45: Carry flag          (X86_EFL_CF / 0). */
     443        uint64_t    fEflPf      :  1;   /**< 0x2e / 46: Parity flag         (X86_EFL_PF / 2). */
      444        uint64_t    fEflAf      :  1;   /**< 0x2f / 47: Auxiliary carry flag (X86_EFL_AF / 4). */
     445        uint64_t    fEflZf      :  1;   /**< 0x30 / 48: Zero flag           (X86_EFL_ZF / 6). */
     446        uint64_t    fEflSf      :  1;   /**< 0x31 / 49: Signed flag         (X86_EFL_SF / 7). */
     447        uint64_t    fEflOf      :  1;   /**< 0x32 / 50: Overflow flag       (X86_EFL_OF / 12). */
     448        uint64_t    uUnused     : 13;     /* 0x33 / 51 -> 0x40/64 */
     449    };
     450} IEMLIVENESSBIT;
     451AssertCompileSize(IEMLIVENESSBIT, 8);
     452
     453#endif
    416454
    417455/**
     
    430468    RT_GCC_EXTENSION struct
    431469    {
     470#ifdef IEMLIVENESS_OLD_LAYOUT
    432471        IEMLIVENESSPART1 s1;
    433472        IEMLIVENESSPART2 s2;
     473#else
     474        /** Bit \#0 of the register states. */
     475        IEMLIVENESSBIT Bit0;
     476        /** Bit \#1 of the register states. */
     477        IEMLIVENESSBIT Bit1;
     478#endif
    434479    };
    435480} IEMLIVENESSENTRY;
     
    442487/** @name 64-bit value masks for IEMLIVENESSENTRY.
    443488 * @{ */                                      /*         0xzzzzyyyyxxxxwwww */
    444 #define IEMLIVENESSPART1_MASK                   UINT64_C(0xffffff00ffffffff)
    445 #define IEMLIVENESSPART2_MASK                   UINT64_C(0x0000003fffffffff)
    446 
    447 #define IEMLIVENESSPART1_XCPT_OR_CALL           UINT64_C(0xaaaaaa00aaaaaaaa)
    448 #define IEMLIVENESSPART2_XCPT_OR_CALL           UINT64_C(0x0000002aaaaaaaaa)
    449 
    450 #define IEMLIVENESSPART1_ALL_UNUSED             UINT64_C(0x5555550055555555)
    451 #define IEMLIVENESSPART2_ALL_UNUSED             UINT64_C(0x0000001555555555)
    452 
    453 #define IEMLIVENESSPART1_ALL_EFL_MASK           UINT64_C(0x0000000000000000)
    454 #define IEMLIVENESSPART2_ALL_EFL_MASK           UINT64_C(0x0000003fff000000)
    455 
    456 #define IEMLIVENESSPART1_ALL_EFL_INPUT          IEMLIVENESSPART1_ALL_EFL_MASK
    457 #define IEMLIVENESSPART2_ALL_EFL_INPUT          IEMLIVENESSPART2_ALL_EFL_MASK
     489#ifdef IEMLIVENESS_OLD_LAYOUT
     490# define IEMLIVENESSPART1_MASK                  UINT64_C(0xffffff00ffffffff)
     491# define IEMLIVENESSPART2_MASK                  UINT64_C(0x0000003fffffffff)
     492#else
     493# define IEMLIVENESSBIT_MASK                    UINT64_C(0x0007fffffff0ffff)
     494#endif
     495
     496#ifdef IEMLIVENESS_OLD_LAYOUT
     497# define IEMLIVENESSPART1_XCPT_OR_CALL          UINT64_C(0xaaaaaa00aaaaaaaa)
     498# define IEMLIVENESSPART2_XCPT_OR_CALL          UINT64_C(0x0000002aaaaaaaaa)
     499#else
     500# define IEMLIVENESSBIT0_XCPT_OR_CALL           UINT64_C(0x0000000000000000)
     501# define IEMLIVENESSBIT1_XCPT_OR_CALL           IEMLIVENESSBIT_MASK
     502#endif
     503
     504#ifdef IEMLIVENESS_OLD_LAYOUT
     505# define IEMLIVENESSPART1_ALL_UNUSED            UINT64_C(0x5555550055555555)
     506# define IEMLIVENESSPART2_ALL_UNUSED            UINT64_C(0x0000001555555555)
     507#else
     508# define IEMLIVENESSBIT0_ALL_UNUSED             IEMLIVENESSBIT_MASK
     509# define IEMLIVENESSBIT1_ALL_UNUSED             UINT64_C(0x0000000000000000)
     510#endif
     511
     512#ifdef IEMLIVENESS_OLD_LAYOUT
     513# define IEMLIVENESSPART1_ALL_EFL_MASK          UINT64_C(0x0000000000000000)
     514# define IEMLIVENESSPART2_ALL_EFL_MASK          UINT64_C(0x0000003fff000000)
     515#else
     516# define IEMLIVENESSBIT_ALL_EFL_MASK            UINT64_C(0x0007f00000000000)
     517#endif
     518
     519#ifdef IEMLIVENESS_OLD_LAYOUT
     520# define IEMLIVENESSPART1_ALL_EFL_INPUT         IEMLIVENESSPART1_ALL_EFL_MASK
     521# define IEMLIVENESSPART2_ALL_EFL_INPUT         IEMLIVENESSPART2_ALL_EFL_MASK
     522#else
     523# define IEMLIVENESSBIT0_ALL_EFL_INPUT          IEMLIVENESSBIT_ALL_EFL_MASK
     524# define IEMLIVENESSBIT1_ALL_EFL_INPUT          IEMLIVENESSBIT_ALL_EFL_MASK
     525#endif
    458526/** @} */
    459527
     
    519587/** The number of bits per state.   */
    520588#define IEMLIVENESS_STATE_BIT_COUNT     2
     589/** Check if we're expecting accesses to a register with the given (previous) liveness state.
     590 * .  */
     591#define IEMLIVENESS_STATE_IS_ACCESS_EXPECTED(a_uState)  ((uint32_t)((a_uState) - 1U) >= (uint32_t)(IEMLIVENESS_STATE_INPUT - 1U))
     592/** Check if a register clobbering is expected given the (previous) liveness state.
     593 * The state must be either CLOBBERED or XCPT_OR_CALL, but it may also
     594 * include INPUT if the register is used in more than one place. */
     595#define IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(a_uState) ((uint32_t)(a_uState) != IEMLIVENESS_STATE_UNUSED)
    521596/** @} */
    522597
     
    527602 *
    528603 * @{ */
    529 /** Initializing the outgoing state with a potnetial xcpt or call state.
    530  * This only works when all changes will be IEMLIVENESS_STATE_INPUT. */
    531 #define IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(a_pOutgoing, a_pIncoming) \
     604/** Initializing the outgoing state with a potential xcpt or call state.
     605 * This only works when all later changes will be IEMLIVENESS_STATE_INPUT. */
     606#ifdef IEMLIVENESS_OLD_LAYOUT
     607# define IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(a_pOutgoing, a_pIncoming) \
    532608    do { \
    533609        uint64_t uTmp1 = (a_pIncoming)->s1.bm64; \
     
    539615        (a_pOutgoing)->s2.bm64 = uTmp2 | IEMLIVENESSPART2_XCPT_OR_CALL; \
    540616    } while (0)
     617#else
     618# define IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(a_pOutgoing, a_pIncoming) \
     619    do { \
     620        (a_pOutgoing)->Bit0.bm64 = (a_pIncoming)->Bit0.bm64 & (a_pIncoming)->Bit1.bm64; \
     621        (a_pOutgoing)->Bit1.bm64 = IEMLIVENESSBIT1_XCPT_OR_CALL; \
     622    } while (0)
     623#endif
    541624
    542625/** Adds a segment base register as input to the outgoing state. */
    543 #define IEM_LIVENESS_RAW_SEG_BASE_INPUT(a_pOutgoing, a_iSReg) \
     626#ifdef IEMLIVENESS_OLD_LAYOUT
     627# define IEM_LIVENESS_RAW_SEG_BASE_INPUT(a_pOutgoing, a_iSReg) \
    544628    (a_pOutgoing)->s1.bmSegBase   |= (uint32_t)IEMLIVENESS_STATE_INPUT << ((a_iSReg) * IEMLIVENESS_STATE_BIT_COUNT)
     629#else
     630# define IEM_LIVENESS_RAW_SEG_BASE_INPUT(a_pOutgoing, a_iSReg) do { \
     631        (a_pOutgoing)->Bit0.bmSegBase   |= RT_BIT_64(a_iSReg); \
     632        (a_pOutgoing)->Bit1.bmSegBase   |= RT_BIT_64(a_iSReg); \
     633    } while (0)
     634#endif
    545635
    546636/** Adds a segment attribute register as input to the outgoing state. */
    547 #define IEM_LIVENESS_RAW_SEG_ATTRIB_INPUT(a_pOutgoing, a_iSReg) \
     637#ifdef IEMLIVENESS_OLD_LAYOUT
     638# define IEM_LIVENESS_RAW_SEG_ATTRIB_INPUT(a_pOutgoing, a_iSReg) \
    548639    (a_pOutgoing)->s1.bmSegAttrib |= (uint32_t)IEMLIVENESS_STATE_INPUT << ((a_iSReg) * IEMLIVENESS_STATE_BIT_COUNT)
     640#else
     641# define IEM_LIVENESS_RAW_SEG_ATTRIB_INPUT(a_pOutgoing, a_iSReg) do { \
     642        (a_pOutgoing)->Bit0.bmSegAttrib |= RT_BIT_64(a_iSReg); \
     643        (a_pOutgoing)->Bit1.bmSegAttrib |= RT_BIT_64(a_iSReg); \
     644    } while (0)
     645#endif
     646
    549647
    550648/** Adds a segment limit register as input to the outgoing state. */
    551 #define IEM_LIVENESS_RAW_SEG_LIMIT_INPUT(a_pOutgoing, a_iSReg) \
     649#ifdef IEMLIVENESS_OLD_LAYOUT
     650# define IEM_LIVENESS_RAW_SEG_LIMIT_INPUT(a_pOutgoing, a_iSReg) \
    552651    (a_pOutgoing)->s2.bmSegLimit  |= (uint32_t)IEMLIVENESS_STATE_INPUT << ((a_iSReg) * IEMLIVENESS_STATE_BIT_COUNT)
     652#else
     653# define IEM_LIVENESS_RAW_SEG_LIMIT_INPUT(a_pOutgoing, a_iSReg) do { \
     654        (a_pOutgoing)->Bit0.bmSegLimit  |= RT_BIT_64(a_iSReg); \
     655        (a_pOutgoing)->Bit1.bmSegLimit  |= RT_BIT_64(a_iSReg); \
     656    } while (0)
     657#endif
     658
     659/** Adds a segment limit register as input to the outgoing state. */
     660#ifdef IEMLIVENESS_OLD_LAYOUT
     661# define IEM_LIVENESS_RAW_EFLAGS_ONE_INPUT(a_pOutgoing, a_fEflMember) \
     662    (a_pOutgoing)->s2.a_fEflMember |= IEMLIVENESS_STATE_INPUT
     663#else
     664# define IEM_LIVENESS_RAW_EFLAGS_ONE_INPUT(a_pOutgoing, a_fEflMember) do { \
     665        (a_pOutgoing)->Bit0.a_fEflMember  |= 1; \
     666        (a_pOutgoing)->Bit1.a_fEflMember  |= 1; \
     667    } while (0)
     668#endif
    553669/** @} */
    554670
     
    10481164                                                            IEMNATIVEGSTREG enmGstReg,
    10491165                                                            IEMNATIVEGSTREGUSE enmIntendedUse = kIemNativeGstRegUse_ReadOnly,
    1050                                                             bool fNoVolatileRegs = false);
     1166                                                            bool fNoVolatileRegs = false, bool fSkipLivenessAssert = false);
    10511167DECL_HIDDEN_THROW(uint8_t)  iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
    10521168                                                                            IEMNATIVEGSTREG enmGstReg);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette