Changeset 105768 in vbox
- Timestamp: Aug 21, 2024 2:01:05 PM
- Location: trunk/src/VBox/VMM
- Files: 7 edited
- VMMAll/IEMAllN8veLiveness.cpp (modified) (2 diffs)
- VMMAll/IEMAllN8vePython.py (modified) (1 diff)
- VMMAll/IEMAllN8veRecompFuncs.h (modified) (6 diffs)
- VMMAll/IEMAllThrdFuncs.cpp (modified) (7 diffs)
- VMMAll/IEMAllThrdPython.py (modified) (22 diffs)
- include/IEMInline.h (modified) (18 diffs)
- include/IEMOpHlp.h (modified) (1 diff)
trunk/src/VBox/VMM/VMMAll/IEMAllN8veLiveness.cpp
r105652 r105768 243 243 244 244 245 #define IEM_LIVENESS_PC_NO_FLAGS() NOP() 246 #define IEM_LIVENESS_PC_WITH_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther) 247 #define IEM_LIVENESS_PC16_JMP_NO_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS) 248 #define IEM_LIVENESS_PC32_JMP_NO_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS) 249 #define IEM_LIVENESS_PC64_JMP_NO_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL() 250 #define IEM_LIVENESS_PC16_JMP_WITH_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS) 251 #define IEM_LIVENESS_PC32_JMP_WITH_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS) 252 #define IEM_LIVENESS_PC64_JMP_WITH_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther) 245 #define IEM_LIVENESS_PC_NO_FLAGS() NOP() 246 #define IEM_LIVENESS_PC_WITH_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther) 247 #define IEM_LIVENESS_PC16_JMP_NO_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS) 248 #define IEM_LIVENESS_PC32_JMP_NO_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS) 249 #define IEM_LIVENESS_PC32_FLAT_JMP_NO_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL() 250 #define IEM_LIVENESS_PC64_JMP_NO_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL() 251 #define IEM_LIVENESS_PC64_INTRAPG_JMP_NO_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL() /* Typically ends TB. */ 252 #define IEM_LIVENESS_PC16_JMP_WITH_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS) 253 #define IEM_LIVENESS_PC32_JMP_WITH_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS) 254 #define IEM_LIVENESS_PC32_FLAT_JMP_WITH_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther) 255 #define IEM_LIVENESS_PC64_JMP_WITH_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther) 256 #define IEM_LIVENESS_PC64_INTRAPG_JMP_WITH_FLAGS() IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther) 253 257 254 258 #ifndef IEMLIVENESS_EXTENDED_LAYOUT … … 308 312 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_cbInstr, a_rcNormal) IEM_LIVENESS_PC_WITH_FLAGS() 309 313 310 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16(a_i8, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC16_JMP_NO_FLAGS() 311 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) IEM_LIVENESS_PC32_JMP_NO_FLAGS() 312 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) IEM_LIVENESS_PC64_JMP_NO_FLAGS() 313 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i8, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC16_JMP_WITH_FLAGS() 314 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) IEM_LIVENESS_PC32_JMP_WITH_FLAGS() 315 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) IEM_LIVENESS_PC64_JMP_WITH_FLAGS() 316 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC16_JMP_NO_FLAGS() 317 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr, a_rcNormal) 
IEM_LIVENESS_PC32_JMP_NO_FLAGS() 318 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC64_JMP_NO_FLAGS() 319 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC16_JMP_WITH_FLAGS() 320 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC32_JMP_WITH_FLAGS() 321 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC64_JMP_WITH_FLAGS() 322 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC16_JMP_NO_FLAGS() 323 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC32_JMP_NO_FLAGS() 324 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC64_JMP_NO_FLAGS() 325 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC16_JMP_WITH_FLAGS() 326 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC32_JMP_WITH_FLAGS() 327 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC64_JMP_WITH_FLAGS() 328 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16(a_u16NewIP) IEM_LIVENESS_PC16_JMP_NO_FLAGS() 329 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32(a_u16NewIP) IEM_LIVENESS_PC32_JMP_NO_FLAGS() 330 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64(a_u16NewIP) IEM_LIVENESS_PC64_JMP_NO_FLAGS() 331 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP) IEM_LIVENESS_PC16_JMP_WITH_FLAGS() 332 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP) IEM_LIVENESS_PC32_JMP_WITH_FLAGS() 333 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP) IEM_LIVENESS_PC64_JMP_WITH_FLAGS() 334 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP) IEM_LIVENESS_PC32_JMP_NO_FLAGS() 335 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP) IEM_LIVENESS_PC64_JMP_NO_FLAGS() 336 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP) IEM_LIVENESS_PC32_JMP_WITH_FLAGS() 337 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP) IEM_LIVENESS_PC64_JMP_WITH_FLAGS() 338 #define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64(a_u32NewEIP) IEM_LIVENESS_PC64_JMP_NO_FLAGS() 339 #define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP) IEM_LIVENESS_PC64_JMP_WITH_FLAGS() 314 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16(a_i8, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC16_JMP_NO_FLAGS() 315 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) IEM_LIVENESS_PC32_JMP_NO_FLAGS() 316 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) IEM_LIVENESS_PC32_FLAT_JMP_NO_FLAGS() 317 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) IEM_LIVENESS_PC64_JMP_NO_FLAGS() 318 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) IEM_LIVENESS_PC64_INTRAPG_JMP_NO_FLAGS() 319 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i8, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC16_JMP_WITH_FLAGS() 320 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) IEM_LIVENESS_PC32_JMP_WITH_FLAGS() 321 
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) IEM_LIVENESS_PC32_FLAT_JMP_WITH_FLAGS() 322 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) IEM_LIVENESS_PC64_JMP_WITH_FLAGS() 323 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) IEM_LIVENESS_PC64_INTRAPG_JMP_WITH_FLAGS() 324 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC16_JMP_NO_FLAGS() 325 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC32_JMP_NO_FLAGS() 326 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC32_FLAT_JMP_NO_FLAGS() 327 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC64_JMP_NO_FLAGS() 328 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC64_INTRAPG_JMP_NO_FLAGS() 329 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC16_JMP_WITH_FLAGS() 330 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC32_JMP_WITH_FLAGS() 331 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC32_FLAT_JMP_WITH_FLAGS() 332 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC64_JMP_WITH_FLAGS() 333 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC64_INTRAPG_JMP_WITH_FLAGS() 334 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC16_JMP_NO_FLAGS() 335 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC32_JMP_NO_FLAGS() 336 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC32_FLAT_JMP_NO_FLAGS() 337 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC64_JMP_NO_FLAGS() 338 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC64_INTRAPG_JMP_NO_FLAGS() 339 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC16_JMP_WITH_FLAGS() 340 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC32_JMP_WITH_FLAGS() 341 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC32_FLAT_JMP_WITH_FLAGS() 342 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC64_JMP_WITH_FLAGS() 343 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) IEM_LIVENESS_PC64_INTRAPG_JMP_WITH_FLAGS() 344 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16(a_u16NewIP) IEM_LIVENESS_PC16_JMP_NO_FLAGS() 345 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32(a_u16NewIP) IEM_LIVENESS_PC32_JMP_NO_FLAGS() 346 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64(a_u16NewIP) IEM_LIVENESS_PC64_JMP_NO_FLAGS() 347 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP) IEM_LIVENESS_PC16_JMP_WITH_FLAGS() 348 #define 
IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP) IEM_LIVENESS_PC32_JMP_WITH_FLAGS() 349 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP) IEM_LIVENESS_PC64_JMP_WITH_FLAGS() 350 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP) IEM_LIVENESS_PC32_JMP_NO_FLAGS() 351 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP) IEM_LIVENESS_PC64_JMP_NO_FLAGS() 352 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP) IEM_LIVENESS_PC32_JMP_WITH_FLAGS() 353 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP) IEM_LIVENESS_PC64_JMP_WITH_FLAGS() 354 #define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64(a_u32NewEIP) IEM_LIVENESS_PC64_JMP_NO_FLAGS() 355 #define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP) IEM_LIVENESS_PC64_JMP_WITH_FLAGS() 340 356 341 357 #define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr) do { IEM_LIVENESS_PC16_JMP_NO_FLAGS(); IEM_LIVENESS_STACK(); } while (0) -
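The new FLAT and INTRAPG liveness macros are strictly weaker than the variants they specialize: a flat 32-bit jump no longer lists CS.LIM as an input, and an intra-page 64-bit jump keeps only the exception/call mark. A minimal compilable sketch of that relationship, using an assumed simplified accumulator (Liveness and markJmp32Liveness are illustrative stand-ins, not VBox types; the real macros update IEMLIVENESSENTRY bitfields):

    #include <cassert>

    struct Liveness
    {
        bool fXcptOrCall   = false; // may raise #GP(0) or otherwise exit the TB
        bool fCsLimitInput = false; // generated code reads CS.LIM
    };

    template<bool a_fFlat>
    void markJmp32Liveness(Liveness &rLive)
    {
        rLive.fXcptOrCall = true;       // every jump variant keeps the xcpt/call mark
        if (!a_fFlat)
            rLive.fCsLimitInput = true; // cf. IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS)
    }

    int main()
    {
        Liveness Checked, Flat;
        markJmp32Liveness<false>(Checked);
        markJmp32Liveness<true>(Flat);
        assert(Checked.fCsLimitInput && !Flat.fCsLimitInput);
        return 0;
    }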
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py
r105652 r105768 65 65 'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16': (None, True, True, True, ), 66 66 'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32': (None, True, True, True, ), 67 'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT': (None, True, True, True, ), 67 68 'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64': (None, True, True, True, ), 69 'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG': (None, True, True, True, ), 68 70 'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16': (None, True, True, True, ), 69 71 'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32': (None, True, True, True, ), 72 'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT': (None, True, True, True, ), 70 73 'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64': (None, True, True, True, ), 74 'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG': (None, True, True, True, ), 71 75 'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32': (None, True, True, True, ), 76 'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT': (None, True, True, True, ), 72 77 'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64': (None, True, True, True, ), 78 'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG': (None, True, True, True, ), 73 79 74 80 'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16_WITH_FLAGS': (None, True, True, True, ), 75 81 'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS': (None, True, True, True, ), 82 'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS': (None, True, True, True, ), 76 83 'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS': (None, True, True, True, ), 84 'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS': (None, True, True, True, ), 77 85 'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS': (None, True, True, True, ), 78 86 'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS': (None, True, True, True, ), 87 'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS': (None, True, True, True, ), 79 88 'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS': (None, True, True, True, ), 89 'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS': (None, True, True, True, ), 80 90 'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS': (None, True, True, True, ), 91 'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS': (None, True, True, True, ), 81 92 'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS': (None, True, True, True, ), 93 'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS': (None, True, True, True, ), 82 94 83 95 'IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC16': (None, True, True, True, ), 96 'IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC32': (None, True, True, True, ), 84 97 'IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC32': (None, True, True, True, ), 85 98 'IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC64': (None, True, True, True, ), -
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h
r105739 r105768 610 610 611 611 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \ 612 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags (pReNative, off, (a_cbInstr), (int8_t)(a_i8), \613 (a_enmEffOpSize), pCallEntry->idxInstr); \612 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \ 613 (a_enmEffOpSize), pCallEntry->idxInstr); \ 614 614 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8)) 615 615 616 616 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \ 617 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags (pReNative, off, (a_cbInstr), (int8_t)(a_i8), \618 (a_enmEffOpSize), pCallEntry->idxInstr); \617 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \ 618 (a_enmEffOpSize), pCallEntry->idxInstr); \ 619 619 off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \ 620 620 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8)) 621 621 622 622 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal) \ 623 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags (pReNative, off, (a_cbInstr), (int16_t)(a_i16), \624 IEMMODE_16BIT, pCallEntry->idxInstr); \623 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \ 624 IEMMODE_16BIT, pCallEntry->idxInstr); \ 625 625 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16)) 626 626 627 627 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \ 628 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags (pReNative, off, (a_cbInstr), (int16_t)(a_i16), \629 IEMMODE_16BIT, pCallEntry->idxInstr); \628 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \ 629 IEMMODE_16BIT, pCallEntry->idxInstr); \ 630 630 off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \ 631 631 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16)) 632 632 633 633 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal) \ 634 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags (pReNative, off, (a_cbInstr), (a_i32), \635 IEMMODE_64BIT, pCallEntry->idxInstr); \634 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (a_i32), \ 635 IEMMODE_64BIT, pCallEntry->idxInstr); \ 636 636 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32)) 637 637 638 638 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \ 639 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), \ 640 IEMMODE_64BIT, pCallEntry->idxInstr); \ 639 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (a_i32), \ 640 IEMMODE_64BIT, pCallEntry->idxInstr); \ 641 off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \ 642 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32)) 643 644 645 #define 
IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \ 646 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \ 647 (a_enmEffOpSize), pCallEntry->idxInstr); \ 648 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8)) 649 650 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \ 651 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \ 652 (a_enmEffOpSize), pCallEntry->idxInstr); \ 653 off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \ 654 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8)) 655 656 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG(a_i16, a_cbInstr, a_rcNormal) \ 657 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \ 658 IEMMODE_16BIT, pCallEntry->idxInstr); \ 659 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16)) 660 661 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \ 662 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \ 663 IEMMODE_16BIT, pCallEntry->idxInstr); \ 664 off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \ 665 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16)) 666 667 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG(a_i32, a_cbInstr, a_rcNormal) \ 668 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (a_i32), \ 669 IEMMODE_64BIT, pCallEntry->idxInstr); \ 670 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32)) 671 672 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \ 673 off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (a_i32), \ 674 IEMMODE_64BIT, pCallEntry->idxInstr); \ 641 675 off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \ 642 676 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32)) … … 645 679 * iemRegRip64RelativeJumpS16AndFinishNoFlags and 646 680 * iemRegRip64RelativeJumpS32AndFinishNoFlags. */ 681 template<bool const a_fWithinPage> 647 682 DECL_INLINE_THROW(uint32_t) 648 683 iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr, … … 668 703 if (RT_LIKELY(enmEffOpSize == IEMMODE_64BIT)) 669 704 { 670 /* Check that the address is canonical, raising #GP(0) + exit TB if it isn't. */ 671 off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr); 705 /* Check that the address is canonical, raising #GP(0) + exit TB if it isn't. 706 We can skip this if the target is within the same page. 
*/ 707 if (!a_fWithinPage) 708 off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr); 672 709 } 673 710 else … … 687 724 688 725 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \ 689 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags (pReNative, off, (a_cbInstr), (int8_t)(a_i8), \690 (a_enmEffOpSize), pCallEntry->idxInstr); \726 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \ 727 (a_enmEffOpSize), pCallEntry->idxInstr); \ 691 728 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8)) 692 729 693 730 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \ 694 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags (pReNative, off, (a_cbInstr), (int8_t)(a_i8), \695 (a_enmEffOpSize), pCallEntry->idxInstr); \731 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \ 732 (a_enmEffOpSize), pCallEntry->idxInstr); \ 696 733 off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \ 697 734 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8)) 698 735 699 736 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr, a_rcNormal) \ 700 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags (pReNative, off, (a_cbInstr), (int16_t)(a_i16), \701 IEMMODE_16BIT, pCallEntry->idxInstr); \737 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \ 738 IEMMODE_16BIT, pCallEntry->idxInstr); \ 702 739 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16)) 703 740 704 741 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \ 705 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags (pReNative, off, (a_cbInstr), (int16_t)(a_i16), \706 IEMMODE_16BIT, pCallEntry->idxInstr); \742 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \ 743 IEMMODE_16BIT, pCallEntry->idxInstr); \ 707 744 off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \ 708 745 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16)) 709 746 710 747 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr, a_rcNormal) \ 711 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags (pReNative, off, (a_cbInstr), (a_i32), \712 IEMMODE_32BIT, pCallEntry->idxInstr); \748 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (a_i32), \ 749 IEMMODE_32BIT, pCallEntry->idxInstr); \ 713 750 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32)) 714 751 715 752 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \ 716 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), \ 717 IEMMODE_32BIT, pCallEntry->idxInstr); \ 753 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (a_i32), \ 754 IEMMODE_32BIT, pCallEntry->idxInstr); \ 755 off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \ 756 off = 
iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32)) 757 758 759 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \ 760 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \ 761 (a_enmEffOpSize), pCallEntry->idxInstr); \ 762 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8)) 763 764 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \ 765 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \ 766 (a_enmEffOpSize), pCallEntry->idxInstr); \ 767 off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \ 768 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8)) 769 770 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT(a_i16, a_cbInstr, a_rcNormal) \ 771 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \ 772 IEMMODE_16BIT, pCallEntry->idxInstr); \ 773 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16)) 774 775 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \ 776 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \ 777 IEMMODE_16BIT, pCallEntry->idxInstr); \ 778 off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \ 779 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16)) 780 781 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT(a_i32, a_cbInstr, a_rcNormal) \ 782 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (a_i32), \ 783 IEMMODE_32BIT, pCallEntry->idxInstr); \ 784 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32)) 785 786 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \ 787 off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (a_i32), \ 788 IEMMODE_32BIT, pCallEntry->idxInstr); \ 718 789 off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \ 719 790 off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32)) … … 722 793 * iemRegEip32RelativeJumpS16AndFinishNoFlags and 723 794 * iemRegEip32RelativeJumpS32AndFinishNoFlags. */ 795 template<bool const a_fFlat> 724 796 DECL_INLINE_THROW(uint32_t) 725 797 iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr, … … 748 820 749 821 /* Perform limit checking, potentially raising #GP(0) and exit the TB. */ 750 /** @todo we can skip this in 32-bit FLAT mode. */ 751 off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);822 if (!a_fFlat) 823 off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr); 752 824 753 825 off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip)); -
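The emit helpers are now specialized on a compile-time template parameter rather than branching at runtime, so the <true> instantiations contain no canonical-address (or CS.LIM) check at all. A sketch of the pattern under assumed names (updateRip64 and isCanonical are illustrative stand-ins for the native emitters above):

    #include <cstdint>
    #include <stdexcept>

    // x86-64 canonical: bits 63:48 must be the sign extension of bit 47.
    static bool isCanonical(uint64_t uAddr)
    {
        return (int64_t)(uAddr << 16) >> 16 == (int64_t)uAddr;
    }

    // Mirrors iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<a_fWithinPage>:
    // for the <true> instantiation the check below (and the code the real
    // helper would emit for it) vanishes at compile time.
    template<bool const a_fWithinPage>
    static uint64_t updateRip64(uint64_t uRip, uint8_t cbInstr, int64_t offDisp)
    {
        uint64_t const uNewRip = uRip + cbInstr + offDisp;
        if (!a_fWithinPage)
            if (!isCanonical(uNewRip))
                throw std::runtime_error("#GP(0): non-canonical RIP");
        return uNewRip;
    }

    int main()
    {
        (void)updateRip64<true>(0x1000, 2, 0x20);  // check elided at compile time
        (void)updateRip64<false>(0x1000, 2, 0x20); // check emitted; passes here
        return 0;
    }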
trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncs.cpp
r104419 r105768 126 126 127 127 /** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand 128 * size as extra parameters, for use in flat 32-bit code on 386 and later 129 * CPUs. */ 130 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \ 131 return iemRegEip32RelativeJumpS8FlatAndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal) 132 133 /** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand 128 134 * size as extra parameters, for use in 64-bit code. */ 129 135 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \ 130 136 return iemRegRip64RelativeJumpS8AndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal) 137 138 /** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand 139 * size as extra parameters, for use in 64-bit code jumping within a page. */ 140 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \ 141 return iemRegRip64RelativeJumpS8IntraPgAndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal) 131 142 132 143 … … 144 155 145 156 /** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand 157 * size as extra parameters, for use in flat 32-bit code on 386 and later 158 * CPUs and we need to check and clear flags. */ 159 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \ 160 return iemRegEip32RelativeJumpS8FlatAndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal) 161 162 /** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand 146 163 * size as extra parameters, for use in 64-bit code and we need to check and 147 164 * clear flags. */ … … 149 166 return iemRegRip64RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal) 150 167 168 /** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand 169 * size as extra parameters, for use in 64-bit code jumping within a page and we 170 * need to check and clear flags. */ 171 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \ 172 return iemRegRip64RelativeJumpS8IntraPgAndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal) 173 151 174 #undef IEM_MC_REL_JMP_S8_AND_FINISH 152 175 … … 163 186 164 187 /** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as 188 * param, for use in flat 32-bit code on 386 and later CPUs. */ 189 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT(a_i16, a_cbInstr, a_rcNormal) \ 190 return iemRegEip32RelativeJumpS16FlatAndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal) 191 192 /** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as 165 193 * param, for use in 64-bit code. */ 166 194 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal) \ 195 return iemRegRip64RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal) 196 197 /** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as 198 * param, for use in 64-bit code jumping with a page. 199 * @note No special function for this, there is nothing to save here. 
*/ 200 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG(a_i16, a_cbInstr, a_rcNormal) \ 167 201 return iemRegRip64RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal) 168 202 … … 181 215 182 216 /** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as 217 * param, for use in flat 32-bit code on 386 and later CPUs and we need 218 * to check and clear flags. */ 219 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \ 220 return iemRegEip32RelativeJumpS16FlatAndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal) 221 222 /** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as 183 223 * param, for use in 64-bit code and we need to check and clear flags. */ 184 224 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \ 225 return iemRegRip64RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal) 226 227 /** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as 228 * param, for use in 64-bit code jumping within a page and we need to check and 229 * clear flags. 230 * @note No special function for this, there is nothing to save here. */ 231 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \ 185 232 return iemRegRip64RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal) 186 233 … … 200 247 201 248 /** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as 249 * an extra parameter, for use in flat 32-bit code on 386+. */ 250 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT(a_i32, a_cbInstr, a_rcNormal) \ 251 return iemRegEip32RelativeJumpS32FlatAndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal) 252 253 /** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as 202 254 * an extra parameter, for use in 64-bit code. */ 203 255 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal) \ 204 256 return iemRegRip64RelativeJumpS32AndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal) 257 258 /** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as 259 * an extra parameter, for use in 64-bit code jumping within a page. */ 260 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG(a_i32, a_cbInstr, a_rcNormal) \ 261 return iemRegRip64RelativeJumpS32IntraPgAndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal) 205 262 206 263 … … 218 275 219 276 /** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as 277 * an extra parameter, for use in flat 32-bit code on 386+ and we need 278 * to check and clear flags. */ 279 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \ 280 return iemRegEip32RelativeJumpS32FlatAndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal) 281 282 /** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as 220 283 * an extra parameter, for use in 64-bit code and we need to check and clear 221 284 * flags. */ 222 285 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \ 223 286 return iemRegRip64RelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal) 287 288 /** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as 289 * an extra parameter, for use in 64-bit code jumping within a page and we need 290 * to check and clear flags. 
*/ 291 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \ 292 return iemRegRip64RelativeJumpS32IntraPgAndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal) 224 293 225 294 #undef IEM_MC_REL_JMP_S32_AND_FINISH -
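The _PC32_FLAT threaded variants bind to Flat helpers that omit the CS.LIM comparison, which is safe because a flat CS has base 0 and a 4 GiB limit, so any 32-bit EIP is valid. A simplified compilable sketch of the two paths (16-bit operand-size masking omitted; names are illustrative, cf. the real helpers in IEMInline.h below):

    #include <cstdint>
    #include <stdexcept>

    // Non-flat path: the new EIP must be checked against CS.LIM
    // (cf. iemRegEip32RelativeJumpS8AndFinishClearingRF).
    static uint32_t eip32Jump(uint32_t uEip, uint8_t cbInstr, int32_t offDisp, uint32_t cbCsLimit)
    {
        uint32_t const uNewEip = uEip + cbInstr + (uint32_t)offDisp;
        if (uNewEip > cbCsLimit)
            throw std::runtime_error("#GP(0): EIP beyond CS limit");
        return uNewEip;
    }

    // Flat path: CS spans the whole 4 GiB address space, so the limit check
    // disappears (cf. the new iemRegEip32RelativeJumpS8FlatAndFinish... helpers).
    static uint32_t eip32FlatJump(uint32_t uEip, uint8_t cbInstr, int32_t offDisp)
    {
        return uEip + cbInstr + (uint32_t)offDisp;
    }

    int main()
    {
        (void)eip32Jump(0x1000, 2, 0x10, 0xffff); // checked against CS.LIM
        (void)eip32FlatJump(0x1000, 2, 0x10);     // flat: nothing to check
        return 0;
    }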
trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py
r105673 r105768 199 199 ksVariation_32_NoJmp = '_32_NoJmp'; ##< 32-bit mode code (386+), conditional jump not taken. 200 200 ksVariation_32f_NoJmp = '_32f_NoJmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken. 201 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, E,S and SS flat and 4GB wide. 202 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, E,S and SS flat and 4GB wide, eflags. 201 ksVariation_32_Flat_Jmp = '_32_Flat_Jmp'; ##< 32-bit mode code (386+) with flat CS, SS, DS and ES, conditional jump taken. 202 ksVariation_32f_Flat_Jmp = '_32f_Flat_Jmp'; ##< 32-bit mode code (386+) with flat CS, SS, DS and ES, check+clear eflags, conditional jump taken. 203 ksVariation_32_Flat_NoJmp = '_32_Flat_NoJmp'; ##< 32-bit mode code (386+) with flat CS, SS, DS and ES, conditional jump not taken. 204 ksVariation_32f_Flat_NoJmp = '_32f_Flat_NoJmp'; ##< 32-bit mode code (386+) with flat CS, SS, DS and ES, check+clear eflags, conditional jump not taken. 205 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide. 206 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags. 203 207 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing. 204 208 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags. … … 208 212 ksVariation_64f_Jmp = '_64f_Jmp'; ##< 64-bit mode code, check+clear eflags, conditional jump taken. 209 213 ksVariation_64_NoJmp = '_64_NoJmp'; ##< 64-bit mode code, conditional jump not taken. 210 ksVariation_64f_NoJmp = '_64f_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump not taken. 214 ksVariation_64f_NoJmp = '_64f_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump within page not taken. 215 ksVariation_64_SamePg_Jmp = '_64_SamePg_Jmp'; ##< 64-bit mode code, conditional jump within page taken. 216 ksVariation_64f_SamePg_Jmp = '_64f_SamePg_Jmp'; ##< 64-bit mode code, check+clear eflags, conditional jump taken. 217 ksVariation_64_SamePg_NoJmp = '_64_SamePg_NoJmp'; ##< 64-bit mode code, conditional jump within page not taken. 218 ksVariation_64f_SamePg_NoJmp = '_64f_SamePg_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump within page not taken. 211 219 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS. 212 220 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags. 
… … 236 244 ksVariation_32_NoJmp, 237 245 ksVariation_32f_NoJmp, 246 ksVariation_32_Flat_Jmp, 247 ksVariation_32f_Flat_Jmp, 248 ksVariation_32_Flat_NoJmp, 249 ksVariation_32f_Flat_NoJmp, 238 250 ksVariation_32_Flat, 239 251 ksVariation_32f_Flat, … … 246 258 ksVariation_64_NoJmp, 247 259 ksVariation_64f_NoJmp, 260 ksVariation_64_SamePg_Jmp, 261 ksVariation_64f_SamePg_Jmp, 262 ksVariation_64_SamePg_NoJmp, 263 ksVariation_64f_SamePg_NoJmp, 248 264 ksVariation_64_FsGs, 249 265 ksVariation_64f_FsGs, … … 369 385 ksVariation_64_Jmp, 370 386 ksVariation_64f_Jmp, 387 ksVariation_64_SamePg_Jmp, 388 ksVariation_64f_SamePg_Jmp, 371 389 ksVariation_64_NoJmp, 372 390 ksVariation_64f_NoJmp, 391 ksVariation_64_SamePg_NoJmp, 392 ksVariation_64f_SamePg_NoJmp, 373 393 ksVariation_64_FsGs, 374 394 ksVariation_64f_FsGs, 375 395 ksVariation_32_Flat, 376 396 ksVariation_32f_Flat, 397 ksVariation_32_Flat_Jmp, 398 ksVariation_32f_Flat_Jmp, 399 ksVariation_32_Flat_NoJmp, 400 ksVariation_32f_Flat_NoJmp, 377 401 ksVariation_32, 378 402 ksVariation_32f, … … 422 446 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken', 423 447 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken', 448 ksVariation_32_Flat_Jmp: '32-bit flat+wide CS, ++ w/ conditional jump taken', 449 ksVariation_32f_Flat_Jmp: '32-bit flat+wide CS, ++ w/ eflag checking and clearing and conditional jump taken', 450 ksVariation_32_Flat_NoJmp: '32-bit flat+wide CS, ++ w/ conditional jump not taken', 451 ksVariation_32f_Flat_NoJmp: '32-bit flat+wide CS, ++ w/ eflag checking and clearing and conditional jump not taken', 424 452 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES', 425 453 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing', … … 429 457 ksVariation_64f: '64-bit w/ eflag checking and clearing', 430 458 ksVariation_64_Jmp: '64-bit w/ conditional jump taken', 431 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',459 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken', 432 460 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken', 433 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken', 461 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken', 462 ksVariation_64_SamePg_Jmp: '64-bit w/ conditional jump within page taken', 463 ksVariation_64f_SamePg_Jmp: '64-bit w/ eflag checking and clearing and conditional jumpwithin page taken', 464 ksVariation_64_SamePg_NoJmp: '64-bit w/ conditional jump within page not taken', 465 ksVariation_64f_SamePg_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump within page not taken', 434 466 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS', 435 467 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing', … … 449 481 ksVariation_32f_NoJmp: True, 450 482 ksVariation_32f_Flat: True, 483 ksVariation_32f_Flat_Jmp: True, 484 ksVariation_32f_Flat_NoJmp: True, 451 485 ksVariation_32f_Addr16: True, 452 486 ksVariation_64f: True, 453 487 ksVariation_64f_Jmp: True, 454 488 ksVariation_64f_NoJmp: True, 489 ksVariation_64f_SamePg_Jmp: True, 490 ksVariation_64f_SamePg_NoJmp: True, 455 491 ksVariation_64f_FsGs: True, 456 492 ksVariation_64f_Addr32: True, 457 493 }; 458 494 kdVariationsOnly64NoFlags = { 459 ksVariation_64: True, 460 ksVariation_64_Jmp: True, 461 ksVariation_64_NoJmp: True, 462 
ksVariation_64_FsGs: True, 463 ksVariation_64_Addr32: True, 495 ksVariation_64: True, 496 ksVariation_64_Jmp: True, 497 ksVariation_64_NoJmp: True, 498 ksVariation_64_SamePg_Jmp: True, 499 ksVariation_64_SamePg_NoJmp: True, 500 ksVariation_64_FsGs: True, 501 ksVariation_64_Addr32: True, 464 502 }; 465 503 kdVariationsOnly64WithFlags = { 466 ksVariation_64f: True, 467 ksVariation_64f_Jmp: True, 468 ksVariation_64f_NoJmp: True, 469 ksVariation_64f_FsGs: True, 470 ksVariation_64f_Addr32: True, 504 ksVariation_64f: True, 505 ksVariation_64f_Jmp: True, 506 ksVariation_64f_NoJmp: True, 507 ksVariation_64f_SamePg_Jmp: True, 508 ksVariation_64f_SamePg_NoJmp: True, 509 ksVariation_64f_FsGs: True, 510 ksVariation_64f_Addr32: True, 471 511 }; 472 512 kdVariationsOnlyPre386NoFlags = { … … 537 577 ksVariation_32_Jmp: True, 538 578 ksVariation_32_NoJmp: True, 579 ksVariation_32_Flat_Jmp: True, 580 ksVariation_32_Flat_NoJmp: True, 539 581 ksVariation_64_Jmp: True, 540 582 ksVariation_64_NoJmp: True, 583 ksVariation_64_SamePg_Jmp: True, 584 ksVariation_64_SamePg_NoJmp: True, 541 585 ksVariation_16f_Jmp: True, 542 586 ksVariation_16f_NoJmp: True, … … 545 589 ksVariation_32f_Jmp: True, 546 590 ksVariation_32f_NoJmp: True, 591 ksVariation_32f_Flat_Jmp: True, 592 ksVariation_32f_Flat_NoJmp: True, 547 593 ksVariation_64f_Jmp: True, 548 594 ksVariation_64f_NoJmp: True, 595 ksVariation_64f_SamePg_Jmp: True, 596 ksVariation_64f_SamePg_NoJmp: True, 549 597 }; 550 598 kdVariationsWithConditionalNoJmp = { … … 552 600 ksVariation_16_Pre386_NoJmp: True, 553 601 ksVariation_32_NoJmp: True, 602 ksVariation_32_Flat_NoJmp: True, 554 603 ksVariation_64_NoJmp: True, 604 ksVariation_64_SamePg_NoJmp: True, 555 605 ksVariation_16f_NoJmp: True, 556 606 ksVariation_16f_Pre386_NoJmp: True, 557 607 ksVariation_32f_NoJmp: True, 608 ksVariation_32f_Flat_NoJmp: True, 558 609 ksVariation_64f_NoJmp: True, 610 ksVariation_64f_SamePg_NoJmp: True, 611 }; 612 kdVariationsWithFlat32Conditional = { 613 ksVariation_32_Flat_Jmp: True, 614 ksVariation_32_Flat_NoJmp: True, 615 ksVariation_32f_Flat_Jmp: True, 616 ksVariation_32f_Flat_NoJmp: True, 617 }; 618 kdVariationsWithSamePgConditional = { 619 ksVariation_64_SamePg_Jmp: True, 620 ksVariation_64_SamePg_NoJmp: True, 621 ksVariation_64f_SamePg_Jmp: True, 622 ksVariation_64f_SamePg_NoJmp: True, 559 623 }; 560 624 kdVariationsOnlyPre386 = { … … 967 1031 }; 968 1032 1033 kdRelJmpMcWithFlatOrSamePageVariations = { 1034 'IEM_MC_REL_JMP_S8_AND_FINISH': True, 1035 'IEM_MC_REL_JMP_S16_AND_FINISH': True, 1036 'IEM_MC_REL_JMP_S32_AND_FINISH': True, 1037 }; 1038 969 1039 def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0, iLevel = 0): 970 1040 """ … … 1050 1120 and self.sVariation not in self.kdVariationsOnlyPre386): 1051 1121 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName); 1052 oNewStmt.sName += '_THREADED';1053 1122 if self.sVariation in self.kdVariationsOnly64NoFlags: 1054 oNewStmt.sName += '_PC64'; 1123 if ( self.sVariation not in self.kdVariationsWithSamePgConditional 1124 or oNewStmt.sName not in self.kdRelJmpMcWithFlatOrSamePageVariations): 1125 oNewStmt.sName += '_THREADED_PC64'; 1126 else: 1127 oNewStmt.sName += '_THREADED_PC64_INTRAPG'; 1055 1128 elif self.sVariation in self.kdVariationsOnly64WithFlags: 1056 oNewStmt.sName += '_PC64_WITH_FLAGS'; 1129 if ( self.sVariation not in self.kdVariationsWithSamePgConditional 1130 or oNewStmt.sName not in self.kdRelJmpMcWithFlatOrSamePageVariations): 1131 oNewStmt.sName += '_THREADED_PC64_WITH_FLAGS'; 
1132 else: 1133 oNewStmt.sName += '_THREADED_PC64_INTRAPG_WITH_FLAGS'; 1057 1134 elif self.sVariation in self.kdVariationsOnlyPre386NoFlags: 1058 oNewStmt.sName += '_ PC16';1135 oNewStmt.sName += '_THREADED_PC16'; 1059 1136 elif self.sVariation in self.kdVariationsOnlyPre386WithFlags: 1060 oNewStmt.sName += '_PC16_WITH_FLAGS'; 1061 elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing: 1062 assert self.sVariation != self.ksVariation_Default; 1063 oNewStmt.sName += '_PC32'; 1137 oNewStmt.sName += '_THREADED_PC16_WITH_FLAGS'; 1138 elif oNewStmt.sName not in self.kdRelJmpMcWithFlatOrSamePageVariations: 1139 if self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing: 1140 assert self.sVariation != self.ksVariation_Default; 1141 oNewStmt.sName += '_THREADED_PC32'; 1142 else: 1143 oNewStmt.sName += '_THREADED_PC32_WITH_FLAGS'; 1064 1144 else: 1065 oNewStmt.sName += '_PC32_WITH_FLAGS'; 1145 if self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing: 1146 assert self.sVariation != self.ksVariation_Default; 1147 oNewStmt.sName += '_THREADED_PC32_FLAT'; 1148 else: 1149 oNewStmt.sName += '_THREADED_PC32_FLAT_WITH_FLAGS'; 1066 1150 1067 1151 # This is making the wrong branch of conditionals break out of the TB. … … 2214 2298 assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional); 2215 2299 2300 # We've got some Flat variations we need to add manually to avoid unnecessary CS.LIM checks. 2301 if ThrdFnVar.ksVariation_32 in asVariationsBase: 2302 assert ThrdFnVar.ksVariation_32f in asVariationsBase; 2303 asVariations.extend([ 2304 ThrdFnVar.ksVariation_32_Flat_Jmp, 2305 ThrdFnVar.ksVariation_32_Flat_NoJmp, 2306 ThrdFnVar.ksVariation_32f_Flat_Jmp, 2307 ThrdFnVar.ksVariation_32f_Flat_NoJmp, 2308 ]); 2309 2310 # Similarly, if there are 64-bit variants, we need the within same page variations. 2311 # We skip this when the operand size prefix forces is used because it cuts RIP down 2312 # to 16-bit only and the same-page assumptions are most likely wrong then. 2313 if ( ThrdFnVar.ksVariation_64 in asVariationsBase 2314 and not iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_REL_JMP_S16_AND_FINISH': True })): 2315 assert ThrdFnVar.ksVariation_64f in asVariationsBase; 2316 asVariations.extend([ 2317 ThrdFnVar.ksVariation_64_SamePg_Jmp, 2318 ThrdFnVar.ksVariation_64_SamePg_NoJmp, 2319 ThrdFnVar.ksVariation_64f_SamePg_Jmp, 2320 ThrdFnVar.ksVariation_64f_SamePg_NoJmp, 2321 ]); 2322 2216 2323 if not iai.McStmt.findStmtByNames(aoStmts, 2217 2324 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True, … … 2272 2379 The sBranch parameter is used with conditional branches where we'll emit 2273 2380 different threaded calls depending on whether we're in the jump-taken or 2274 no-jump code path. 2381 no-jump code path. Values are either None, 'Jmp' or 'NoJmp'. 2275 2382 2276 2383 The fTbLookupTable parameter can either be False, True or whatever else 2277 (like 2) - in the latte case this means a large lookup table.2384 (like 2) - in the latter case this means a large lookup table. 
2278 2385 """ 2279 2386 # Special case for only default variation: … … 2286 2393 # 2287 2394 dByVari = self.dVariations; 2288 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';2395 fDbg = self.oMcBlock.sFunction == 'iemOp_jnl_Jv'; 2289 2396 class Case: 2290 def __init__(self, sCond, sVarNm = None ):2397 def __init__(self, sCond, sVarNm = None, sIntraPgVarNm = None, sIntraPgDispVariable = None): 2291 2398 self.sCond = sCond; 2292 2399 self.sVarNm = sVarNm; 2293 2400 self.oVar = dByVari[sVarNm] if sVarNm else None; 2294 2401 self.aoBody = self.oVar.emitThreadedCallStmtsForVariant(8, fTbLookupTable) if sVarNm else None; 2402 # Some annoying complications just to skip canonical jump target checks for intrapage jumps. 2403 self.sIntraPgDispVariable = sIntraPgDispVariable; 2404 self.oIntraPgVar = dByVari[sIntraPgVarNm] if sIntraPgVarNm else None; 2405 self.aoIntraPgBody = self.oIntraPgVar.emitThreadedCallStmtsForVariant(8, fTbLookupTable) if sIntraPgVarNm \ 2406 else None; 2295 2407 2296 2408 def toCode(self): 2297 2409 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ]; 2298 2410 if self.aoBody: 2299 aoStmts.extend(self.aoBody); 2300 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8)); 2411 if not self.aoIntraPgBody: 2412 aoStmts.extend(self.aoBody); 2413 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8)); 2414 else: 2415 aoStmts.extend([ 2416 iai.McCppCond('!IEMOP_HLP_PC64_IS_JMP_REL_WITHIN_PAGE(%s)' % (self.sIntraPgDispVariable,), 2417 True, self.aoBody, self.aoIntraPgBody, cchIndent = 8), 2418 iai.McCppGeneric('break;', cchIndent = 8), 2419 ]); 2301 2420 return aoStmts; 2302 2421 … … 2304 2423 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ]; 2305 2424 if self.aoBody: 2306 aoStmts.extend([ 2307 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8), 2308 iai.McCppGeneric('break;', cchIndent = 8), 2309 ]); 2425 if not self.aoIntraPgBody: 2426 aoStmts.extend([ 2427 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8), 2428 iai.McCppGeneric('break;', cchIndent = 8), 2429 ]); 2430 else: 2431 aoStmts.extend([ 2432 iai.McCppGeneric('enmFunction = !IEMOP_HLP_PC64_IS_JMP_REL_WITHIN_PAGE(%s) ? %s : %s;' 2433 % (self.sIntraPgDispVariable, self.oVar.getIndexName(), 2434 self.oIntraPgVar.getIndexName(),), cchIndent = 8), 2435 iai.McCppGeneric('break;', cchIndent = 8), 2436 ]); 2310 2437 return aoStmts; 2311 2438 2312 def isSame(self, oThat): 2313 if not self.aoBody: # fall thru always matches. 
-            return True;
-        if len(self.aoBody) != len(oThat.aoBody):
-            #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
+    @staticmethod
+    def isSameBody(aoThisBody, sThisIndexName, aoThatBody, sThatIndexName, sBody = ''):
+        if len(aoThisBody) != len(aoThatBody):
+            if fDbg: print('dbg: %sbody len diff: %s vs %s' % (sBody, len(aoThisBody), len(aoThatBody),));
             return False;
-        for iStmt, oStmt in enumerate(self.aoBody):
-            oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
+        for iStmt, oStmt in enumerate(aoThisBody):
+            oThatStmt = aoThatBody[iStmt] # type: iai.McStmt
             assert isinstance(oStmt, iai.McCppGeneric);
             assert not isinstance(oStmt, iai.McStmtCond);
…
                 return False;
             if oStmt.sName != oThatStmt.sName:
-                #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
+                if fDbg: print('dbg: %sstmt #%s name: %s vs %s' % (sBody, iStmt, oStmt.sName, oThatStmt.sName,));
                 return False;
             if len(oStmt.asParams) != len(oThatStmt.asParams):
-                #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
-                #               % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
+                if fDbg: print('dbg: %sstmt #%s param count: %s vs %s'
+                               % (sBody, iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
                 return False;
             for iParam, sParam in enumerate(oStmt.asParams):
                 if (    sParam != oThatStmt.asParams[iParam]
-                    and (   iParam != 1
+                    and (   iParam not in (1, 2)
                          or not isinstance(oStmt, iai.McCppCall)
                          or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
-                         or sParam != self.oVar.getIndexName()
-                         or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName())):
-                    #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
-                    #               % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
+                         or sParam != sThisIndexName
+                         or oThatStmt.asParams[iParam] != sThatIndexName )):
+                    if fDbg: print('dbg: %sstmt #%s, param #%s: %s vs %s'
+                                   % (sBody, iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
                     return False;
+        return True;
+
+    def isSame(self, oThat):
+        if self.aoBody: # no body == fall thru - that always matches.
+            if not self.isSameBody(self.aoBody, self.oVar.getIndexName(),
+                                   oThat.aoBody, oThat.oVar.getIndexName()):
+                return False;
+            if self.aoIntraPgBody and not self.isSameBody(self.aoIntraPgBody, self.oIntraPgVar.getIndexName(),
+                                                          oThat.aoBody, oThat.oVar.getIndexName(),
+                                                          'intrapg/left '):
+                return False;
+            if oThat.aoIntraPgBody and not self.isSameBody(self.aoBody, self.oVar.getIndexName(),
+                                                           oThat.aoIntraPgBody, oThat.oIntraPgVar.getIndexName(),
+                                                           'intrapg/right '):
+                return False;
         return True;
 
…
         elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
             assert fSimple and sBranch;
-            aoCases.append(Case('IEMMODE_64BIT',
-                                ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
-            if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
-                aoCases.append(Case('IEMMODE_64BIT | 32',
-                                    ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));
+            if ThrdFnVar.ksVariation_64_SamePg_Jmp not in dByVari:
+                assert ThrdFnVar.ksVariation_64f_Jmp in dByVari;
+                aoCases.extend([
+                    Case('IEMMODE_64BIT',
+                         ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp),
+                    Case('IEMMODE_64BIT | 32',
+                         ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp),
+                ]);
+            else:
+                assert ThrdFnVar.ksVariation_64f_SamePg_Jmp in dByVari;
+                oStmtRelJmp = iai.McStmt.findStmtByNames(self.oMcBlock.decode(),
+                                                         { 'IEM_MC_REL_JMP_S8_AND_FINISH':  True,
+                                                           'IEM_MC_REL_JMP_S16_AND_FINISH': True,
+                                                           'IEM_MC_REL_JMP_S32_AND_FINISH': True,});
+                sIntraPgDispVariable = oStmtRelJmp.asParams[0];
+                aoCases.extend([
+                    Case('IEMMODE_64BIT',
+                         ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp,
+                         ThrdFnVar.ksVariation_64_SamePg_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_SamePg_NoJmp,
+                         sIntraPgDispVariable),
+                    Case('IEMMODE_64BIT | 32',
+                         ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp,
+                         ThrdFnVar.ksVariation_64f_SamePg_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_SamePg_NoJmp,
+                         sIntraPgDispVariable),
+                ]);
+
 
         if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
…
             assert fSimple and sBranch;
             aoCases.extend([
-                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
+                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
+                     ThrdFnVar.ksVariation_32_Flat_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_Flat_NoJmp),
                 Case('IEMMODE_32BIT',
                      ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
…
         if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
             aoCases.extend([
-                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
+                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
+                     ThrdFnVar.ksVariation_32f_Flat_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_Flat_NoJmp),
                 Case('IEMMODE_32BIT | 32',
                      ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
…
             for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
                 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
-            #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
+            if fDbg:
+                print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
             if fAllSameCases:
                 aoStmts = [
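For orientation: with these new Case arms, a 64-bit rel-jmp block that has SamePg variations now gets its variant picked at decode time, based on whether the displacement keeps RIP on the current code page. Below is a minimal, self-contained C sketch of that choice, assuming 4 KiB guest pages; the function and enum names are invented for illustration, the real generator emits IEM_MC2_EMIT_CALL_* statements carrying the variations' index names and the displacement variable.

```c
#include <stdint.h>

enum Variant { kVariant64_Jmp, kVariant64_SamePg_Jmp };

/* Decode-time choice modelled on the generated switch cases: if
 * rip + cbInstr + disp stays on the same 4 KiB guest page, the cheaper
 * same-page ("intra-page") threaded function can be scheduled; otherwise
 * the generic 64-bit one with the canonical check is required. */
static enum Variant pickVariant(uint64_t uRip, uint8_t cbInstr, int32_t offDisp)
{
    uint64_t const uNewRip = uRip + cbInstr + (int64_t)offDisp;
    return (uNewRip >> 12) == (uRip >> 12) ? kVariant64_SamePg_Jmp : kVariant64_Jmp;
}
```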
trunk/src/VBox/VMM/include/IEMInline.h
r105465	r105768
 
 /**
- * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
- * code (never 64-bit).
+ * Adds an 8-bit signed jump offset to RIP from 64-bit code when the caller is
+ * sure it stays within the same page.
  *
  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
…
  *                              taking the wrong conditional branch.
  */
-DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
-                                                                             IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
-{
-    Assert(!IEM_IS_64BIT_CODE(pVCpu));
-    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
-
-    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
-    if (enmEffOpSize == IEMMODE_16BIT)
-        uNewEip &= UINT16_MAX;
-    if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
-        pVCpu->cpum.GstCtx.rip = uNewEip;
-    else
-        return iemRaiseGeneralProtectionFault0(pVCpu);
+DECL_FORCE_INLINE(VBOXSTRICTRC)
+iemRegRip64RelativeJumpS8IntraPgAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
+                                                    IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
+{
+    Assert(IEM_IS_64BIT_CODE(pVCpu));
+    Assert(enmEffOpSize == IEMMODE_64BIT); RT_NOREF(enmEffOpSize);
+
+    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
+    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
+    pVCpu->cpum.GstCtx.rip = uNewRip;
 
 #ifndef IEM_WITH_CODE_TLB
…
 
 /**
- * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU.
- *
- * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
- * segment limit.
- *
- * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
- * @param   cbInstr             Instruction size.
- * @param   offNextInstr        The offset of the next instruction.
- * @param   rcNormal            VINF_SUCCESS to continue TB.
- *                              VINF_IEM_REEXEC_BREAK to force TB exit when
- *                              taking the wrong conditional branch.
- */
-DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
-                                                                            int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
-{
-    Assert(!IEM_IS_64BIT_CODE(pVCpu));
-
-    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
-    if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
-        pVCpu->cpum.GstCtx.rip = uNewIp;
-    else
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-
-#ifndef IEM_WITH_CODE_TLB
-    iemOpcodeFlushLight(pVCpu, cbInstr);
-#endif
-
-    /*
-     * Clear RF and finish the instruction (maybe raise #DB).
-     */
-    return iemRegFinishClearingRF(pVCpu, rcNormal);
-}
-
-
-/**
- * Adds an 8-bit signed jump offset to RIP from 64-bit code, no checking or
- * clearing of flags.
+ * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
+ * code (never 64-bit).
  *
  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
…
  *                              taking the wrong conditional branch.
  */
-DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
-                                                                          IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
-{
-    Assert(IEM_IS_64BIT_CODE(pVCpu));
-    Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
-
-    uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
+                                                                             IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
+{
+    Assert(!IEM_IS_64BIT_CODE(pVCpu));
+    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
+
+    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
     if (enmEffOpSize == IEMMODE_16BIT)
-        uNewRip &= UINT16_MAX;
-
-    if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
-        pVCpu->cpum.GstCtx.rip = uNewRip;
+        uNewEip &= UINT16_MAX;
+    if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
+        pVCpu->cpum.GstCtx.rip = uNewEip;
     else
         return iemRaiseGeneralProtectionFault0(pVCpu);
…
     iemOpcodeFlushLight(pVCpu, cbInstr);
 #endif
-    return iemRegFinishNoFlags(pVCpu, rcNormal);
-}
-
-
-/**
- * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
- * code (never 64-bit), no checking or clearing of flags.
+
+    /*
+     * Clear RF and finish the instruction (maybe raise #DB).
+     */
+    return iemRegFinishClearingRF(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds an 8-bit signed jump offset to EIP, on 386 or later from FLAT 32-bit code
+ * (never 64-bit).
  *
  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
…
  *                              taking the wrong conditional branch.
  */
+DECL_FORCE_INLINE(VBOXSTRICTRC)
+iemRegEip32RelativeJumpS8FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
+                                                 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
+{
+    Assert(!IEM_IS_64BIT_CODE(pVCpu));
+    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
+
+    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
+    if (enmEffOpSize == IEMMODE_16BIT)
+        uNewEip &= UINT16_MAX;
+    pVCpu->cpum.GstCtx.rip = uNewEip;
+
+#ifndef IEM_WITH_CODE_TLB
+    iemOpcodeFlushLight(pVCpu, cbInstr);
+#endif
+
+    /*
+     * Clear RF and finish the instruction (maybe raise #DB).
+     */
+    return iemRegFinishClearingRF(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU.
+ *
+ * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+ * segment limit.
+ *
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             Instruction size.
+ * @param   offNextInstr        The offset of the next instruction.
+ * @param   rcNormal            VINF_SUCCESS to continue TB.
+ *                              VINF_IEM_REEXEC_BREAK to force TB exit when
+ *                              taking the wrong conditional branch.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
+                                                                            int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
+{
+    Assert(!IEM_IS_64BIT_CODE(pVCpu));
+
+    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
+    if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
+        pVCpu->cpum.GstCtx.rip = uNewIp;
+    else
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+
+#ifndef IEM_WITH_CODE_TLB
+    iemOpcodeFlushLight(pVCpu, cbInstr);
+#endif
+
+    /*
+     * Clear RF and finish the instruction (maybe raise #DB).
+     */
+    return iemRegFinishClearingRF(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds an 8-bit signed jump offset to RIP from 64-bit code, no checking or
+ * clearing of flags.
+ *
+ * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+ * segment limit.
+ *
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             Instruction size.
+ * @param   offNextInstr        The offset of the next instruction.
+ * @param   enmEffOpSize        Effective operand size.
+ * @param   rcNormal            VINF_SUCCESS to continue TB.
+ *                              VINF_IEM_REEXEC_BREAK to force TB exit when
+ *                              taking the wrong conditional branch.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
+                                                                          IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
+{
+    Assert(IEM_IS_64BIT_CODE(pVCpu));
+    Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
+
+    uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
+    if (enmEffOpSize == IEMMODE_16BIT)
+        uNewRip &= UINT16_MAX;
+
+    if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
+        pVCpu->cpum.GstCtx.rip = uNewRip;
+    else
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+
+#ifndef IEM_WITH_CODE_TLB
+    iemOpcodeFlushLight(pVCpu, cbInstr);
+#endif
+    return iemRegFinishNoFlags(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds an 8-bit signed jump offset to RIP from 64-bit code when the caller is
+ * sure it stays within the same page, no checking or clearing of flags.
+ *
+ * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+ * segment limit.
+ *
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             Instruction size.
+ * @param   offNextInstr        The offset of the next instruction.
+ * @param   enmEffOpSize        Effective operand size.
+ * @param   rcNormal            VINF_SUCCESS to continue TB.
+ *                              VINF_IEM_REEXEC_BREAK to force TB exit when
+ *                              taking the wrong conditional branch.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC)
+iemRegRip64RelativeJumpS8IntraPgAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
+                                                 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
+{
+    Assert(IEM_IS_64BIT_CODE(pVCpu));
+    Assert(enmEffOpSize == IEMMODE_64BIT); RT_NOREF(enmEffOpSize);
+
+    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
+    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
+    pVCpu->cpum.GstCtx.rip = uNewRip;
+
+#ifndef IEM_WITH_CODE_TLB
+    iemOpcodeFlushLight(pVCpu, cbInstr);
+#endif
+    return iemRegFinishNoFlags(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
+ * code (never 64-bit), no checking or clearing of flags.
+ *
+ * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+ * segment limit.
+ *
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             Instruction size.
+ * @param   offNextInstr        The offset of the next instruction.
+ * @param   enmEffOpSize        Effective operand size.
+ * @param   rcNormal            VINF_SUCCESS to continue TB.
+ *                              VINF_IEM_REEXEC_BREAK to force TB exit when
+ *                              taking the wrong conditional branch.
+ */
 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                                           IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
…
 
 /**
+ * Adds an 8-bit signed jump offset to EIP, on 386 or later from flat 32-bit code
+ * (never 64-bit), no checking or clearing of flags.
+ *
+ * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+ * segment limit.
+ *
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             Instruction size.
+ * @param   offNextInstr        The offset of the next instruction.
+ * @param   enmEffOpSize        Effective operand size.
+ * @param   rcNormal            VINF_SUCCESS to continue TB.
+ *                              VINF_IEM_REEXEC_BREAK to force TB exit when
+ *                              taking the wrong conditional branch.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC)
+iemRegEip32RelativeJumpS8FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
+                                              IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
+{
+    Assert(!IEM_IS_64BIT_CODE(pVCpu));
+    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
+
+    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
+    if (enmEffOpSize == IEMMODE_16BIT)
+        uNewEip &= UINT16_MAX;
+    pVCpu->cpum.GstCtx.rip = uNewEip;
+
+#ifndef IEM_WITH_CODE_TLB
+    iemOpcodeFlushLight(pVCpu, cbInstr);
+#endif
+    return iemRegFinishNoFlags(pVCpu, rcNormal);
+}
+
+
+/**
  * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU, no checking or
  * clearing of flags.
…
 
 /**
- * Adds a 16-bit signed jump offset to RIP from 64-bit code, no checking or
- * clearing of flags.
+ * Adds a 16-bit signed jump offset to EIP from FLAT 32-bit code.
+ *
+ * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+ * segment limit.
  *
  * @returns Strict VBox status code.
…
  *                              VINF_IEM_REEXEC_BREAK to force TB exit when
  *                              taking the wrong conditional branch.
- */
-DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
-                                                                           int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
-{
-    Assert(IEM_IS_64BIT_CODE(pVCpu));
-
-    pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
+ *
+ * @note    This is also used by 16-bit code in pre-386 mode, as the code is
+ *          identical.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
+                                                                                  int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
+{
+    Assert(!IEM_IS_64BIT_CODE(pVCpu));
+
+    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
+    pVCpu->cpum.GstCtx.rip = uNewIp;
 
 #ifndef IEM_WITH_CODE_TLB
     iemOpcodeFlushLight(pVCpu, cbInstr);
 #endif
-    return iemRegFinishNoFlags(pVCpu, rcNormal);
-}
-
-
-/**
- * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code,
- * no checking or clearing of flags.
- *
- * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
- * segment limit.
+
+    /*
+     * Clear RF and finish the instruction (maybe raise #DB).
+     */
+    return iemRegFinishClearingRF(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds a 16-bit signed jump offset to RIP from 64-bit code, no checking or
+ * clearing of flags.
  *
  * @returns Strict VBox status code.
…
  *                              VINF_IEM_REEXEC_BREAK to force TB exit when
  *                              taking the wrong conditional branch.
- *
- * @note    This is also used by 16-bit code in pre-386 mode, as the code is
- *          identical.
- */
-DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
                                                                            int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
 {
-    Assert(!IEM_IS_64BIT_CODE(pVCpu));
-
-    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
-    if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
-        pVCpu->cpum.GstCtx.rip = uNewIp;
-    else
-        return iemRaiseGeneralProtectionFault0(pVCpu);
+    Assert(IEM_IS_64BIT_CODE(pVCpu));
+
+    pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
 
 #ifndef IEM_WITH_CODE_TLB
…
 
 /**
- * Adds a 32-bit signed jump offset to RIP from 64-bit code.
+ * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code,
+ * no checking or clearing of flags.
  *
  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
  * segment limit.
- *
- * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
- * only alternative for relative jumps in 64-bit code and that is already
- * handled in the decoder stage.
  *
  * @returns Strict VBox status code.
…
  *                              VINF_IEM_REEXEC_BREAK to force TB exit when
  *                              taking the wrong conditional branch.
- */
-DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
-                                                                              int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
-{
-    Assert(IEM_IS_64BIT_CODE(pVCpu));
-
-    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
-    if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
-        pVCpu->cpum.GstCtx.rip = uNewRip;
+ *
+ * @note    This is also used by 16-bit code in pre-386 mode, as the code is
+ *          identical.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
+                                                                           int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
+{
+    Assert(!IEM_IS_64BIT_CODE(pVCpu));
+
+    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
+    if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
+        pVCpu->cpum.GstCtx.rip = uNewIp;
     else
         return iemRaiseGeneralProtectionFault0(pVCpu);
…
     iemOpcodeFlushLight(pVCpu, cbInstr);
 #endif
-
-    /*
-     * Clear RF and finish the instruction (maybe raise #DB).
-     */
-    return iemRegFinishClearingRF(pVCpu, rcNormal);
-}
-
-
-/**
- * Adds a 32-bit signed jump offset to RIP from 64-bit code.
+    return iemRegFinishNoFlags(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds a 16-bit signed jump offset to EIP from FLAT 32-bit code, no checking or
+ * clearing of flags.
  *
  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
  * segment limit.
- *
- * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
- * only alternative for relative jumps in 32-bit code and that is already
- * handled in the decoder stage.
  *
  * @returns Strict VBox status code.
…
  *                              VINF_IEM_REEXEC_BREAK to force TB exit when
  *                              taking the wrong conditional branch.
- */
-DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
-                                                                              int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
+ *
+ * @note    This is also used by 16-bit code in pre-386 mode, as the code is
+ *          identical.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
+                                                                               int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
 {
     Assert(!IEM_IS_64BIT_CODE(pVCpu));
-    Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
-
-    uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
-    if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
-        pVCpu->cpum.GstCtx.rip = uNewEip;
-    else
-        return iemRaiseGeneralProtectionFault0(pVCpu);
+
+    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
+    pVCpu->cpum.GstCtx.rip = uNewIp;
 
 #ifndef IEM_WITH_CODE_TLB
     iemOpcodeFlushLight(pVCpu, cbInstr);
 #endif
-
-    /*
-     * Clear RF and finish the instruction (maybe raise #DB).
-     */
-    return iemRegFinishClearingRF(pVCpu, rcNormal);
-}
-
-
-/**
- * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
- * clearing of flags.
+    return iemRegFinishNoFlags(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds a 32-bit signed jump offset to RIP from 64-bit code.
  *
  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
…
  *                              taking the wrong conditional branch.
  */
-DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
-                                                                           int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
+                                                                              int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
 {
     Assert(IEM_IS_64BIT_CODE(pVCpu));
…
     iemOpcodeFlushLight(pVCpu, cbInstr);
 #endif
-    return iemRegFinishNoFlags(pVCpu, rcNormal);
-}
-
-
-/**
- * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
- * clearing of flags.
+
+    /*
+     * Clear RF and finish the instruction (maybe raise #DB).
+     */
+    return iemRegFinishClearingRF(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds a 32-bit signed jump offset to RIP from 64-bit code when the caller is
+ * sure the target is in the same page.
  *
  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
  * segment limit.
  *
- * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
- * only alternative for relative jumps in 32-bit code and that is already
+ * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
+ * only alternative for relative jumps in 64-bit code and that is already
  * handled in the decoder stage.
  *
…
  *                              VINF_IEM_REEXEC_BREAK to force TB exit when
  *                              taking the wrong conditional branch.
  */
-DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
-                                                                           int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
+DECL_FORCE_INLINE(VBOXSTRICTRC)
+iemRegRip64RelativeJumpS32IntraPgAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
+                                                     int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
+{
+    Assert(IEM_IS_64BIT_CODE(pVCpu));
+
+    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
+    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
+    pVCpu->cpum.GstCtx.rip = uNewRip;
+
+#ifndef IEM_WITH_CODE_TLB
+    iemOpcodeFlushLight(pVCpu, cbInstr);
+#endif
+
+    /*
+     * Clear RF and finish the instruction (maybe raise #DB).
+     */
+    return iemRegFinishClearingRF(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds a 32-bit signed jump offset to RIP from 64-bit code.
+ *
+ * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+ * segment limit.
+ *
+ * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
+ * only alternative for relative jumps in 32-bit code and that is already
+ * handled in the decoder stage.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             Instruction size.
+ * @param   offNextInstr        The offset of the next instruction.
+ * @param   rcNormal            VINF_SUCCESS to continue TB.
+ *                              VINF_IEM_REEXEC_BREAK to force TB exit when
+ *                              taking the wrong conditional branch.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
+                                                                              int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
 {
     Assert(!IEM_IS_64BIT_CODE(pVCpu));
…
     else
         return iemRaiseGeneralProtectionFault0(pVCpu);
+
+#ifndef IEM_WITH_CODE_TLB
+    iemOpcodeFlushLight(pVCpu, cbInstr);
+#endif
+
+    /*
+     * Clear RF and finish the instruction (maybe raise #DB).
+     */
+    return iemRegFinishClearingRF(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds a 32-bit signed jump offset to RIP from FLAT 32-bit code.
+ *
+ * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+ * segment limit.
+ *
+ * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
+ * only alternative for relative jumps in 32-bit code and that is already
+ * handled in the decoder stage.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             Instruction size.
+ * @param   offNextInstr        The offset of the next instruction.
+ * @param   rcNormal            VINF_SUCCESS to continue TB.
+ *                              VINF_IEM_REEXEC_BREAK to force TB exit when
+ *                              taking the wrong conditional branch.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
+                                                                                  int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
+{
+    Assert(!IEM_IS_64BIT_CODE(pVCpu));
+    Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
+
+    uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
+    pVCpu->cpum.GstCtx.rip = uNewEip;
+
+#ifndef IEM_WITH_CODE_TLB
+    iemOpcodeFlushLight(pVCpu, cbInstr);
+#endif
+
+    /*
+     * Clear RF and finish the instruction (maybe raise #DB).
+     */
+    return iemRegFinishClearingRF(pVCpu, rcNormal);
+}
+
+
+
+/**
+ * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
+ * clearing of flags.
+ *
+ * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+ * segment limit.
+ *
+ * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
+ * only alternative for relative jumps in 64-bit code and that is already
+ * handled in the decoder stage.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             Instruction size.
+ * @param   offNextInstr        The offset of the next instruction.
+ * @param   rcNormal            VINF_SUCCESS to continue TB.
+ *                              VINF_IEM_REEXEC_BREAK to force TB exit when
+ *                              taking the wrong conditional branch.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
+                                                                           int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
+{
+    Assert(IEM_IS_64BIT_CODE(pVCpu));
+
+    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
+    if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
+        pVCpu->cpum.GstCtx.rip = uNewRip;
+    else
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+
+#ifndef IEM_WITH_CODE_TLB
+    iemOpcodeFlushLight(pVCpu, cbInstr);
+#endif
+    return iemRegFinishNoFlags(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds a 32-bit signed jump offset to RIP from 64-bit code when the caller is
+ * sure it stays within the same page, no checking or clearing of flags.
+ *
+ * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+ * segment limit.
+ *
+ * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
+ * only alternative for relative jumps in 64-bit code and that is already
+ * handled in the decoder stage.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             Instruction size.
+ * @param   offNextInstr        The offset of the next instruction.
+ * @param   rcNormal            VINF_SUCCESS to continue TB.
+ *                              VINF_IEM_REEXEC_BREAK to force TB exit when
+ *                              taking the wrong conditional branch.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC)
+iemRegRip64RelativeJumpS32IntraPgAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
+{
+    Assert(IEM_IS_64BIT_CODE(pVCpu));
+
+    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
+    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
+    pVCpu->cpum.GstCtx.rip = uNewRip;
+
+#ifndef IEM_WITH_CODE_TLB
+    iemOpcodeFlushLight(pVCpu, cbInstr);
+#endif
+    return iemRegFinishNoFlags(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds a 32-bit signed jump offset to RIP from 32-bit code, no checking or
+ * clearing of flags.
+ *
+ * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+ * segment limit.
+ *
+ * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
+ * only alternative for relative jumps in 32-bit code and that is already
+ * handled in the decoder stage.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             Instruction size.
+ * @param   offNextInstr        The offset of the next instruction.
+ * @param   rcNormal            VINF_SUCCESS to continue TB.
+ *                              VINF_IEM_REEXEC_BREAK to force TB exit when
+ *                              taking the wrong conditional branch.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
+                                                                           int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
+{
+    Assert(!IEM_IS_64BIT_CODE(pVCpu));
+    Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
+
+    uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
+    if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
+        pVCpu->cpum.GstCtx.rip = uNewEip;
+    else
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+
+#ifndef IEM_WITH_CODE_TLB
+    iemOpcodeFlushLight(pVCpu, cbInstr);
+#endif
+    return iemRegFinishNoFlags(pVCpu, rcNormal);
+}
+
+
+/**
+ * Adds a 32-bit signed jump offset to RIP from FLAT 32-bit code, no checking or
+ * clearing of flags.
+ *
+ * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+ * segment limit.
+ *
+ * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
+ * only alternative for relative jumps in 32-bit code and that is already
+ * handled in the decoder stage.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             Instruction size.
+ * @param   offNextInstr        The offset of the next instruction.
+ * @param   rcNormal            VINF_SUCCESS to continue TB.
+ *                              VINF_IEM_REEXEC_BREAK to force TB exit when
+ *                              taking the wrong conditional branch.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
+                                                                               int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
+{
+    Assert(!IEM_IS_64BIT_CODE(pVCpu));
+    Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
+
+    uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
+    pVCpu->cpum.GstCtx.rip = uNewEip;
 
 #ifndef IEM_WITH_CODE_TLB
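The net effect of the IEMInline.h changes is easiest to see side by side: the generic 64-bit jump helpers keep the canonical check and the \#GP(0) path, while the new IntraPg twins reduce to a plain RIP update guarded by a debug assertion. The contrast below is a self-contained model under stated assumptions (GUEST_PAGE_SHIFT taken as 12 and a local stand-in for VBox's canonical test); it is an illustration, not the VBox code, and it omits the opcode-buffer flush and instruction-finish steps the real helpers perform.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define GUEST_PAGE_SHIFT 12 /* assumed: 4 KiB guest pages */
/* Local stand-in for VBox's canonical-address test: bits 63:47 must be a
 * sign extension of bit 47. */
#define IS_CANONICAL(a_u64) ((uint64_t)((int64_t)((a_u64) << 16) >> 16) == (a_u64))

/* Generic 64-bit relative jump: must verify the target and take a #GP(0)
 * path (modelled as returning false) when it is non-canonical. */
static bool jumpGeneric(uint64_t *puRip, uint8_t cbInstr, int32_t offDisp)
{
    uint64_t const uNewRip = *puRip + cbInstr + (int64_t)offDisp;
    if (!IS_CANONICAL(uNewRip))
        return false; /* would raise #GP(0) */
    *puRip = uNewRip;
    return true;
}

/* Intra-page variant: the decode-time page check already proved the target
 * stays on the same page as a canonical RIP, so only the addition remains. */
static void jumpIntraPg(uint64_t *puRip, uint8_t cbInstr, int32_t offDisp)
{
    uint64_t const uNewRip = *puRip + cbInstr + (int64_t)offDisp;
    assert((*puRip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
    *puRip = uNewRip;
}
```

Because the non-canonical hole begins on a page boundary, a target on the same page as an already canonical RIP cannot itself be non-canonical, which is what lets the IntraPg helpers drop both the check and the \#GP(0) branch.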
trunk/src/VBox/VMM/include/IEMOpHlp.h
r105295	r105768
 } while (0)
 
+/**
+ * Used by the threaded code generator to check if a jump stays within the same
+ * page in 64-bit code.
+ */
+#define IEMOP_HLP_PC64_IS_JMP_REL_WITHIN_PAGE(a_offDisp) \
+    (   ((pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (a_offDisp)) >> GUEST_PAGE_SHIFT) \
+     == (pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT))
+
 VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT;
 VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT;
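A worked example of what the macro computes, assuming GUEST_PAGE_SHIFT is 12 (4 KiB pages): with rip = 0x1FFE, an instruction length of 2, and a displacement of +5, the target is 0x1FFE + 2 + 5 = 0x2005; since 0x1FFE >> 12 = 1 but 0x2005 >> 12 = 2, the jump leaves the page and the macro yields false. The same test as a standalone sketch; the function name is invented for illustration.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GUEST_PAGE_SHIFT 12 /* assumed 4 KiB guest page size */

/* Standalone restatement of IEMOP_HLP_PC64_IS_JMP_REL_WITHIN_PAGE: does
 * rip + cbInstr + disp land on the same guest page as the current rip? */
static bool isJmpRelWithinPage(uint64_t uRip, uint8_t cbInstr, int64_t offDisp)
{
    return ((uRip + cbInstr + (uint64_t)offDisp) >> GUEST_PAGE_SHIFT)
        == (uRip >> GUEST_PAGE_SHIFT);
}

int main(void)
{
    printf("%d\n", isJmpRelWithinPage(0x1000, 2, 0x10)); /* 1: stays on the page */
    printf("%d\n", isJmpRelWithinPage(0x1FFE, 2, 5));    /* 0: crosses to next page */
    return 0;
}
```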