VirtualBox

Changeset 105768 in vbox


Ignore:
Timestamp:
Aug 21, 2024 2:01:05 PM (5 weeks ago)
Author:
vboxsync
Message:

VMM/IEM: Eliminated an unnecessary CS.LIM check in IEM_MC_REL_JMP_XXX for FLAT 32-bit mode together with an unnecessary canonical target RIP check for 64-bit mode jumps within the same page (todo 5). bugref:10720

Location:
trunk/src/VBox/VMM
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veLiveness.cpp

    r105652 r105768  
    243243
    244244
    245 #define IEM_LIVENESS_PC_NO_FLAGS()          NOP()
    246 #define IEM_LIVENESS_PC_WITH_FLAGS()        IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther)
    247 #define IEM_LIVENESS_PC16_JMP_NO_FLAGS()    IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS)
    248 #define IEM_LIVENESS_PC32_JMP_NO_FLAGS()    IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS)
    249 #define IEM_LIVENESS_PC64_JMP_NO_FLAGS()    IEM_LIVENESS_MARK_XCPT_OR_CALL()
    250 #define IEM_LIVENESS_PC16_JMP_WITH_FLAGS()  IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS)
    251 #define IEM_LIVENESS_PC32_JMP_WITH_FLAGS()  IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS)
    252 #define IEM_LIVENESS_PC64_JMP_WITH_FLAGS()  IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther)
     245#define IEM_LIVENESS_PC_NO_FLAGS()                  NOP()
     246#define IEM_LIVENESS_PC_WITH_FLAGS()                IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther)
     247#define IEM_LIVENESS_PC16_JMP_NO_FLAGS()            IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS)
     248#define IEM_LIVENESS_PC32_JMP_NO_FLAGS()            IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS)
     249#define IEM_LIVENESS_PC32_FLAT_JMP_NO_FLAGS()       IEM_LIVENESS_MARK_XCPT_OR_CALL()
     250#define IEM_LIVENESS_PC64_JMP_NO_FLAGS()            IEM_LIVENESS_MARK_XCPT_OR_CALL()
     251#define IEM_LIVENESS_PC64_INTRAPG_JMP_NO_FLAGS()    IEM_LIVENESS_MARK_XCPT_OR_CALL() /* Typically ends TB. */
     252#define IEM_LIVENESS_PC16_JMP_WITH_FLAGS()          IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS)
     253#define IEM_LIVENESS_PC32_JMP_WITH_FLAGS()          IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther); IEM_LIVENESS_SEG_LIMIT_INPUT(X86_SREG_CS)
     254#define IEM_LIVENESS_PC32_FLAT_JMP_WITH_FLAGS()     IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther)
     255#define IEM_LIVENESS_PC64_JMP_WITH_FLAGS()          IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther)
     256#define IEM_LIVENESS_PC64_INTRAPG_JMP_WITH_FLAGS()  IEM_LIVENESS_MARK_XCPT_OR_CALL(); IEM_LIVENESS_ONE_EFLAG_INPUT(fEflOther)
    253257
    254258#ifndef IEMLIVENESS_EXTENDED_LAYOUT
     
    308312#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_cbInstr, a_rcNormal)   IEM_LIVENESS_PC_WITH_FLAGS()
    309313
    310 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16(a_i8, a_cbInstr, a_rcNormal)                             IEM_LIVENESS_PC16_JMP_NO_FLAGS()
    311 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)             IEM_LIVENESS_PC32_JMP_NO_FLAGS()
    312 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)             IEM_LIVENESS_PC64_JMP_NO_FLAGS()
    313 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i8, a_cbInstr, a_rcNormal)                  IEM_LIVENESS_PC16_JMP_WITH_FLAGS()
    314 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)  IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
    315 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)  IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
    316 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC16_JMP_NO_FLAGS()
    317 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC32_JMP_NO_FLAGS()
    318 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC64_JMP_NO_FLAGS()
    319 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC16_JMP_WITH_FLAGS()
    320 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
    321 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
    322 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC16_JMP_NO_FLAGS()
    323 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC32_JMP_NO_FLAGS()
    324 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC64_JMP_NO_FLAGS()
    325 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC16_JMP_WITH_FLAGS()
    326 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
    327 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
    328 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16(a_u16NewIP)                                             IEM_LIVENESS_PC16_JMP_NO_FLAGS()
    329 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32(a_u16NewIP)                                             IEM_LIVENESS_PC32_JMP_NO_FLAGS()
    330 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64(a_u16NewIP)                                             IEM_LIVENESS_PC64_JMP_NO_FLAGS()
    331 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP)                                  IEM_LIVENESS_PC16_JMP_WITH_FLAGS()
    332 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP)                                  IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
    333 #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP)                                  IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
    334 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP)                                            IEM_LIVENESS_PC32_JMP_NO_FLAGS()
    335 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP)                                            IEM_LIVENESS_PC64_JMP_NO_FLAGS()
    336 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP)                                 IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
    337 #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP)                                 IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
    338 #define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64(a_u32NewEIP)                                            IEM_LIVENESS_PC64_JMP_NO_FLAGS()
    339 #define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP)                                 IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
     314#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16(a_i8, a_cbInstr, a_rcNormal)                                     IEM_LIVENESS_PC16_JMP_NO_FLAGS()
     315#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)                     IEM_LIVENESS_PC32_JMP_NO_FLAGS()
     316#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)                IEM_LIVENESS_PC32_FLAT_JMP_NO_FLAGS()
     317#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)                     IEM_LIVENESS_PC64_JMP_NO_FLAGS()
     318#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)             IEM_LIVENESS_PC64_INTRAPG_JMP_NO_FLAGS()
     319#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i8, a_cbInstr, a_rcNormal)                          IEM_LIVENESS_PC16_JMP_WITH_FLAGS()
     320#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)          IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
     321#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)     IEM_LIVENESS_PC32_FLAT_JMP_WITH_FLAGS()
     322#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)          IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
     323#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal)  IEM_LIVENESS_PC64_INTRAPG_JMP_WITH_FLAGS()
     324#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr, a_rcNormal)                                   IEM_LIVENESS_PC16_JMP_NO_FLAGS()
     325#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr, a_rcNormal)                                   IEM_LIVENESS_PC32_JMP_NO_FLAGS()
     326#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT(a_i16, a_cbInstr, a_rcNormal)                              IEM_LIVENESS_PC32_FLAT_JMP_NO_FLAGS()
     327#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal)                                   IEM_LIVENESS_PC64_JMP_NO_FLAGS()
     328#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG(a_i16, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC64_INTRAPG_JMP_NO_FLAGS()
     329#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                        IEM_LIVENESS_PC16_JMP_WITH_FLAGS()
     330#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                        IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
     331#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                   IEM_LIVENESS_PC32_FLAT_JMP_WITH_FLAGS()
     332#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                        IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
     333#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC64_INTRAPG_JMP_WITH_FLAGS()
     334#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr, a_rcNormal)                                   IEM_LIVENESS_PC16_JMP_NO_FLAGS()
     335#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr, a_rcNormal)                                   IEM_LIVENESS_PC32_JMP_NO_FLAGS()
     336#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT(a_i32, a_cbInstr, a_rcNormal)                              IEM_LIVENESS_PC32_FLAT_JMP_NO_FLAGS()
     337#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal)                                   IEM_LIVENESS_PC64_JMP_NO_FLAGS()
     338#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG(a_i32, a_cbInstr, a_rcNormal)                           IEM_LIVENESS_PC64_INTRAPG_JMP_NO_FLAGS()
     339#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                        IEM_LIVENESS_PC16_JMP_WITH_FLAGS()
     340#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                        IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
     341#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                   IEM_LIVENESS_PC32_FLAT_JMP_WITH_FLAGS()
     342#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                        IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
     343#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal)                IEM_LIVENESS_PC64_INTRAPG_JMP_WITH_FLAGS()
     344#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16(a_u16NewIP)                                                     IEM_LIVENESS_PC16_JMP_NO_FLAGS()
     345#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32(a_u16NewIP)                                                     IEM_LIVENESS_PC32_JMP_NO_FLAGS()
     346#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64(a_u16NewIP)                                                     IEM_LIVENESS_PC64_JMP_NO_FLAGS()
     347#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP)                                          IEM_LIVENESS_PC16_JMP_WITH_FLAGS()
     348#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP)                                          IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
     349#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP)                                          IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
     350#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP)                                                    IEM_LIVENESS_PC32_JMP_NO_FLAGS()
     351#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP)                                                    IEM_LIVENESS_PC64_JMP_NO_FLAGS()
     352#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP)                                         IEM_LIVENESS_PC32_JMP_WITH_FLAGS()
     353#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP)                                         IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
     354#define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64(a_u32NewEIP)                                                    IEM_LIVENESS_PC64_JMP_NO_FLAGS()
     355#define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP)                                         IEM_LIVENESS_PC64_JMP_WITH_FLAGS()
    340356
    341357#define IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr)                      do { IEM_LIVENESS_PC16_JMP_NO_FLAGS();   IEM_LIVENESS_STACK(); } while (0)
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py

    r105652 r105768  
    6565    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16':                        (None, True,  True,  True,  ),
    6666    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32':                        (None, True,  True,  True,  ),
     67    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT':                   (None, True,  True,  True,  ),
    6768    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64':                        (None, True,  True,  True,  ),
     69    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG':                (None, True,  True,  True,  ),
    6870    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16':                       (None, True,  True,  True,  ),
    6971    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32':                       (None, True,  True,  True,  ),
     72    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT':                  (None, True,  True,  True,  ),
    7073    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64':                       (None, True,  True,  True,  ),
     74    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG':               (None, True,  True,  True,  ),
    7175    'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32':                       (None, True,  True,  True,  ),
     76    'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT':                  (None, True,  True,  True,  ),
    7277    'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64':                       (None, True,  True,  True,  ),
     78    'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG':               (None, True,  True,  True,  ),
    7379
    7480    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16_WITH_FLAGS':             (None, True,  True,  True,  ),
    7581    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS':             (None, True,  True,  True,  ),
     82    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS':        (None, True,  True,  True,  ),
    7683    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS':             (None, True,  True,  True,  ),
     84    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS':     (None, True,  True,  True,  ),
    7785    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS':            (None, True,  True,  True,  ),
    7886    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS':            (None, True,  True,  True,  ),
     87    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS':       (None, True,  True,  True,  ),
    7988    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS':            (None, True,  True,  True,  ),
     89    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS':    (None, True,  True,  True,  ),
    8090    'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS':            (None, True,  True,  True,  ),
     91    'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS':       (None, True,  True,  True,  ),
    8192    'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS':            (None, True,  True,  True,  ),
     93    'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS':    (None, True,  True,  True,  ),
    8294
    8395    'IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC16':                      (None, True,  True,  True,  ),
     96    'IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC32':                      (None, True,  True,  True,  ),
    8497    'IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC32':                      (None, True,  True,  True,  ),
    8598    'IEM_MC_REL_CALL_S16_AND_FINISH_THREADED_PC64':                      (None, True,  True,  True,  ),
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h

    r105739 r105768  
    610610
    611611#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    612     off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
    613                                                             (a_enmEffOpSize), pCallEntry->idxInstr); \
     612    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
     613                                                                   (a_enmEffOpSize), pCallEntry->idxInstr); \
    614614    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8))
    615615
    616616#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    617     off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
    618                                                             (a_enmEffOpSize), pCallEntry->idxInstr); \
     617    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
     618                                                                   (a_enmEffOpSize), pCallEntry->idxInstr); \
    619619    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    620620    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8))
    621621
    622622#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal) \
    623     off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
    624                                                             IEMMODE_16BIT, pCallEntry->idxInstr); \
     623    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
     624                                                                   IEMMODE_16BIT, pCallEntry->idxInstr); \
    625625    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16))
    626626
    627627#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    628         off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
    629                                                                 IEMMODE_16BIT, pCallEntry->idxInstr); \
     628        off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
     629                                                                       IEMMODE_16BIT, pCallEntry->idxInstr); \
    630630    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    631631    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16))
    632632
    633633#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal) \
    634     off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), \
    635                                                             IEMMODE_64BIT, pCallEntry->idxInstr); \
     634    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (a_i32), \
     635                                                                   IEMMODE_64BIT, pCallEntry->idxInstr); \
    636636    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32))
    637637
    638638#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    639     off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), \
    640                                                             IEMMODE_64BIT, pCallEntry->idxInstr); \
     639    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (a_i32), \
     640                                                                   IEMMODE_64BIT, pCallEntry->idxInstr); \
     641    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
     642    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32))
     643
     644
     645#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
     646    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
     647                                                                  (a_enmEffOpSize), pCallEntry->idxInstr); \
     648    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8))
     649
     650#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
     651    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
     652                                                                  (a_enmEffOpSize), pCallEntry->idxInstr); \
     653    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
     654    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8))
     655
     656#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG(a_i16, a_cbInstr, a_rcNormal) \
     657    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
     658                                                                  IEMMODE_16BIT, pCallEntry->idxInstr); \
     659    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16))
     660
     661#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
     662    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
     663                                                                  IEMMODE_16BIT, pCallEntry->idxInstr); \
     664    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
     665    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16))
     666
     667#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG(a_i32, a_cbInstr, a_rcNormal) \
     668    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (a_i32), \
     669                                                                  IEMMODE_64BIT, pCallEntry->idxInstr); \
     670    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32))
     671
     672#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
     673    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (a_i32), \
     674                                                                  IEMMODE_64BIT, pCallEntry->idxInstr); \
    641675    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    642676    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32))
     
    645679 *  iemRegRip64RelativeJumpS16AndFinishNoFlags and
    646680 *  iemRegRip64RelativeJumpS32AndFinishNoFlags. */
     681template<bool const a_fWithinPage>
    647682DECL_INLINE_THROW(uint32_t)
    648683iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr,
     
    668703    if (RT_LIKELY(enmEffOpSize == IEMMODE_64BIT))
    669704    {
    670         /* Check that the address is canonical, raising #GP(0) + exit TB if it isn't. */
    671         off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
     705        /* Check that the address is canonical, raising #GP(0) + exit TB if it isn't.
     706           We can skip this if the target is within the same page. */
     707        if (!a_fWithinPage)
     708            off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
    672709    }
    673710    else
     
    687724
    688725#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    689     off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
    690                                                             (a_enmEffOpSize), pCallEntry->idxInstr); \
     726    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
     727                                                                   (a_enmEffOpSize), pCallEntry->idxInstr); \
    691728    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8))
    692729
    693730#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    694     off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
    695                                                             (a_enmEffOpSize), pCallEntry->idxInstr); \
     731    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
     732                                                                   (a_enmEffOpSize), pCallEntry->idxInstr); \
    696733    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    697734    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8))
    698735
    699736#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr, a_rcNormal) \
    700     off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
    701                                                             IEMMODE_16BIT, pCallEntry->idxInstr); \
     737    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
     738                                                                   IEMMODE_16BIT, pCallEntry->idxInstr); \
    702739    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16))
    703740
    704741#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    705     off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
    706                                                             IEMMODE_16BIT, pCallEntry->idxInstr); \
     742    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
     743                                                                   IEMMODE_16BIT, pCallEntry->idxInstr); \
    707744    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    708745    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16))
    709746
    710747#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr, a_rcNormal) \
    711     off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), \
    712                                                             IEMMODE_32BIT, pCallEntry->idxInstr); \
     748    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (a_i32), \
     749                                                                   IEMMODE_32BIT, pCallEntry->idxInstr); \
    713750    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32))
    714751
    715752#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    716     off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), \
    717                                                             IEMMODE_32BIT, pCallEntry->idxInstr); \
     753    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<false>(pReNative, off, (a_cbInstr), (a_i32), \
     754                                                                   IEMMODE_32BIT, pCallEntry->idxInstr); \
     755    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
     756    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32))
     757
     758
     759#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
     760    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
     761                                                                  (a_enmEffOpSize), pCallEntry->idxInstr); \
     762    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8))
     763
     764#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
     765    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
     766                                                                  (a_enmEffOpSize), pCallEntry->idxInstr); \
     767    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
     768    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int8_t)(a_i8))
     769
     770#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT(a_i16, a_cbInstr, a_rcNormal) \
     771    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
     772                                                                  IEMMODE_16BIT, pCallEntry->idxInstr); \
     773    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16))
     774
     775#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
     776    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
     777                                                                  IEMMODE_16BIT, pCallEntry->idxInstr); \
     778    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
     779    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (int16_t)(a_i16))
     780
     781#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT(a_i32, a_cbInstr, a_rcNormal) \
     782    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (a_i32), \
     783                                                                  IEMMODE_32BIT, pCallEntry->idxInstr); \
     784    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32))
     785
     786#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
     787    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags<true>(pReNative, off, (a_cbInstr), (a_i32), \
     788                                                                  IEMMODE_32BIT, pCallEntry->idxInstr); \
    718789    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    719790    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, true /*a_fIsJump*/>(pReNative, off, pCallEntry, (a_i32))
     
    722793 *  iemRegEip32RelativeJumpS16AndFinishNoFlags and
    723794 *  iemRegEip32RelativeJumpS32AndFinishNoFlags. */
     795template<bool const a_fFlat>
    724796DECL_INLINE_THROW(uint32_t)
    725797iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr,
     
    748820
    749821    /* Perform limit checking, potentially raising #GP(0) and exit the TB. */
    750 /** @todo we can skip this in 32-bit FLAT mode. */
    751     off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
     822    if (!a_fFlat)
     823        off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
    752824
    753825    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncs.cpp

    r104419 r105768  
    126126
    127127/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
     128 * size as extra parameters, for use in flat 32-bit code on 386 and later
     129 * CPUs. */
     130#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
     131    return iemRegEip32RelativeJumpS8FlatAndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
     132
     133/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
    128134 * size as extra parameters, for use in 64-bit code. */
    129135#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    130136    return iemRegRip64RelativeJumpS8AndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
     137
     138/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
     139 * size as extra parameters, for use in 64-bit code jumping within a page. */
     140#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
     141    return iemRegRip64RelativeJumpS8IntraPgAndFinishNoFlags(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
    131142
    132143
     
    144155
    145156/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
     157 * size as extra parameters, for use in flat 32-bit code on 386 and later
     158 * CPUs and we need to check and clear flags. */
     159#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
     160    return iemRegEip32RelativeJumpS8FlatAndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
     161
     162/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
    146163 * size as extra parameters, for use in 64-bit code and we need to check and
    147164 * clear flags. */
     
    149166    return iemRegRip64RelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
    150167
     168/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length and operand
     169 * size as extra parameters, for use in 64-bit code jumping within a page and we
     170 * need to check and clear flags. */
     171#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
     172    return iemRegRip64RelativeJumpS8IntraPgAndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize, a_rcNormal)
     173
    151174#undef  IEM_MC_REL_JMP_S8_AND_FINISH
    152175
     
    163186
    164187/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
     188 *  param, for use in flat 32-bit code on 386 and later CPUs. */
     189#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT(a_i16, a_cbInstr, a_rcNormal) \
     190    return iemRegEip32RelativeJumpS16FlatAndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
     191
     192/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
    165193 *  param, for use in 64-bit code. */
    166194#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal) \
     195    return iemRegRip64RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
     196
     197/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
      198 *  param, for use in 64-bit code jumping within a page.
     199 * @note No special function for this, there is nothing to save here.  */
     200#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG(a_i16, a_cbInstr, a_rcNormal) \
    167201    return iemRegRip64RelativeJumpS16AndFinishNoFlags(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
    168202
     
    181215
    182216/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
     217 *  param, for use in flat 32-bit code on 386 and later CPUs and we need
     218 *  to check and clear flags. */
     219#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
     220    return iemRegEip32RelativeJumpS16FlatAndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
     221
     222/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
    183223 *  param, for use in 64-bit code and we need to check and clear flags. */
    184224#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
     225    return iemRegRip64RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
     226
     227/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as
     228 *  param, for use in 64-bit code jumping within a page and we need to check and
     229 *  clear flags.
     230 * @note No special function for this, there is nothing to save here.  */
     231#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    185232    return iemRegRip64RelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16), a_rcNormal)
    186233
     
    200247
    201248/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
     249 *  an extra parameter, for use in flat 32-bit code on 386+. */
     250#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT(a_i32, a_cbInstr, a_rcNormal) \
     251    return iemRegEip32RelativeJumpS32FlatAndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
     252
     253/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
    202254 *  an extra parameter, for use in 64-bit code. */
    203255#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal) \
    204256    return iemRegRip64RelativeJumpS32AndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
     257
     258/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
     259 *  an extra parameter, for use in 64-bit code jumping within a page. */
     260#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG(a_i32, a_cbInstr, a_rcNormal) \
     261    return iemRegRip64RelativeJumpS32IntraPgAndFinishNoFlags(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
    205262
    206263
     
    218275
    219276/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
     277 *  an extra parameter, for use in flat 32-bit code on 386+ and we need
     278 *  to check and clear flags. */
     279#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_FLAT_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
     280    return iemRegEip32RelativeJumpS32FlatAndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
     281
     282/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
    220283 *  an extra parameter, for use in 64-bit code and we need to check and clear
    221284 *  flags. */
    222285#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    223286    return iemRegRip64RelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
     287
     288/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
     289 *  an extra parameter, for use in 64-bit code jumping within a page and we need
     290 *  to check and clear flags. */
     291#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_INTRAPG_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
     292    return iemRegRip64RelativeJumpS32IntraPgAndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_rcNormal)
    224293
    225294#undef  IEM_MC_REL_JMP_S32_AND_FINISH
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py

    r105673 r105768  
    199199    ksVariation_32_NoJmp          = '_32_NoJmp';         ##< 32-bit mode code (386+), conditional jump not taken.
    200200    ksVariation_32f_NoJmp         = '_32f_NoJmp';        ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
    201     ksVariation_32_Flat           = '_32_Flat';          ##< 32-bit mode code (386+) with CS, DS, E,S and SS flat and 4GB wide.
    202     ksVariation_32f_Flat          = '_32f_Flat';         ##< 32-bit mode code (386+) with CS, DS, E,S and SS flat and 4GB wide, eflags.
     201    ksVariation_32_Flat_Jmp       = '_32_Flat_Jmp';      ##< 32-bit mode code (386+) with flat CS, SS, DS and ES, conditional jump taken.
     202    ksVariation_32f_Flat_Jmp      = '_32f_Flat_Jmp';     ##< 32-bit mode code (386+) with flat CS, SS, DS and ES, check+clear eflags, conditional jump taken.
     203    ksVariation_32_Flat_NoJmp     = '_32_Flat_NoJmp';    ##< 32-bit mode code (386+) with flat CS, SS, DS and ES, conditional jump not taken.
     204    ksVariation_32f_Flat_NoJmp    = '_32f_Flat_NoJmp';   ##< 32-bit mode code (386+) with flat CS, SS, DS and ES, check+clear eflags, conditional jump not taken.
     205    ksVariation_32_Flat           = '_32_Flat';          ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
     206    ksVariation_32f_Flat          = '_32f_Flat';         ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
    203207    ksVariation_32_Addr16         = '_32_Addr16';        ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
    204208    ksVariation_32f_Addr16        = '_32f_Addr16';       ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
     
    208212    ksVariation_64f_Jmp           = '_64f_Jmp';          ##< 64-bit mode code, check+clear eflags, conditional jump taken.
    209213    ksVariation_64_NoJmp          = '_64_NoJmp';         ##< 64-bit mode code, conditional jump not taken.
    210     ksVariation_64f_NoJmp         = '_64f_NoJmp';        ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
      214    ksVariation_64f_NoJmp         = '_64f_NoJmp';        ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
     215    ksVariation_64_SamePg_Jmp     = '_64_SamePg_Jmp';    ##< 64-bit mode code, conditional jump within page taken.
     216    ksVariation_64f_SamePg_Jmp    = '_64f_SamePg_Jmp';   ##< 64-bit mode code, check+clear eflags, conditional jump taken.
     217    ksVariation_64_SamePg_NoJmp   = '_64_SamePg_NoJmp';  ##< 64-bit mode code, conditional jump within page not taken.
     218    ksVariation_64f_SamePg_NoJmp  = '_64f_SamePg_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump within page not taken.
    211219    ksVariation_64_FsGs           = '_64_FsGs';          ##< 64-bit mode code, with memory accesses via FS or GS.
    212220    ksVariation_64f_FsGs          = '_64f_FsGs';         ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
     
    236244        ksVariation_32_NoJmp,
    237245        ksVariation_32f_NoJmp,
     246        ksVariation_32_Flat_Jmp,
     247        ksVariation_32f_Flat_Jmp,
     248        ksVariation_32_Flat_NoJmp,
     249        ksVariation_32f_Flat_NoJmp,
    238250        ksVariation_32_Flat,
    239251        ksVariation_32f_Flat,
     
    246258        ksVariation_64_NoJmp,
    247259        ksVariation_64f_NoJmp,
     260        ksVariation_64_SamePg_Jmp,
     261        ksVariation_64f_SamePg_Jmp,
     262        ksVariation_64_SamePg_NoJmp,
     263        ksVariation_64f_SamePg_NoJmp,
    248264        ksVariation_64_FsGs,
    249265        ksVariation_64f_FsGs,
     
    369385        ksVariation_64_Jmp,
    370386        ksVariation_64f_Jmp,
     387        ksVariation_64_SamePg_Jmp,
     388        ksVariation_64f_SamePg_Jmp,
    371389        ksVariation_64_NoJmp,
    372390        ksVariation_64f_NoJmp,
     391        ksVariation_64_SamePg_NoJmp,
     392        ksVariation_64f_SamePg_NoJmp,
    373393        ksVariation_64_FsGs,
    374394        ksVariation_64f_FsGs,
    375395        ksVariation_32_Flat,
    376396        ksVariation_32f_Flat,
     397        ksVariation_32_Flat_Jmp,
     398        ksVariation_32f_Flat_Jmp,
     399        ksVariation_32_Flat_NoJmp,
     400        ksVariation_32f_Flat_NoJmp,
    377401        ksVariation_32,
    378402        ksVariation_32f,
     
    422446        ksVariation_32_NoJmp:         '32-bit w/ conditional jump not taken',
    423447        ksVariation_32f_NoJmp:        '32-bit w/ eflag checking and clearing and conditional jump not taken',
     448        ksVariation_32_Flat_Jmp:      '32-bit flat+wide CS, ++ w/ conditional jump taken',
     449        ksVariation_32f_Flat_Jmp:     '32-bit flat+wide CS, ++ w/ eflag checking and clearing and conditional jump taken',
     450        ksVariation_32_Flat_NoJmp:    '32-bit flat+wide CS, ++ w/ conditional jump not taken',
     451        ksVariation_32f_Flat_NoJmp:   '32-bit flat+wide CS, ++ w/ eflag checking and clearing and conditional jump not taken',
    424452        ksVariation_32_Flat:          '32-bit flat and wide open CS, SS, DS and ES',
    425453        ksVariation_32f_Flat:         '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
     
    429457        ksVariation_64f:              '64-bit w/ eflag checking and clearing',
    430458        ksVariation_64_Jmp:           '64-bit w/ conditional jump taken',
    431         ksVariation_64f_Jmp:          '64-bit w/ eflag checking and clearing and  conditional jump taken',
     459        ksVariation_64f_Jmp:          '64-bit w/ eflag checking and clearing and conditional jump taken',
    432460        ksVariation_64_NoJmp:         '64-bit w/ conditional jump not taken',
    433         ksVariation_64f_NoJmp:        '64-bit w/ eflag checking and clearing and  conditional jump not taken',
     461        ksVariation_64f_NoJmp:        '64-bit w/ eflag checking and clearing and conditional jump not taken',
     462        ksVariation_64_SamePg_Jmp:    '64-bit w/ conditional jump within page taken',
      463        ksVariation_64f_SamePg_Jmp:   '64-bit w/ eflag checking and clearing and conditional jump within page taken',
     464        ksVariation_64_SamePg_NoJmp:  '64-bit w/ conditional jump within page not taken',
     465        ksVariation_64f_SamePg_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump within page not taken',
    434466        ksVariation_64_FsGs:          '64-bit with memory accessed via FS or GS',
    435467        ksVariation_64f_FsGs:         '64-bit with memory accessed via FS or GS and eflag checking and clearing',
     
    449481        ksVariation_32f_NoJmp: True,
    450482        ksVariation_32f_Flat: True,
     483        ksVariation_32f_Flat_Jmp: True,
     484        ksVariation_32f_Flat_NoJmp: True,
    451485        ksVariation_32f_Addr16: True,
    452486        ksVariation_64f: True,
    453487        ksVariation_64f_Jmp: True,
    454488        ksVariation_64f_NoJmp: True,
     489        ksVariation_64f_SamePg_Jmp: True,
     490        ksVariation_64f_SamePg_NoJmp: True,
    455491        ksVariation_64f_FsGs: True,
    456492        ksVariation_64f_Addr32: True,
    457493    };
    458494    kdVariationsOnly64NoFlags = {
    459         ksVariation_64:        True,
    460         ksVariation_64_Jmp:    True,
    461         ksVariation_64_NoJmp:  True,
    462         ksVariation_64_FsGs:   True,
    463         ksVariation_64_Addr32: True,
     495        ksVariation_64:                 True,
     496        ksVariation_64_Jmp:             True,
     497        ksVariation_64_NoJmp:           True,
     498        ksVariation_64_SamePg_Jmp:      True,
     499        ksVariation_64_SamePg_NoJmp:    True,
     500        ksVariation_64_FsGs:            True,
     501        ksVariation_64_Addr32:          True,
    464502    };
    465503    kdVariationsOnly64WithFlags = {
    466         ksVariation_64f:        True,
    467         ksVariation_64f_Jmp:    True,
    468         ksVariation_64f_NoJmp:  True,
    469         ksVariation_64f_FsGs:   True,
    470         ksVariation_64f_Addr32: True,
     504        ksVariation_64f:                True,
     505        ksVariation_64f_Jmp:            True,
     506        ksVariation_64f_NoJmp:          True,
     507        ksVariation_64f_SamePg_Jmp:     True,
     508        ksVariation_64f_SamePg_NoJmp:   True,
     509        ksVariation_64f_FsGs:           True,
     510        ksVariation_64f_Addr32:         True,
    471511    };
    472512    kdVariationsOnlyPre386NoFlags = {
     
    537577        ksVariation_32_Jmp:             True,
    538578        ksVariation_32_NoJmp:           True,
     579        ksVariation_32_Flat_Jmp:        True,
     580        ksVariation_32_Flat_NoJmp:      True,
    539581        ksVariation_64_Jmp:             True,
    540582        ksVariation_64_NoJmp:           True,
     583        ksVariation_64_SamePg_Jmp:      True,
     584        ksVariation_64_SamePg_NoJmp:    True,
    541585        ksVariation_16f_Jmp:            True,
    542586        ksVariation_16f_NoJmp:          True,
     
    545589        ksVariation_32f_Jmp:            True,
    546590        ksVariation_32f_NoJmp:          True,
     591        ksVariation_32f_Flat_Jmp:       True,
     592        ksVariation_32f_Flat_NoJmp:     True,
    547593        ksVariation_64f_Jmp:            True,
    548594        ksVariation_64f_NoJmp:          True,
     595        ksVariation_64f_SamePg_Jmp:     True,
     596        ksVariation_64f_SamePg_NoJmp:   True,
    549597    };
    550598    kdVariationsWithConditionalNoJmp = {
     
    552600        ksVariation_16_Pre386_NoJmp:    True,
    553601        ksVariation_32_NoJmp:           True,
     602        ksVariation_32_Flat_NoJmp:      True,
    554603        ksVariation_64_NoJmp:           True,
     604        ksVariation_64_SamePg_NoJmp:    True,
    555605        ksVariation_16f_NoJmp:          True,
    556606        ksVariation_16f_Pre386_NoJmp:   True,
    557607        ksVariation_32f_NoJmp:          True,
     608        ksVariation_32f_Flat_NoJmp:     True,
    558609        ksVariation_64f_NoJmp:          True,
     610        ksVariation_64f_SamePg_NoJmp:   True,
     611    };
     612    kdVariationsWithFlat32Conditional = {
     613        ksVariation_32_Flat_Jmp:        True,
     614        ksVariation_32_Flat_NoJmp:      True,
     615        ksVariation_32f_Flat_Jmp:       True,
     616        ksVariation_32f_Flat_NoJmp:     True,
     617    };
     618    kdVariationsWithSamePgConditional = {
     619        ksVariation_64_SamePg_Jmp:      True,
     620        ksVariation_64_SamePg_NoJmp:    True,
     621        ksVariation_64f_SamePg_Jmp:     True,
     622        ksVariation_64f_SamePg_NoJmp:   True,
    559623    };
    560624    kdVariationsOnlyPre386 = {
     
    9671031    };
    9681032
     1033    kdRelJmpMcWithFlatOrSamePageVariations = {
     1034        'IEM_MC_REL_JMP_S8_AND_FINISH':  True,
     1035        'IEM_MC_REL_JMP_S16_AND_FINISH': True,
     1036        'IEM_MC_REL_JMP_S32_AND_FINISH': True,
     1037    };
     1038
    9691039    def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0, iLevel = 0):
    9701040        """
     
    10501120                        and self.sVariation not in self.kdVariationsOnlyPre386):
    10511121                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
    1052                     oNewStmt.sName += '_THREADED';
    10531122                    if   self.sVariation in self.kdVariationsOnly64NoFlags:
    1054                         oNewStmt.sName += '_PC64';
     1123                        if (  self.sVariation not in self.kdVariationsWithSamePgConditional
     1124                            or oNewStmt.sName not in self.kdRelJmpMcWithFlatOrSamePageVariations):
     1125                            oNewStmt.sName += '_THREADED_PC64';
     1126                        else:
     1127                            oNewStmt.sName += '_THREADED_PC64_INTRAPG';
    10551128                    elif self.sVariation in self.kdVariationsOnly64WithFlags:
    1056                         oNewStmt.sName += '_PC64_WITH_FLAGS';
     1129                        if (  self.sVariation not in self.kdVariationsWithSamePgConditional
     1130                            or oNewStmt.sName not in self.kdRelJmpMcWithFlatOrSamePageVariations):
     1131                            oNewStmt.sName += '_THREADED_PC64_WITH_FLAGS';
     1132                        else:
     1133                            oNewStmt.sName += '_THREADED_PC64_INTRAPG_WITH_FLAGS';
    10571134                    elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
    1058                         oNewStmt.sName += '_PC16';
     1135                        oNewStmt.sName += '_THREADED_PC16';
    10591136                    elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
    1060                         oNewStmt.sName += '_PC16_WITH_FLAGS';
    1061                     elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
    1062                         assert self.sVariation != self.ksVariation_Default;
    1063                         oNewStmt.sName += '_PC32';
     1137                        oNewStmt.sName += '_THREADED_PC16_WITH_FLAGS';
     1138                    elif oNewStmt.sName not in self.kdRelJmpMcWithFlatOrSamePageVariations:
     1139                        if self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
     1140                            assert self.sVariation != self.ksVariation_Default;
     1141                            oNewStmt.sName += '_THREADED_PC32';
     1142                        else:
     1143                            oNewStmt.sName += '_THREADED_PC32_WITH_FLAGS';
    10641144                    else:
    1065                         oNewStmt.sName += '_PC32_WITH_FLAGS';
     1145                        if self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
     1146                            assert self.sVariation != self.ksVariation_Default;
     1147                            oNewStmt.sName += '_THREADED_PC32_FLAT';
     1148                        else:
     1149                            oNewStmt.sName += '_THREADED_PC32_FLAT_WITH_FLAGS';
    10661150
    10671151                    # This is making the wrong branch of conditionals break out of the TB.
     
    22142298                assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);
    22152299
     2300                # We've got some Flat variations we need to add manually to avoid unnecessary CS.LIM checks.
     2301                if ThrdFnVar.ksVariation_32 in asVariationsBase:
     2302                    assert ThrdFnVar.ksVariation_32f in asVariationsBase;
     2303                    asVariations.extend([
     2304                        ThrdFnVar.ksVariation_32_Flat_Jmp,
     2305                        ThrdFnVar.ksVariation_32_Flat_NoJmp,
     2306                        ThrdFnVar.ksVariation_32f_Flat_Jmp,
     2307                        ThrdFnVar.ksVariation_32f_Flat_NoJmp,
     2308                    ]);
     2309
     2310                # Similarly, if there are 64-bit variants, we need the within same page variations.
     2311                # We skip this when the operand size prefix forces is used because it cuts RIP down
     2312                # to 16-bit only and the same-page assumptions are most likely wrong then.
     2313                if (    ThrdFnVar.ksVariation_64 in asVariationsBase
     2314                    and not iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_REL_JMP_S16_AND_FINISH': True })):
     2315                    assert ThrdFnVar.ksVariation_64f in asVariationsBase;
     2316                    asVariations.extend([
     2317                        ThrdFnVar.ksVariation_64_SamePg_Jmp,
     2318                        ThrdFnVar.ksVariation_64_SamePg_NoJmp,
     2319                        ThrdFnVar.ksVariation_64f_SamePg_Jmp,
     2320                        ThrdFnVar.ksVariation_64f_SamePg_NoJmp,
     2321                    ]);
     2322
    22162323        if not iai.McStmt.findStmtByNames(aoStmts,
    22172324                                          { 'IEM_MC_ADVANCE_RIP_AND_FINISH':  True,
     
    22722379        The sBranch parameter is used with conditional branches where we'll emit
    22732380        different threaded calls depending on whether we're in the jump-taken or
    2274         no-jump code path.
     2381        no-jump code path.  Values are either None, 'Jmp' or 'NoJmp'.
    22752382
    22762383        The fTbLookupTable parameter can either be False, True or whatever else
    2277         (like 2) - in the latte case this means a large lookup table.
     2384        (like 2) - in the latter case this means a large lookup table.
    22782385        """
    22792386        # Special case for only default variation:
     
    22862393        #
    22872394        dByVari = self.dVariations;
    2288         #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
     2395        fDbg = self.oMcBlock.sFunction == 'iemOp_jnl_Jv';
    22892396        class Case:
    2290             def __init__(self, sCond, sVarNm = None):
     2397            def __init__(self, sCond, sVarNm = None, sIntraPgVarNm = None, sIntraPgDispVariable = None):
    22912398                self.sCond  = sCond;
    22922399                self.sVarNm = sVarNm;
    22932400                self.oVar   = dByVari[sVarNm] if sVarNm else None;
    22942401                self.aoBody = self.oVar.emitThreadedCallStmtsForVariant(8, fTbLookupTable) if sVarNm else None;
     2402                # Some annoying complications just to skip canonical jump target checks for intrapage jumps.
     2403                self.sIntraPgDispVariable = sIntraPgDispVariable;
     2404                self.oIntraPgVar   = dByVari[sIntraPgVarNm] if sIntraPgVarNm else None;
     2405                self.aoIntraPgBody = self.oIntraPgVar.emitThreadedCallStmtsForVariant(8, fTbLookupTable) if sIntraPgVarNm \
     2406                                     else None;
    22952407
    22962408            def toCode(self):
    22972409                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
    22982410                if self.aoBody:
    2299                     aoStmts.extend(self.aoBody);
    2300                     aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
     2411                    if not self.aoIntraPgBody:
     2412                        aoStmts.extend(self.aoBody);
     2413                        aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
     2414                    else:
     2415                        aoStmts.extend([
     2416                            iai.McCppCond('!IEMOP_HLP_PC64_IS_JMP_REL_WITHIN_PAGE(%s)' % (self.sIntraPgDispVariable,),
     2417                                          True, self.aoBody, self.aoIntraPgBody, cchIndent = 8),
     2418                            iai.McCppGeneric('break;', cchIndent = 8),
     2419                        ]);
    23012420                return aoStmts;
    23022421
     
    23042423                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
    23052424                if self.aoBody:
    2306                     aoStmts.extend([
    2307                         iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
    2308                         iai.McCppGeneric('break;', cchIndent = 8),
    2309                     ]);
     2425                    if not self.aoIntraPgBody:
     2426                        aoStmts.extend([
     2427                            iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
     2428                            iai.McCppGeneric('break;', cchIndent = 8),
     2429                        ]);
     2430                    else:
     2431                        aoStmts.extend([
     2432                            iai.McCppGeneric('enmFunction = !IEMOP_HLP_PC64_IS_JMP_REL_WITHIN_PAGE(%s) ? %s : %s;'
     2433                                             % (self.sIntraPgDispVariable, self.oVar.getIndexName(),
     2434                                                self.oIntraPgVar.getIndexName(),), cchIndent = 8),
     2435                            iai.McCppGeneric('break;', cchIndent = 8),
     2436                        ]);
    23102437                return aoStmts;
    23112438
    2312             def isSame(self, oThat):
    2313                 if not self.aoBody:                 # fall thru always matches.
    2314                     return True;
    2315                 if len(self.aoBody) != len(oThat.aoBody):
    2316                     #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
     2439            @staticmethod
     2440            def isSameBody(aoThisBody, sThisIndexName, aoThatBody, sThatIndexName, sBody = ''):
     2441                if len(aoThisBody) != len(aoThatBody):
     2442                    if fDbg: print('dbg: %sbody len diff: %s vs %s' % (sBody, len(aoThisBody), len(aoThatBody),));
    23172443                    return False;
    2318                 for iStmt, oStmt in enumerate(self.aoBody):
    2319                     oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
     2444                for iStmt, oStmt in enumerate(aoThisBody):
     2445                    oThatStmt = aoThatBody[iStmt] # type: iai.McStmt
    23202446                    assert isinstance(oStmt, iai.McCppGeneric);
    23212447                    assert not isinstance(oStmt, iai.McStmtCond);
     
    23232449                        return False;
    23242450                    if oStmt.sName != oThatStmt.sName:
    2325                         #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
     2451                        if fDbg: print('dbg: %sstmt #%s name: %s vs %s' % (sBody, iStmt, oStmt.sName, oThatStmt.sName,));
    23262452                        return False;
    23272453                    if len(oStmt.asParams) != len(oThatStmt.asParams):
    2328                         #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
    2329                         #               % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
     2454                        if fDbg: print('dbg: %sstmt #%s param count: %s vs %s'
     2455                                       % (sBody, iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
    23302456                        return False;
    23312457                    for iParam, sParam in enumerate(oStmt.asParams):
    23322458                        if (    sParam != oThatStmt.asParams[iParam]
    2333                             and (   iParam != 1
     2459                            and (   iParam not in (1, 2)
    23342460                                 or not isinstance(oStmt, iai.McCppCall)
    23352461                                 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
    2336                                  or sParam != self.oVar.getIndexName()
    2337                                  or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
    2338                             #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
    2339                             #               % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
     2462                                 or sParam != sThisIndexName
     2463                                 or oThatStmt.asParams[iParam] != sThatIndexName )):
     2464                            if fDbg: print('dbg: %sstmt #%s, param #%s: %s vs %s'
     2465                                           % (sBody, iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
    23402466                            return False;
     2467                return True;
     2468
     2469            def isSame(self, oThat):
     2470                if self.aoBody:  # no body == fall thru - that always matches.
     2471                    if not self.isSameBody(self.aoBody,  self.oVar.getIndexName(),
     2472                                           oThat.aoBody, oThat.oVar.getIndexName()):
     2473                        return False;
     2474                    if self.aoIntraPgBody and not self.isSameBody(self.aoIntraPgBody,   self.oIntraPgVar.getIndexName(),
     2475                                                                  oThat.aoBody,         oThat.oVar.getIndexName(),
     2476                                                                  'intrapg/left '):
     2477                        return False;
     2478                    if oThat.aoIntraPgBody and not self.isSameBody(self.aoBody,         self.oVar.getIndexName(),
     2479                                                                   oThat.aoIntraPgBody, oThat.oIntraPgVar.getIndexName(),
     2480                                                                   'intrapg/right '):
     2481                        return False;
    23412482                return True;
    23422483
     
    23852526        elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
    23862527            assert fSimple and sBranch;
    2387             aoCases.append(Case('IEMMODE_64BIT',
    2388                                 ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
    2389             if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
    2390                 aoCases.append(Case('IEMMODE_64BIT | 32',
    2391                                     ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));
     2528            if ThrdFnVar.ksVariation_64_SamePg_Jmp not in dByVari:
     2529                assert ThrdFnVar.ksVariation_64f_Jmp in dByVari;
     2530                aoCases.extend([
     2531                    Case('IEMMODE_64BIT',
     2532                         ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp),
     2533                    Case('IEMMODE_64BIT | 32',
     2534                         ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp),
     2535                ]);
     2536            else:
     2537                assert ThrdFnVar.ksVariation_64f_SamePg_Jmp in dByVari;
     2538                oStmtRelJmp = iai.McStmt.findStmtByNames(self.oMcBlock.decode(),
     2539                                                         { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
     2540                                                           'IEM_MC_REL_JMP_S16_AND_FINISH': True,
     2541                                                           'IEM_MC_REL_JMP_S32_AND_FINISH': True,});
     2542                sIntraPgDispVariable = oStmtRelJmp.asParams[0];
     2543                aoCases.extend([
     2544                    Case('IEMMODE_64BIT',
     2545                         ThrdFnVar.ksVariation_64_Jmp        if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp,
     2546                         ThrdFnVar.ksVariation_64_SamePg_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_SamePg_NoJmp,
     2547                         sIntraPgDispVariable),
     2548                    Case('IEMMODE_64BIT | 32',
     2549                         ThrdFnVar.ksVariation_64f_Jmp        if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp,
     2550                         ThrdFnVar.ksVariation_64f_SamePg_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_SamePg_NoJmp,
     2551                         sIntraPgDispVariable),
     2552                ]);
     2553
    23922554
    23932555        if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
     
    24282590            assert fSimple and sBranch;
    24292591            aoCases.extend([
    2430                 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
     2592                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
     2593                     ThrdFnVar.ksVariation_32_Flat_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_Flat_NoJmp),
    24312594                Case('IEMMODE_32BIT',
    24322595                     ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
     
    24342597            if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
    24352598                aoCases.extend([
    2436                     Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
     2599                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
     2600                         ThrdFnVar.ksVariation_32f_Flat_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_Flat_NoJmp),
    24372601                    Case('IEMMODE_32BIT                                       | 32',
    24382602                         ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
     
    24982662        for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
    24992663            fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
    2500         #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
     2664        if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
    25012665        if fAllSameCases:
    25022666            aoStmts = [
  • trunk/src/VBox/VMM/include/IEMInline.h

    r105465 r105768  
    23572357
    23582358/**
    2359  * Adds a 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
    2360  * code (never 64-bit).
     2359 * Adds a 8-bit signed jump offset to RIP from 64-bit code when the caller is
     2360 * sure it stays within the same page.
    23612361 *
    23622362 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     
    23712371 *                              taking the wrong conditional branhc.
    23722372 */
    2373 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
    2374                                                                              IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
    2375 {
    2376     Assert(!IEM_IS_64BIT_CODE(pVCpu));
    2377     Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
    2378 
    2379     uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
    2380     if (enmEffOpSize == IEMMODE_16BIT)
    2381         uNewEip &= UINT16_MAX;
    2382     if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
    2383         pVCpu->cpum.GstCtx.rip = uNewEip;
    2384     else
    2385         return iemRaiseGeneralProtectionFault0(pVCpu);
     2373DECL_FORCE_INLINE(VBOXSTRICTRC)
     2374iemRegRip64RelativeJumpS8IntraPgAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
     2375                                                    IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     2376{
     2377    Assert(IEM_IS_64BIT_CODE(pVCpu));
     2378    Assert(enmEffOpSize == IEMMODE_64BIT); RT_NOREF(enmEffOpSize);
     2379
     2380    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     2381    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
     2382    pVCpu->cpum.GstCtx.rip = uNewRip;
    23862383
    23872384#ifndef IEM_WITH_CODE_TLB
     
    23972394
    23982395/**
    2399  * Adds a 8-bit signed jump offset to IP, on a pre-386 CPU.
    2400  *
    2401  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    2402  * segment limit.
    2403  *
    2404  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2405  * @param   cbInstr             Instruction size.
    2406  * @param   offNextInstr        The offset of the next instruction.
    2407  * @param   rcNormal            VINF_SUCCESS to continue TB.
    2408  *                              VINF_IEM_REEXEC_BREAK to force TB exit when
    2409  *                              taking the wrong conditional branhc.
    2410  */
    2411 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
    2412                                                                             int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
    2413 {
    2414     Assert(!IEM_IS_64BIT_CODE(pVCpu));
    2415 
    2416     uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
    2417     if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
    2418         pVCpu->cpum.GstCtx.rip = uNewIp;
    2419     else
    2420         return iemRaiseGeneralProtectionFault0(pVCpu);
    2421 
    2422 #ifndef IEM_WITH_CODE_TLB
    2423     iemOpcodeFlushLight(pVCpu, cbInstr);
    2424 #endif
    2425 
    2426     /*
    2427      * Clear RF and finish the instruction (maybe raise #DB).
    2428      */
    2429     return iemRegFinishClearingRF(pVCpu, rcNormal);
    2430 }
    2431 
    2432 
    2433 /**
    2434  * Adds a 8-bit signed jump offset to RIP from 64-bit code, no checking or
    2435  * clearing of flags.
     2396 * Adds a 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
     2397 * code (never 64-bit).
    24362398 *
    24372399 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     
    24462408 *                              taking the wrong conditional branhc.
    24472409 */
    2448 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
    2449                                                                           IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
    2450 {
    2451     Assert(IEM_IS_64BIT_CODE(pVCpu));
    2452     Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
    2453 
    2454     uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     2410DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
     2411                                                                             IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     2412{
     2413    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     2414    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
     2415
     2416    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
    24552417    if (enmEffOpSize == IEMMODE_16BIT)
    2456         uNewRip &= UINT16_MAX;
    2457 
    2458     if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
    2459         pVCpu->cpum.GstCtx.rip = uNewRip;
     2418        uNewEip &= UINT16_MAX;
     2419    if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
     2420        pVCpu->cpum.GstCtx.rip = uNewEip;
    24602421    else
    24612422        return iemRaiseGeneralProtectionFault0(pVCpu);
     
    24642425    iemOpcodeFlushLight(pVCpu, cbInstr);
    24652426#endif
    2466     return iemRegFinishNoFlags(pVCpu, rcNormal);
    2467 }
    2468 
    2469 
    2470 /**
    2471  * Adds a 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
    2472  * code (never 64-bit), no checking or clearing of flags.
     2427
     2428    /*
     2429     * Clear RF and finish the instruction (maybe raise #DB).
     2430     */
     2431    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2432}
     2433
     2434
     2435/**
     2436 * Adds a 8-bit signed jump offset to EIP, on 386 or later from FLAT 32-bit code
     2437 * (never 64-bit).
    24732438 *
    24742439 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     
    24832448 *                              taking the wrong conditional branhc.
    24842449 */
     2450DECL_FORCE_INLINE(VBOXSTRICTRC)
     2451 iemRegEip32RelativeJumpS8FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
     2452                                                  IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     2453{
     2454    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     2455    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
     2456
     2457    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
     2458    if (enmEffOpSize == IEMMODE_16BIT)
     2459        uNewEip &= UINT16_MAX;
     2460    pVCpu->cpum.GstCtx.rip = uNewEip;
     2461
     2462#ifndef IEM_WITH_CODE_TLB
     2463    iemOpcodeFlushLight(pVCpu, cbInstr);
     2464#endif
     2465
     2466    /*
     2467     * Clear RF and finish the instruction (maybe raise #DB).
     2468     */
     2469    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2470}
     2471
     2472
     2473/**
     2474 * Adds a 8-bit signed jump offset to IP, on a pre-386 CPU.
     2475 *
     2476 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2477 * segment limit.
     2478 *
     2479 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2480 * @param   cbInstr             Instruction size.
     2481 * @param   offNextInstr        The offset of the next instruction.
     2482 * @param   rcNormal            VINF_SUCCESS to continue TB.
     2483 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     2484 *                              taking the wrong conditional branhc.
     2485 */
     2486DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
     2487                                                                            int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2488{
     2489    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     2490
     2491    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
     2492    if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
     2493        pVCpu->cpum.GstCtx.rip = uNewIp;
     2494    else
     2495        return iemRaiseGeneralProtectionFault0(pVCpu);
     2496
     2497#ifndef IEM_WITH_CODE_TLB
     2498    iemOpcodeFlushLight(pVCpu, cbInstr);
     2499#endif
     2500
     2501    /*
     2502     * Clear RF and finish the instruction (maybe raise #DB).
     2503     */
     2504    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2505}
     2506
     2507
     2508/**
     2509 * Adds a 8-bit signed jump offset to RIP from 64-bit code, no checking or
     2510 * clearing of flags.
     2511 *
     2512 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2513 * segment limit.
     2514 *
     2515 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2516 * @param   cbInstr             Instruction size.
     2517 * @param   offNextInstr        The offset of the next instruction.
     2518 * @param   enmEffOpSize        Effective operand size.
     2519 * @param   rcNormal            VINF_SUCCESS to continue TB.
     2520 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     2521 *                              taking the wrong conditional branhc.
     2522 */
     2523DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
     2524                                                                          IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     2525{
     2526    Assert(IEM_IS_64BIT_CODE(pVCpu));
     2527    Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
     2528
     2529    uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     2530    if (enmEffOpSize == IEMMODE_16BIT)
     2531        uNewRip &= UINT16_MAX;
     2532
     2533    if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
     2534        pVCpu->cpum.GstCtx.rip = uNewRip;
     2535    else
     2536        return iemRaiseGeneralProtectionFault0(pVCpu);
     2537
     2538#ifndef IEM_WITH_CODE_TLB
     2539    iemOpcodeFlushLight(pVCpu, cbInstr);
     2540#endif
     2541    return iemRegFinishNoFlags(pVCpu, rcNormal);
     2542}
     2543
     2544
     2545/**
     2546 * Adds a 8-bit signed jump offset to RIP from 64-bit code when caller is sure
     2547 * it stays within the same page, no checking or clearing of flags.
     2548 *
     2549 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2550 * segment limit.
     2551 *
     2552 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2553 * @param   cbInstr             Instruction size.
     2554 * @param   offNextInstr        The offset of the next instruction.
     2555 * @param   enmEffOpSize        Effective operand size.
     2556 * @param   rcNormal            VINF_SUCCESS to continue TB.
     2557 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     2558 *                              taking the wrong conditional branhc.
     2559 */
     2560DECL_FORCE_INLINE(VBOXSTRICTRC)
     2561iemRegRip64RelativeJumpS8IntraPgAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
     2562                                                 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     2563{
     2564    Assert(IEM_IS_64BIT_CODE(pVCpu));
     2565    Assert(enmEffOpSize == IEMMODE_64BIT); RT_NOREF(enmEffOpSize);
     2566
     2567    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     2568    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
     2569    pVCpu->cpum.GstCtx.rip = uNewRip;
     2570
     2571#ifndef IEM_WITH_CODE_TLB
     2572    iemOpcodeFlushLight(pVCpu, cbInstr);
     2573#endif
     2574    return iemRegFinishNoFlags(pVCpu, rcNormal);
     2575}
     2576
     2577
     2578/**
     2579 * Adds a 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
     2580 * code (never 64-bit), no checking or clearing of flags.
     2581 *
     2582 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2583 * segment limit.
     2584 *
     2585 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2586 * @param   cbInstr             Instruction size.
     2587 * @param   offNextInstr        The offset of the next instruction.
     2588 * @param   enmEffOpSize        Effective operand size.
     2589 * @param   rcNormal            VINF_SUCCESS to continue TB.
     2590 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     2591 *                              taking the wrong conditional branhc.
     2592 */
    24852593DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
    24862594                                                                          IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     
    25052613
    25062614/**
     2615 * Adds a 8-bit signed jump offset to EIP, on 386 or later from flat 32-bit code
     2616 * (never 64-bit), no checking or clearing of flags.
     2617 *
     2618 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2619 * segment limit.
     2620 *
     2621 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2622 * @param   cbInstr             Instruction size.
     2623 * @param   offNextInstr        The offset of the next instruction.
     2624 * @param   enmEffOpSize        Effective operand size.
     2625 * @param   rcNormal            VINF_SUCCESS to continue TB.
     2626 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     2627 *                              taking the wrong conditional branhc.
     2628 */
     2629DECL_FORCE_INLINE(VBOXSTRICTRC)
     2630iemRegEip32RelativeJumpS8FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
     2631                                              IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     2632{
     2633    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     2634    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
     2635
     2636    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
     2637    if (enmEffOpSize == IEMMODE_16BIT)
     2638        uNewEip &= UINT16_MAX;
     2639    pVCpu->cpum.GstCtx.rip = uNewEip;
     2640
     2641#ifndef IEM_WITH_CODE_TLB
     2642    iemOpcodeFlushLight(pVCpu, cbInstr);
     2643#endif
     2644    return iemRegFinishNoFlags(pVCpu, rcNormal);
     2645}
     2646
     2647
     2648/**
    25072649 * Adds a 8-bit signed jump offset to IP, on a pre-386 CPU, no checking or
    25082650 * clearing of flags.
     
    26052747
    26062748/**
    2607  * Adds a 16-bit signed jump offset to RIP from 64-bit code, no checking or
    2608  * clearing of flags.
     2749 * Adds a 16-bit signed jump offset to EIP from FLAT 32-bit code.
     2750 *
     2751 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2752 * segment limit.
    26092753 *
    26102754 * @returns Strict VBox status code.
     
    26152759 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
    26162760 *                              taking the wrong conditional branhc.
    2617  */
    2618 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
    2619                                                                            int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
    2620 {
    2621     Assert(IEM_IS_64BIT_CODE(pVCpu));
    2622 
    2623     pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
     2761 *
     2762 * @note    This is also used by 16-bit code in pre-386 mode, as the code is
     2763 *          identical.
     2764 */
     2765DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
     2766                                                                                  int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2767{
     2768    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     2769
     2770    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
     2771    pVCpu->cpum.GstCtx.rip = uNewIp;
    26242772
    26252773#ifndef IEM_WITH_CODE_TLB
    26262774    iemOpcodeFlushLight(pVCpu, cbInstr);
    26272775#endif
    2628     return iemRegFinishNoFlags(pVCpu, rcNormal);
    2629 }
    2630 
    2631 
    2632 /**
    2633  * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code,
    2634  * no checking or clearing of flags.
    2635  *
    2636  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    2637  * segment limit.
     2776
     2777    /*
     2778     * Clear RF and finish the instruction (maybe raise #DB).
     2779     */
     2780    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2781}
     2782
     2783
     2784/**
     2785 * Adds a 16-bit signed jump offset to RIP from 64-bit code, no checking or
     2786 * clearing of flags.
    26382787 *
    26392788 * @returns Strict VBox status code.
     
    26442793 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
    26452794 *                              taking the wrong conditional branhc.
    2646  *
    2647  * @note    This is also used by 16-bit code in pre-386 mode, as the code is
    2648  *          identical.
    2649  */
    2650 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
     2795 */
     2796DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
    26512797                                                                           int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
    26522798{
    2653     Assert(!IEM_IS_64BIT_CODE(pVCpu));
    2654 
    2655     uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
    2656     if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
    2657         pVCpu->cpum.GstCtx.rip = uNewIp;
    2658     else
    2659         return iemRaiseGeneralProtectionFault0(pVCpu);
     2799    Assert(IEM_IS_64BIT_CODE(pVCpu));
     2800
     2801    pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
    26602802
    26612803#ifndef IEM_WITH_CODE_TLB
     
    26672809
    26682810/**
    2669  * Adds a 32-bit signed jump offset to RIP from 64-bit code.
     2811 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code,
     2812 * no checking or clearing of flags.
    26702813 *
    26712814 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    26722815 * segment limit.
    2673  *
    2674  * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
    2675  * only alternative for relative jumps in 64-bit code and that is already
    2676  * handled in the decoder stage.
    26772816 *
    26782817 * @returns Strict VBox status code.
     
    26832822 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
    26842823 *                              taking the wrong conditional branhc.
    2685  */
    2686 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
    2687                                                                               int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
    2688 {
    2689     Assert(IEM_IS_64BIT_CODE(pVCpu));
    2690 
    2691     uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
    2692     if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
    2693         pVCpu->cpum.GstCtx.rip = uNewRip;
     2824 *
     2825 * @note    This is also used by 16-bit code in pre-386 mode, as the code is
     2826 *          identical.
     2827 */
     2828DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
     2829                                                                           int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2830{
     2831    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     2832
     2833    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
     2834    if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
     2835        pVCpu->cpum.GstCtx.rip = uNewIp;
    26942836    else
    26952837        return iemRaiseGeneralProtectionFault0(pVCpu);
     
    26982840    iemOpcodeFlushLight(pVCpu, cbInstr);
    26992841#endif
    2700 
    2701     /*
    2702      * Clear RF and finish the instruction (maybe raise #DB).
    2703      */
    2704     return iemRegFinishClearingRF(pVCpu, rcNormal);
    2705 }
    2706 
    2707 
    2708 /**
    2709  * Adds a 32-bit signed jump offset to RIP from 64-bit code.
     2842    return iemRegFinishNoFlags(pVCpu, rcNormal);
     2843}
     2844
     2845
     2846/**
     2847 * Adds a 16-bit signed jump offset to EIP from FLAT 32-bit code, no checking or
     2848 * clearing of flags.
    27102849 *
    27112850 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    27122851 * segment limit.
    2713  *
    2714  * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
    2715  * only alternative for relative jumps in 32-bit code and that is already
    2716  * handled in the decoder stage.
    27172852 *
    27182853 * @returns Strict VBox status code.
     
    27232858 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
    27242859 *                              taking the wrong conditional branhc.
    2725  */
    2726 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
    2727                                                                               int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2860 *
     2861 * @note    This is also used by 16-bit code in pre-386 mode, as the code is
     2862 *          identical.
     2863 */
     2864DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
     2865                                                                               int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
    27282866{
    27292867    Assert(!IEM_IS_64BIT_CODE(pVCpu));
    2730     Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
    2731 
    2732     uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
    2733     if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
    2734         pVCpu->cpum.GstCtx.rip = uNewEip;
    2735     else
    2736         return iemRaiseGeneralProtectionFault0(pVCpu);
     2868
     2869    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
     2870    pVCpu->cpum.GstCtx.rip = uNewIp;
    27372871
    27382872#ifndef IEM_WITH_CODE_TLB
    27392873    iemOpcodeFlushLight(pVCpu, cbInstr);
    27402874#endif
    2741 
    2742     /*
    2743      * Clear RF and finish the instruction (maybe raise #DB).
    2744      */
    2745     return iemRegFinishClearingRF(pVCpu, rcNormal);
    2746 }
    2747 
    2748 
    2749 /**
    2750  * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
    2751  * clearing of flags.
     2875    return iemRegFinishNoFlags(pVCpu, rcNormal);
     2876}
     2877
     2878
     2879/**
     2880 * Adds a 32-bit signed jump offset to RIP from 64-bit code.
    27522881 *
    27532882 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     
     27662895 *                              taking the wrong conditional branch.
    27672896 */
    2768 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
    2769                                                                            int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2897DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
     2898                                                                              int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
    27702899{
    27712900    Assert(IEM_IS_64BIT_CODE(pVCpu));
     
    27802909    iemOpcodeFlushLight(pVCpu, cbInstr);
    27812910#endif
    2782     return iemRegFinishNoFlags(pVCpu, rcNormal);
    2783 }
    2784 
    2785 
    2786 /**
    2787  * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
    2788  * clearing of flags.
     2911
     2912    /*
     2913     * Clear RF and finish the instruction (maybe raise #DB).
     2914     */
     2915    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2916}
     2917
     2918
     2919/**
     2920 * Adds a 32-bit signed jump offset to RIP from 64-bit code when the caller is
     2921 * sure the target is in the same page.
    27892922 *
    27902923 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    27912924 * segment limit.
    27922925 *
    2793  * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
    2794  * only alternative for relative jumps in 32-bit code and that is already
     2926 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
     2927 * only alternative for relative jumps in 64-bit code and that is already
    27952928 * handled in the decoder stage.
    27962929 *
     
    28032936 *                              taking the wrong conditional branhc.
    28042937 */
    2805 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
    2806                                                                            int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2938DECL_FORCE_INLINE(VBOXSTRICTRC)
     2939iemRegRip64RelativeJumpS32IntraPgAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
     2940                                                     int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2941{
     2942    Assert(IEM_IS_64BIT_CODE(pVCpu));
     2943
     2944    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     2945    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
     2946    pVCpu->cpum.GstCtx.rip = uNewRip;
     2947
     2948#ifndef IEM_WITH_CODE_TLB
     2949    iemOpcodeFlushLight(pVCpu, cbInstr);
     2950#endif
     2951
     2952    /*
     2953     * Clear RF and finish the instruction (maybe raise #DB).
     2954     */
     2955    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2956}
     2957
     2958
     2959/**
     2960 * Adds a 32-bit signed jump offset to RIP from 64-bit code.
     2961 *
     2962 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2963 * segment limit.
     2964 *
     2965 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
     2966 * only alternative for relative jumps in 32-bit code and that is already
     2967 * handled in the decoder stage.
     2968 *
     2969 * @returns Strict VBox status code.
     2970 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2971 * @param   cbInstr             Instruction size.
     2972 * @param   offNextInstr        The offset of the next instruction.
     2973 * @param   rcNormal            VINF_SUCCESS to continue TB.
     2974 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
      2975 *                              taking the wrong conditional branch.
     2976 */
     2977DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
     2978                                                                              int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
    28072979{
    28082980    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     
    28142986    else
    28152987        return iemRaiseGeneralProtectionFault0(pVCpu);
     2988
     2989#ifndef IEM_WITH_CODE_TLB
     2990    iemOpcodeFlushLight(pVCpu, cbInstr);
     2991#endif
     2992
     2993    /*
     2994     * Clear RF and finish the instruction (maybe raise #DB).
     2995     */
     2996    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2997}
     2998
     2999
     3000/**
     3001 * Adds a 32-bit signed jump offset to RIP from FLAT 32-bit code.
     3002 *
     3003 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     3004 * segment limit.
     3005 *
     3006 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
     3007 * only alternative for relative jumps in 32-bit code and that is already
     3008 * handled in the decoder stage.
     3009 *
     3010 * @returns Strict VBox status code.
     3011 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     3012 * @param   cbInstr             Instruction size.
     3013 * @param   offNextInstr        The offset of the next instruction.
     3014 * @param   rcNormal            VINF_SUCCESS to continue TB.
     3015 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     3016 *                              taking the wrong conditional branhc.
     3017 */
     3018DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
     3019                                                                                  int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     3020{
     3021    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     3022    Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
     3023
     3024    uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
     3025    pVCpu->cpum.GstCtx.rip = uNewEip;
     3026
     3027#ifndef IEM_WITH_CODE_TLB
     3028    iemOpcodeFlushLight(pVCpu, cbInstr);
     3029#endif
     3030
     3031    /*
     3032     * Clear RF and finish the instruction (maybe raise #DB).
     3033     */
     3034    return iemRegFinishClearingRF(pVCpu, rcNormal);
     3035}
     3036
     3037
     3038
     3039/**
     3040 * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
     3041 * clearing of flags.
     3042 *
     3043 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     3044 * segment limit.
     3045 *
     3046 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
     3047 * only alternative for relative jumps in 64-bit code and that is already
     3048 * handled in the decoder stage.
     3049 *
     3050 * @returns Strict VBox status code.
     3051 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     3052 * @param   cbInstr             Instruction size.
     3053 * @param   offNextInstr        The offset of the next instruction.
     3054 * @param   rcNormal            VINF_SUCCESS to continue TB.
     3055 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     3056 *                              taking the wrong conditional branhc.
     3057 */
     3058DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
     3059                                                                           int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     3060{
     3061    Assert(IEM_IS_64BIT_CODE(pVCpu));
     3062
     3063    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     3064    if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
     3065        pVCpu->cpum.GstCtx.rip = uNewRip;
     3066    else
     3067        return iemRaiseGeneralProtectionFault0(pVCpu);
     3068
     3069#ifndef IEM_WITH_CODE_TLB
     3070    iemOpcodeFlushLight(pVCpu, cbInstr);
     3071#endif
     3072    return iemRegFinishNoFlags(pVCpu, rcNormal);
     3073}
     3074
     3075
     3076/**
     3077 * Adds a 32-bit signed jump offset to RIP from 64-bit code when the caller is
     3078 * sure it stays within the same page, no checking or clearing of flags.
     3079 *
     3080 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     3081 * segment limit.
     3082 *
     3083 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
     3084 * only alternative for relative jumps in 64-bit code and that is already
     3085 * handled in the decoder stage.
     3086 *
     3087 * @returns Strict VBox status code.
     3088 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     3089 * @param   cbInstr             Instruction size.
     3090 * @param   offNextInstr        The offset of the next instruction.
     3091 * @param   rcNormal            VINF_SUCCESS to continue TB.
     3092 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     3093 *                              taking the wrong conditional branhc.
     3094 */
     3095DECL_FORCE_INLINE(VBOXSTRICTRC)
     3096iemRegRip64RelativeJumpS32IntraPgAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     3097{
     3098    Assert(IEM_IS_64BIT_CODE(pVCpu));
     3099
     3100    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     3101    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
     3102    pVCpu->cpum.GstCtx.rip = uNewRip;
     3103
     3104#ifndef IEM_WITH_CODE_TLB
     3105    iemOpcodeFlushLight(pVCpu, cbInstr);
     3106#endif
     3107    return iemRegFinishNoFlags(pVCpu, rcNormal);
     3108}
     3109
     3110
     3111/**
     3112 * Adds a 32-bit signed jump offset to RIP from 32-bit code, no checking or
     3113 * clearing of flags.
     3114 *
     3115 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     3116 * segment limit.
     3117 *
     3118 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
     3119 * only alternative for relative jumps in 32-bit code and that is already
     3120 * handled in the decoder stage.
     3121 *
     3122 * @returns Strict VBox status code.
     3123 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     3124 * @param   cbInstr             Instruction size.
     3125 * @param   offNextInstr        The offset of the next instruction.
     3126 * @param   rcNormal            VINF_SUCCESS to continue TB.
     3127 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     3128 *                              taking the wrong conditional branhc.
     3129 */
     3130DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
     3131                                                                           int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     3132{
     3133    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     3134    Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
     3135
     3136    uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
     3137    if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
     3138        pVCpu->cpum.GstCtx.rip = uNewEip;
     3139    else
     3140        return iemRaiseGeneralProtectionFault0(pVCpu);
     3141
     3142#ifndef IEM_WITH_CODE_TLB
     3143    iemOpcodeFlushLight(pVCpu, cbInstr);
     3144#endif
     3145    return iemRegFinishNoFlags(pVCpu, rcNormal);
     3146}
     3147
     3148
     3149/**
     3150 * Adds a 32-bit signed jump offset to RIP from FLAT 32-bit code, no checking or
     3151 * clearing of flags.
     3152 *
     3153 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     3154 * segment limit.
     3155 *
     3156 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
     3157 * only alternative for relative jumps in 32-bit code and that is already
     3158 * handled in the decoder stage.
     3159 *
     3160 * @returns Strict VBox status code.
     3161 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     3162 * @param   cbInstr             Instruction size.
     3163 * @param   offNextInstr        The offset of the next instruction.
     3164 * @param   rcNormal            VINF_SUCCESS to continue TB.
     3165 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     3166 *                              taking the wrong conditional branhc.
     3167 */
     3168DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
     3169                                                                               int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     3170{
     3171    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     3172    Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
     3173
     3174    uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
     3175    pVCpu->cpum.GstCtx.rip = uNewEip;
    28163176
    28173177#ifndef IEM_WITH_CODE_TLB
  • trunk/src/VBox/VMM/include/IEMOpHlp.h

    r105295 r105768  
    751751    } while (0)
    752752
     753/**
     754 * Used the threaded code generator to check if a jump stays within the same
     755 * page in 64-bit code.
     756 */
     757#define IEMOP_HLP_PC64_IS_JMP_REL_WITHIN_PAGE(a_offDisp) \
     758     (   ((pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (a_offDisp)) >> GUEST_PAGE_SHIFT) \
     759      == (pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT))
     760
    753761VBOXSTRICTRC    iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT;
    754762VBOXSTRICTRC    iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT;
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette