VirtualBox

Changeset 92408 in vbox


Timestamp:
Nov 12, 2021 9:49:06 PM
Author:
vboxsync
Message:

VMM: Reworked most of the call-ring-3 stuff into setjmp-longjmp-on-assert and removed the stack switching/copying/resume code. bugref:10093 bugref:10124
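
In rough terms: ring-0 now simply arms a jump buffer on the EMT when it enters guest-run or request code, and a ring-0 assertion long-jumps back to that point; the old scheme of switching to a copied stack and resuming a ring-3 call is gone. A minimal standalone C sketch of that shape (hypothetical names throughout; the real entry points are vmmR0CallRing3SetJmp/vmmR0CallRing3LongJmp operating on pGVCpu->vmmr0.s.AssertJmpBuf, implemented in assembly rather than via the C library):

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf g_AssertJmpBuf_sketch;          /* stand-in for vmmr0.s.AssertJmpBuf */

    /* Arm the buffer around the real work (cf. vmmR0CallRing3SetJmp). */
    static int runGuarded_sketch(int (*pfnWorker)(void))
    {
        int rc = setjmp(g_AssertJmpBuf_sketch);
        if (rc == 0)
            return pfnWorker();                    /* normal return path */
        return rc;                                 /* came back via longjmp: rc is the status */
    }

    /* What an assertion now does (cf. vmmR0CallRing3LongJmp + VERR_VMM_RING0_ASSERTION). */
    static void assertFailed_sketch(void)
    {
        longjmp(g_AssertJmpBuf_sketch, -1 /* placeholder status code, must be non-zero */);
    }

    static int worker_sketch(void)
    {
        assertFailed_sketch();                     /* does not return */
        return 0;
    }

    int main(void)
    {
        printf("rc=%d\n", runGuarded_sketch(worker_sketch)); /* prints rc=-1 */
        return 0;
    }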

Location:
trunk
Files:
2 deleted
24 edited

  • trunk/Config.kmk

    r92372 r92408  
    20032003 VBOX_LIB_XPCOM_X86     = $(PATH_STAGE_BIN)/VBoxXPCOM-x86.dylib
    20042004 LIB_DDU         = $(PATH_STAGE_BIN)/VBoxDDU.dylib
    2005  VBOX_LIB_SUPR0  = $(PATH_STAGE_LIB)/SUPR0$(VBOX_SUFF_LIB)
     2005 VBOX_LIB_SUPR0 :=
    20062006endif
    20072007if1of ($(KBUILD_TARGET), freebsd haiku linux netbsd openbsd solaris)
  • trunk/include/VBox/err.h

    r92392 r92408  
    12731273 * complete or try with a clean build. */
    12741274#define VERR_VMM_RC_VERSION_MISMATCH        (-2705)
    1275 /** VMM set jump error. */
    1276 #define VERR_VMM_SET_JMP_ERROR              (-2706)
    1277 /** VMM set jump stack overflow error. */
    1278 #define VERR_VMM_SET_JMP_STACK_OVERFLOW     (-2707)
    1279 /** VMM set jump resume error. */
    1280 #define VERR_VMM_SET_JMP_ABORTED_RESUME     (-2708)
    12811275/** VMM long jump error. */
    12821276#define VERR_VMM_LONG_JMP_ERROR             (-2709)
    1283 /** Unknown ring-3 call attempted. */
    1284 #define VERR_VMM_UNKNOWN_RING3_CALL         (-2710)
    1285 /** The ring-3 call didn't set an RC. */
    1286 #define VERR_VMM_RING3_CALL_NO_RC           (-2711)
    12871277/** Reason for leaving RC: Caller the tracer in ring-0. */
    12881278#define VINF_VMM_CALL_TRACER                (2712)
  • trunk/include/VBox/err.mac

    r92392 r92408  
    493493%define VERR_VMM_R0_VERSION_MISMATCH    (-2704)
    494494%define VERR_VMM_RC_VERSION_MISMATCH    (-2705)
    495 %define VERR_VMM_SET_JMP_ERROR    (-2706)
    496 %define VERR_VMM_SET_JMP_STACK_OVERFLOW    (-2707)
    497 %define VERR_VMM_SET_JMP_ABORTED_RESUME    (-2708)
    498495%define VERR_VMM_LONG_JMP_ERROR    (-2709)
    499 %define VERR_VMM_UNKNOWN_RING3_CALL    (-2710)
    500 %define VERR_VMM_RING3_CALL_NO_RC    (-2711)
    501496%define VINF_VMM_CALL_TRACER    (2712)
    502497%define VERR_VMM_SWITCHER_IPE_1    (-2713)
  • trunk/include/VBox/vmm/gvm.h

    r91250 r92408  
    120120        struct VMMR0PERVCPU s;
    121121#endif
    122         uint8_t             padding[512];
     122        uint8_t             padding[896];
    123123    } vmmr0;
    124124
     
    133133    /** Padding the structure size to page boundrary. */
    134134#ifdef VBOX_WITH_NEM_R0
    135     uint8_t                 abPadding3[4096 - 64*2 - 64 - 1024 - 64 - 512 - 64];
     135    uint8_t                 abPadding3[4096 - 64*2 - 64 - 1024 - 64 - 896 - 64];
    136136#else
    137     uint8_t                 abPadding3[4096 - 64*2 - 64 - 1024 - 512 - 64];
     137    uint8_t                 abPadding3[4096 - 64*2 - 64 - 1024 - 896 - 64];
    138138#endif
    139139} GVMCPU;
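
The padding bumps above (512 -> 896 for the vmmr0 union, with abPadding3 shrunk to match) preserve the invariant stated in the comment: GVMCPU stays exactly one page. An illustrative compile-time check of that arithmetic (size model only; real member names, order, and 64-byte alignment directives differ, and VBox itself would use AssertCompile() from iprt/assert.h):

    #include <assert.h>                 /* C11 static_assert macro */

    struct GVMCPU_sizeModel_sketch
    {
        unsigned char a[64 * 2];        /* terms mirror the non-NEM formula above */
        unsigned char b[64];
        unsigned char c[1024];
        unsigned char vmmr0[896];       /* grown from 512 bytes in this changeset */
        unsigned char d[64];
        unsigned char abPadding3[4096 - 64*2 - 64 - 1024 - 896 - 64];
    };

    static_assert(sizeof(struct GVMCPU_sizeModel_sketch) == 4096,
                  "GVMCPU must stay exactly one page when a member grows");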
  • trunk/include/VBox/vmm/gvm.mac

    r91250 r92408  
    5151%endif
    5252        alignb 64
    53         .vmmr0              resb 512
     53        .vmmr0              resb 896
    5454        alignb 64
    5555        .pgmr0              resb 64
  • trunk/include/VBox/vmm/vm.h

    r92358 r92408  
    219219        struct VMMCPU       s;
    220220#endif
    221         uint8_t             padding[1344];       /* multiple of 64 */
     221        uint8_t             padding[9536];       /* multiple of 64 */
    222222    } vmm;
    223223
  • trunk/include/VBox/vmm/vm.mac

    r92362 r92408  
    6969    .tm                     resb 5760
    7070    alignb 64
    71     .vmm                    resb 1344
     71    .vmm                    resb 9536
    7272    alignb 64
    7373    .pdm                    resb 256
  • trunk/include/VBox/vmm/vmm.h

    r92392 r92408  
    203203VMMR3DECL(PVMCPUCC)         VMMR3GetCpuByIdU(PUVM pVM, VMCPUID idCpu);
    204204VMM_INT_DECL(uint32_t)      VMMGetSvnRev(void);
    205 VMM_INT_DECL(bool)          VMMIsInRing3Call(PVMCPUCC pVCpu);
    206205VMM_INT_DECL(void)          VMMTrashVolatileXMMRegs(void);
    207206
     
    488487VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM);
    489488VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu);
    490 VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu);
    491489VMMR0_INT_DECL(int)  VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu);
    492490VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu);
  • trunk/src/VBox/HostDrivers/Support/Makefile.kmk

    r91802 r92408  
    4949endif
    5050if !defined(VBOX_ONLY_DOCS)
    51  if1of ($(VBOX_LDR_FMT), pe lx macho)
     51 if1of ($(VBOX_LDR_FMT), pe lx)
    5252  LIBRARIES += SUPR0
    5353 endif
     
    543543  endif
    544544 endif
    545 
    546 else ifeq ($(VBOX_LDR_FMT),macho)
    547  $(call KB_FN_DO_PASS0_ON_TARGET,SUPR0) # Defines SUPR0_0_OUTDIR so we can use it in SUPR0_VBOX_FILES w/o needing $$.
    548  SUPR0_VBOX_KMK_FILE = $(SUPR0_0_OUTDIR)/files.kmk
    549  include $(SUPR0_VBOX_KMK_FILE)
    550  SUPR0_SOURCES       = $(SUPR0_VBOX_FILES)
    551  SUPR0_CLEAN         = $(SUPR0_VBOX_FILES) $(SUPR0_0_OUTDIR)/SUPR0.asm $(SUPR0_VBOX_KMK_FILE) $(SUPR0_VBOX_KMK_FILE).ts
    552 
    553  # Generate a make include file which lists the wrapper source files.
    554 # $(call KB_FN_AUTO_CMD_DEPS,$(SUPR0_VBOX_KMK_FILE).ts)
    555  $(SUPR0_VBOX_KMK_FILE).ts \
    556  +| $(SUPR0_VBOX_KMK_FILE): \
    557                 $(PATH_SUB_CURRENT)/SUPDrv.cpp \
    558                 $(PATH_SUB_CURRENT)/SUPR0-asm-files.sed
    559 #       $(call KB_FN_AUTO_CMD_DEPS_COMMANDS)
    560         $(call MSG_GENERATE,,$(SUPR0_VBOX_KMK_FILE))
    561         $(QUIET)$(RM) -f -- "$@"
    562         $(QUIET)$(MKDIR) -p -- "$(@D)"
    563         $(QUIET)$(SED) --output "$@" -f "$(VBOX_PATH_SUP_SRC)/SUPR0-asm-files.sed" "$(VBOX_PATH_SUP_SRC)/SUPDrv.cpp"
    564         $(QUIET)$(CP) --changed -fv "$@" $(SUPR0_VBOX_KMK_FILE)
    565 
    566  $(SUPR0_0_OUTDIR)/SUPR0.asm +| $(SUPR0_VBOX_FILES): \
    567                 $(PATH_SUB_CURRENT)/SUPDrv.cpp \
    568                 $(PATH_SUB_CURRENT)/SUPR0-asm.sed \
    569                | $$(dir $$@) $(VBOX_FILESPLIT)
    570 #       $(call KB_FN_AUTO_CMD_DEPS_COMMANDS)
    571         $(call MSG_GENERATE,,SUPR0.asm and friends)
    572         $(QUIET)$(RM) -f -- "$@"
    573         $(QUIET)$(SED) --output "$@" -f "$(VBOX_PATH_SUP_SRC)/SUPR0-asm.sed" "$(VBOX_PATH_SUP_SRC)/SUPDrv.cpp"
    574         $(VBOX_FILESPLIT) "$@" "$(dir $@)"
    575 
    576545endif
    577546
  • trunk/src/VBox/VMM/Makefile.kmk

    r92351 r92408  
    7878VBoxVMM_DEFS    += VBOX_WITH_DBGF_FLOW_TRACING
    7979endif
    80 ifdef VBOX_WITH_VMM_R0_SWITCH_STACK
    81 VBoxVMM_DEFS    += VMM_R0_SWITCH_STACK
    82 endif
    8380if "$(KBUILD_TYPE)" == "debug" && "$(USERNAME)" == "bird" && 0
    8481VBoxVMM_DEFS    += RTMEM_WRAP_TO_EF_APIS
    8582endif
    86 VBoxVMM_DEFS.darwin = VMM_R0_SWITCH_STACK
    8783
    8884VBoxVMM_INCS     = \
     
    466462VMMR0_DEFS     += VBOX_WITH_DBGF_TRACING
    467463 endif
    468  ifdef VBOX_WITH_VMM_R0_SWITCH_STACK
    469 VMMR0_DEFS     += VMM_R0_SWITCH_STACK
    470  endif
    471464 if1of ($(KBUILD_TARGET), darwin linux win)
    472465VMMR0_DEFS     += VMM_R0_TOUCH_FPU
    473466 endif
    474 VMMR0_DEFS.darwin = VMM_R0_SWITCH_STACK
    475467VMMR0_DEFS.win.amd64  = VBOX_WITH_KERNEL_USING_XMM
    476468
     
    572564VMMR0_SOURCES.x86 = \
    573565        VMMR0/VMMR0JmpA-x86.asm
    574 VMMR0_SOURCES.darwin.amd64 = \
    575         VMMR0/VMMR0StackBack-darwin.asm
    576566
    577567VMMR0_LIBS = \
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp

    r92204 r92408  
    422422
    423423
    424 #if defined(VMM_R0_SWITCH_STACK) && defined(IN_RING0)
    425 /**
    426  * We must be on kernel stack before disabling preemption, thus this wrapper.
    427  */
    428 DECLASM(int) StkBack_pdmR0CritSectEnterContendedOnKrnlStk(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECT pCritSect,
    429                                                           RTNATIVETHREAD hNativeSelf, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
    430 {
    431     VMMR0EMTBLOCKCTX Ctx;
    432     int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
    433     if (rc == VINF_SUCCESS)
    434     {
    435         Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    436 
    437         rc = pdmR3R0CritSectEnterContended(pVM, pVCpu, pCritSect, hNativeSelf, pSrcPos, rcBusy);
    438 
    439         VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
    440     }
    441     else
    442         STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
    443     return rc;
    444 }
    445 DECLASM(int) pdmR0CritSectEnterContendedOnKrnlStk(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECT pCritSect,
    446                                                   RTNATIVETHREAD hNativeSelf, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos);
    447 #endif
    448 
    449 
    450424/**
    451425 * Common worker for the debug and normal APIs.
     
    548522    if (pVCpu)
    549523    {
    550 #  ifndef VMM_R0_SWITCH_STACK
    551524        VMMR0EMTBLOCKCTX Ctx;
    552525        int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
     
    562535            STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
    563536        return rc;
    564 #  else
    565         return pdmR0CritSectEnterContendedOnKrnlStk(pVM, pVCpu, pCritSect, hNativeSelf, rcBusy, pSrcPos);
    566 #  endif
    567537    }
    568538
     
    812782#endif /* IN_RING3 */
    813783
    814 
    815 #if defined(VMM_R0_SWITCH_STACK) && defined(IN_RING0)
    816 /**
    817  * We must be on kernel stack before disabling preemption, thus this wrapper.
    818  */
    819 DECLASM(int) StkBack_pdmR0CritSectLeaveSignallingOnKrnlStk(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECT pCritSect,
    820                                                            int32_t const cLockers, SUPSEMEVENT const hEventToSignal)
    821 {
    822     VMMR0EMTBLOCKCTX    Ctx;
    823     bool                fLeaveCtx = false;
    824     if (cLockers < 0)
    825         AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
    826     else
    827     {
    828         /* Someone is waiting, wake up one of them. */
    829         Assert(cLockers < _8K);
    830         SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
    831         if (!RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
    832         {
    833             int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
    834             VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
    835             fLeaveCtx = true;
    836         }
    837         int rc = SUPSemEventSignal(pVM->pSession, hEvent);
    838         AssertRC(rc);
    839     }
    840 
    841     /*
    842      * Signal exit event.
    843      */
    844     if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
    845     { /* likely */ }
    846     else
    847     {
    848         if (!fLeaveCtx && pVCpu != NULL && !RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
    849         {
    850             int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
    851             VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
    852             fLeaveCtx = true;
    853         }
    854         Log8(("Signalling %#p\n", hEventToSignal));
    855         int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
    856         AssertRC(rc);
    857     }
    858 
    859     /*
    860      * Restore HM context if needed.
    861      */
    862     if (!fLeaveCtx)
    863     { /* contention should be unlikely */ }
    864     else
    865         VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
    866 
    867 # ifdef DEBUG_bird
    868     VMMTrashVolatileXMMRegs();
    869 # endif
    870     return VINF_SUCCESS;
    871 }
    872 DECLASM(int) pdmR0CritSectLeaveSignallingOnKrnlStk(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECT pCritSect,
    873                                                    int32_t const cLockers, SUPSEMEVENT const hEventToSignal);
    874 #endif
    875784
    876785/**
     
    1029938        if (!fQueueIt)
    1030939        {
    1031 # ifndef VMM_R0_SWITCH_STACK
    1032940            VMMR0EMTBLOCKCTX    Ctx;
    1033941            bool                fLeaveCtx = false;
     
    1075983                VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
    1076984
    1077 #  ifdef DEBUG_bird
     985# ifdef DEBUG_bird
    1078986            VMMTrashVolatileXMMRegs();
    1079 #  endif
     987# endif
    1080988            return VINF_SUCCESS;
    1081 # else  /* VMM_R0_SWITCH_STACK */
    1082             return pdmR0CritSectLeaveSignallingOnKrnlStk(pVM, pVCpu, pCritSect, cLockers, hEventToSignal);
    1083 # endif /* VMM_R0_SWITCH_STACK */
    1084989        }
    1085990
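
With the switched stack gone, EMTs are always on the kernel stack in ring-0, so the StkBack_*OnKrnlStk trampolines removed above become unnecessary and the contended path uses the block/resume pair directly. A condensed sketch of the surviving shape (VMMR0EmtPrepareToBlock/VMMR0EmtResumeAfterBlocking are the real APIs used in the retained code; pdmWaitContended_sketch is a hypothetical stand-in for the contended worker, and the VMM-internal headers are assumed to be in scope):

    static int pdmR0EnterContended_sketch(PVMCC pVM, PVMCPUCC pVCpu,
                                          PPDMCRITSECT pCritSect, int rcBusy)
    {
        VMMR0EMTBLOCKCTX Ctx;
        int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
        if (rc == VINF_SUCCESS)
        {
            /* Safe to block here; preemption/thread-ctx-hook state has been saved. */
            rc = pdmWaitContended_sketch(pVM, pVCpu, pCritSect);
            VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
        }
        /* else: blocking not possible and rc is typically rcBusy for the caller. */
        return rc;
    }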
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp

    r92204 r92408  
    101101*   Internal Functions                                                                                                           *
    102102*********************************************************************************************************************************/
    103 #if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
    104103static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
    105 #else
    106 DECLASM(int) pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
    107 DECLASM(int) StkBack_pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
    108 #endif
    109104
    110105
     
    556551 * @param   fNoVal      No validation records.
    557552 */
    558 #if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
    559553static int pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
    560554                                    PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
    561 #else
    562 DECLASM(int) pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
    563                                       PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal);
    564 DECLASM(int) StkBack_pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
    565                                               PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
    566 #endif
    567555{
    568556    /*
     
    899887 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
    900888 */
    901 #if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
    902889static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
    903 #else
    904 DECLASM(int) StkBack_pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
    905 #endif
    906890{
    907891    /*
     
    13211305 * @param   fNoVal      No validation records.
    13221306 */
    1323 #if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
    13241307static int pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
    13251308                                  PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
    1326 #else
    1327 DECLASM(int) pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
    1328                                     PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal);
    1329 DECLASM(int) StkBack_pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
    1330                                             PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
    1331 #endif
    13321309{
    13331310    /*
     
    16961673 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
    16971674 */
    1698 #if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
    16991675static int pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
    1700 #else
    1701 DECLASM(int) pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
    1702 DECLASM(int) StkBack_pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
    1703 #endif
    17041676{
    17051677    /*
  • trunk/src/VBox/VMM/VMMAll/VMMAll.cpp

    r90598 r92408  
    269269
    270270/**
    271  * Checks whether we're in a ring-3 call or not.
    272  *
    273  * @returns true / false.
    274  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    275  * @thread  EMT
    276  */
    277 VMM_INT_DECL(bool) VMMIsInRing3Call(PVMCPU pVCpu)
    278 {
    279 #ifdef RT_ARCH_X86
    280     return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
    281 #else
    282     return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
    283 #endif
    284 }
    285 
    286 
    287 /**
    288271 * Returns the build type for matching components.
    289272 *
  • trunk/src/VBox/VMM/VMMR0/PGMR0Pool.cpp

    r91821 r92408  
    140140
    141141/**
    142  * Move this back to PGMR0PoolGrow when VMM_R0_SWITCH_STACK is gonne.
    143  */
    144 #ifndef VMM_R0_SWITCH_STACK
    145 static int pgmR0PoolGrowOnKrnlStk(PGVM pGVM, PGVMCPU pGVCpu, PPGMPOOL pPool)
    146 #else
    147 DECLASM(int) pgmR0PoolGrowOnKrnlStk(PGVM pGVM, PGVMCPU pGVCpu, PPGMPOOL pPool);
    148 DECLASM(int) StkBack_pgmR0PoolGrowOnKrnlStk(PGVM pGVM, PGVMCPU pGVCpu, PPGMPOOL pPool)
    149 #endif
    150 {
    151     /*
    152      * Enter the grow critical section and call worker.
    153      */
    154     STAM_REL_PROFILE_START(&pPool->StatGrow, a);
    155 
    156     VMMR0EMTBLOCKCTX Ctx;
    157     int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, &pGVM->pgmr0.s.PoolGrowCritSect, &Ctx);
    158     AssertRCReturn(rc, rc);
    159 
    160     rc = RTCritSectEnter(&pGVM->pgmr0.s.PoolGrowCritSect);
    161     AssertRCReturn(rc, rc);
    162 
    163     rc = pgmR0PoolGrowInner(pGVM, pPool);
    164 
    165     STAM_REL_PROFILE_STOP(&pPool->StatGrow, a);
    166     RTCritSectLeave(&pGVM->pgmr0.s.PoolGrowCritSect);
    167 
    168     VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
    169     return rc;
    170 }
    171 
    172 
    173 /**
    174142 * Grows the shadow page pool.
    175143 *
     
    194162    PGVMCPU const pGVCpu = &pGVM->aCpus[idCpu];
    195163
    196     return pgmR0PoolGrowOnKrnlStk(pGVM, pGVCpu, pPool);
     164    /*
     165     * Enter the grow critical section and call worker.
     166     */
     167    STAM_REL_PROFILE_START(&pPool->StatGrow, a);
     168
     169    VMMR0EMTBLOCKCTX Ctx;
     170    int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, &pGVM->pgmr0.s.PoolGrowCritSect, &Ctx);
     171    AssertRCReturn(rc, rc);
     172
     173    rc = RTCritSectEnter(&pGVM->pgmr0.s.PoolGrowCritSect);
     174    AssertRCReturn(rc, rc);
     175
     176    rc = pgmR0PoolGrowInner(pGVM, pPool);
     177
     178    STAM_REL_PROFILE_STOP(&pPool->StatGrow, a);
     179    RTCritSectLeave(&pGVM->pgmr0.s.PoolGrowCritSect);
     180
     181    VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
     182    return rc;
    197183}
    198184
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r92392 r92408  
    323323        pGVCpu->vmmr0.s.pPreemptState               = NULL;
    324324        pGVCpu->vmmr0.s.hCtxHook                    = NIL_RTTHREADCTXHOOK;
     325        pGVCpu->vmmr0.s.AssertJmpBuf.pMirrorBuf     = &pGVCpu->vmm.s.AssertJmpBuf;
     326        pGVCpu->vmmr0.s.AssertJmpBuf.pvStackBuf     = &pGVCpu->vmm.s.abAssertStack[0];
     327        pGVCpu->vmmr0.s.AssertJmpBuf.cbStackBuf     = sizeof(pGVCpu->vmm.s.abAssertStack);
     328
    325329        for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++)
    326330            pGVCpu->vmmr0.s.u.aLoggers[iLogger].hEventFlushWait = NIL_RTSEMEVENT;
     
    14411445                             * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
    14421446                             */
    1443                             rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
     1447                            rc = vmmR0CallRing3SetJmp(&pGVCpu->vmmr0.s.AssertJmpBuf, HMR0RunGuestCode, pGVM, pGVCpu);
    14441448
    14451449                            /*
     
    15701574             */
    15711575#  ifdef VBOXSTRICTRC_STRICT_ENABLED
    1572             int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
     1576            int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
    15731577#  else
    1574             int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
     1578            int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, NEMR0RunGuestCode, pGVM, idCpu);
    15751579#  endif
    15761580            STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
     
    23572361}
    23582362
    2359 #ifndef VMM_R0_SWITCH_STACK /* Not safe unless we disable preemption first. */
     2363
    23602364/**
    23612365 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
     
    23742378                              pGVCpu->vmmr0.s.pSession);
    23752379}
    2376 #endif
    23772380
    23782381
     
    23942397                            PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
    23952398{
    2396 #ifndef VMM_R0_SWITCH_STACK /* Not safe unless we disable preemption first. */
    23972399    /*
    23982400     * Requests that should only happen on the EMT thread will be
    2399      * wrapped in a setjmp so we can assert without causing trouble.
     2401     * wrapped in a setjmp so we can assert without causing too much trouble.
    24002402     */
    24012403    if (   pVM  != NULL
     
    24042406        && idCpu < pGVM->cCpus
    24052407        && pGVM->pSession == pSession
    2406         && pGVM->pSelf    == pVM)
    2407     {
    2408         switch (enmOperation)
    2409         {
    2410             /* These might/will be called before VMMR3Init. */
    2411             case VMMR0_DO_GMM_INITIAL_RESERVATION:
    2412             case VMMR0_DO_GMM_UPDATE_RESERVATION:
    2413             case VMMR0_DO_GMM_ALLOCATE_PAGES:
    2414             case VMMR0_DO_GMM_FREE_PAGES:
    2415             case VMMR0_DO_GMM_BALLOONED_PAGES:
    2416             /* On the mac we might not have a valid jmp buf, so check these as well. */
    2417             case VMMR0_DO_VMMR0_INIT:
    2418             case VMMR0_DO_VMMR0_TERM:
    2419 
    2420             case VMMR0_DO_PDM_DEVICE_CREATE:
    2421             case VMMR0_DO_PDM_DEVICE_GEN_CALL:
    2422             case VMMR0_DO_IOM_GROW_IO_PORTS:
    2423             case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
    2424             case VMMR0_DO_DBGF_BP_INIT:
    2425             case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
    2426             case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
    2427             {
    2428                 PGVMCPU        pGVCpu        = &pGVM->aCpus[idCpu];
    2429                 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
    2430                 if (RT_LIKELY(   pGVCpu->hEMT            == hNativeThread
    2431                               && pGVCpu->hNativeThreadR0 == hNativeThread))
    2432                 {
    2433                     if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
    2434                         break;
    2435 
    2436                     pGVCpu->vmmr0.s.pGVM         = pGVM;
    2437                     pGVCpu->vmmr0.s.idCpu        = idCpu;
    2438                     pGVCpu->vmmr0.s.enmOperation = enmOperation;
    2439                     pGVCpu->vmmr0.s.pReq         = pReq;
    2440                     pGVCpu->vmmr0.s.u64Arg       = u64Arg;
    2441                     pGVCpu->vmmr0.s.pSession     = pSession;
    2442                     return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, pGVCpu,
    2443                                                   ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
    2444                 }
    2445                 return VERR_VM_THREAD_NOT_EMT;
    2446             }
    2447 
    2448             default:
    2449             case VMMR0_DO_PGM_POOL_GROW:
    2450                 break;
    2451         }
    2452     }
    2453 #else
    2454     RT_NOREF(pVM);
    2455 #endif
     2408        && pGVM->pSelf    == pGVM
     2409        && enmOperation != VMMR0_DO_GVMM_DESTROY_VM
     2410        && enmOperation != VMMR0_DO_GVMM_SCHED_WAKE_UP /* idCpu is not caller but target. Sigh. */ /** @todo fix*/
     2411       )
     2412    {
     2413        PGVMCPU        pGVCpu        = &pGVM->aCpus[idCpu];
     2414        RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
     2415        if (RT_LIKELY(   pGVCpu->hEMT            == hNativeThread
     2416                      && pGVCpu->hNativeThreadR0 == hNativeThread))
     2417        {
     2418            pGVCpu->vmmr0.s.pGVM         = pGVM;
     2419            pGVCpu->vmmr0.s.idCpu        = idCpu;
     2420            pGVCpu->vmmr0.s.enmOperation = enmOperation;
     2421            pGVCpu->vmmr0.s.pReq         = pReq;
     2422            pGVCpu->vmmr0.s.u64Arg       = u64Arg;
     2423            pGVCpu->vmmr0.s.pSession     = pSession;
     2424            return vmmR0CallRing3SetJmpEx(&pGVCpu->vmmr0.s.AssertJmpBuf, vmmR0EntryExWrapper, pGVCpu,
     2425                                          ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation);
     2426        }
     2427        return VERR_VM_THREAD_NOT_EMT;
     2428    }
    24562429    return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
    24572430}
     
    24732446{
    24742447#ifdef RT_ARCH_X86
    2475     return pVCpu->vmm.s.CallRing3JmpBufR0.eip
    2476         && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
     2448    return pVCpu->vmmr0.s.AssertJmpBuf.eip != 0;
    24772449#else
    2478     return pVCpu->vmm.s.CallRing3JmpBufR0.rip
    2479         && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
    2480 #endif
    2481 }
    2482 
    2483 
    2484 /**
    2485  * Checks whether we've done a ring-3 long jump.
    2486  *
    2487  * @returns @c true / @c false
    2488  * @param   pVCpu       The cross context virtual CPU structure.
    2489  * @thread  EMT
    2490  */
    2491 VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
    2492 {
    2493     return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
     2450    return pVCpu->vmmr0.s.AssertJmpBuf.rip != 0;
     2451#endif
    24942452}
    24952453
     
    30052963 * Inner worker for vmmR0LoggerFlushCommon.
    30062964 */
    3007 #ifndef VMM_R0_SWITCH_STACK
    30082965static bool   vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush)
    3009 #else
    3010 DECLASM(bool) vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush);
    3011 DECLASM(bool) StkBack_vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush)
    3012 #endif
    30132966{
    30142967    PVMMR0PERVCPULOGGER const pR0Log    = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger];
     
    35453498    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
    35463499
    3547     if (!pVCpu->vmm.s.pfnRing0AssertCallback)
    3548     {
    3549         pVCpu->vmm.s.pfnRing0AssertCallback    = pfnCallback;
    3550         pVCpu->vmm.s.pvRing0AssertCallbackUser = pvUser;
     3500    if (!pVCpu->vmmr0.s.pfnAssertCallback)
     3501    {
     3502        pVCpu->vmmr0.s.pfnAssertCallback    = pfnCallback;
     3503        pVCpu->vmmr0.s.pvAssertCallbackUser = pvUser;
    35513504        return VINF_SUCCESS;
    35523505    }
     
    35623515VMMR0_INT_DECL(void) VMMR0AssertionRemoveNotification(PVMCPUCC pVCpu)
    35633516{
    3564     pVCpu->vmm.s.pfnRing0AssertCallback    = NULL;
    3565     pVCpu->vmm.s.pvRing0AssertCallbackUser = NULL;
     3517    pVCpu->vmmr0.s.pfnAssertCallback    = NULL;
     3518    pVCpu->vmmr0.s.pvAssertCallbackUser = NULL;
    35663519}
    35673520
     
    35753528VMMR0_INT_DECL(bool) VMMR0AssertionIsNotificationSet(PVMCPUCC pVCpu)
    35763529{
    3577     return pVCpu->vmm.s.pfnRing0AssertCallback != NULL;
     3530    return pVCpu->vmmr0.s.pfnAssertCallback != NULL;
    35783531}
    35793532
     
    35973550        {
    35983551# ifdef RT_ARCH_X86
    3599             if (    pVCpu->vmm.s.CallRing3JmpBufR0.eip
    3600                 &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
     3552            if (pVCpu->vmmr0.s.AssertJmpBuf.eip)
    36013553# else
    3602             if (    pVCpu->vmm.s.CallRing3JmpBufR0.rip
    3603                 &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
     3554            if (pVCpu->vmmr0.s.AssertJmpBuf.rip)
    36043555# endif
    36053556            {
    3606                 if (pVCpu->vmm.s.pfnRing0AssertCallback)
    3607                     pVCpu->vmm.s.pfnRing0AssertCallback(pVCpu, pVCpu->vmm.s.pvRing0AssertCallbackUser);
    3608                 int rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VERR_VMM_RING0_ASSERTION);
     3557                if (pVCpu->vmmr0.s.pfnAssertCallback)
     3558                    pVCpu->vmmr0.s.pfnAssertCallback(pVCpu, pVCpu->vmmr0.s.pvAssertCallbackUser);
     3559                int rc = vmmR0CallRing3LongJmp(&pVCpu->vmmr0.s.AssertJmpBuf, VERR_VMM_RING0_ASSERTION);
    36093560                return RT_FAILURE_NP(rc);
    36103561            }
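
Condensed view of the reworked assertion path above (a sketch, not verbatim code): when a ring-0 assertion fires on an EMT whose AssertJmpBuf is armed, the VMM notifies any registered callback and long-jumps out with VERR_VMM_RING0_ASSERTION; only if the long jump itself fails does the host get to panic.

    static bool ring0AssertShouldPanic_sketch(PVMCPUCC pVCpu)
    {
    #ifdef RT_ARCH_X86
        if (pVCpu->vmmr0.s.AssertJmpBuf.eip)       /* jump buffer armed? */
    #else
        if (pVCpu->vmmr0.s.AssertJmpBuf.rip)
    #endif
        {
            if (pVCpu->vmmr0.s.pfnAssertCallback)
                pVCpu->vmmr0.s.pfnAssertCallback(pVCpu, pVCpu->vmmr0.s.pvAssertCallbackUser);
            int rc = vmmR0CallRing3LongJmp(&pVCpu->vmmr0.s.AssertJmpBuf, VERR_VMM_RING0_ASSERTION);
            return RT_FAILURE_NP(rc);              /* only panic if the jump failed */
        }
        return true;                               /* no armed buffer: default behaviour */
    }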
  • trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm

    r91806 r92408  
    1616;
    1717
    18 ;*******************************************************************************
    19 ;* Header Files                                                                *
    20 ;*******************************************************************************
     18;*********************************************************************************************************************************
     19;*  Header Files                                                                                                                 *
     20;*********************************************************************************************************************************
    2121%define RT_ASM_WITH_SEH64_ALT
    2222%include "VBox/asmdefs.mac"
     
    2424%include "VBox/err.mac"
    2525%include "VBox/param.mac"
    26 %ifdef VMM_R0_SWITCH_STACK
    27  %include "VBox/SUPR0StackWrapper.mac"
    28 %endif
    29 
    30 
    31 ;*******************************************************************************
    32 ;*  Defined Constants And Macros                                               *
    33 ;*******************************************************************************
    34 %define RESUME_MAGIC    07eadf00dh
    35 %define STACK_PADDING   0eeeeeeeeeeeeeeeeh
    36 
    37 ;; Workaround for linux 4.6 fast/slow syscall stack depth difference.
    38 ;; Update: This got worse with linux 5.13 and CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT.
    39 ;;         The x86 arch_exit_to_user_mode_prepare code limits the offset to 255,
    40 ;;         while the generic limit is 1023.  See bugref:10064 for details.
    41 %ifdef VMM_R0_SWITCH_STACK
    42  %define STACK_FUZZ_SIZE 0
    43 %else
    44  %ifdef RT_OS_LINUX
    45   %define STACK_FUZZ_SIZE 384
    46  %else
    47   %define STACK_FUZZ_SIZE 128
    48  %endif
    49 %endif
    5026
    5127
    5228BEGINCODE
    53 
    5429
    5530;;
     
    7752    SEH64_SET_FRAME_xBP 0
    7853 %ifdef ASM_CALL64_MSC
    79     sub     rsp, 30h + STACK_FUZZ_SIZE  ; (10h is used by resume (??), 20h for callee spill area)
    80     SEH64_ALLOCATE_STACK 30h + STACK_FUZZ_SIZE
     54    sub     rsp, 30h                    ; (10h is used by resume (??), 20h for callee spill area)
     55    SEH64_ALLOCATE_STACK 30h
    8156SEH64_END_PROLOGUE
    8257    mov     r11, rdx                    ; pfn
    8358    mov     rdx, rcx                    ; pJmpBuf;
    8459 %else
    85     sub     rsp, 10h + STACK_FUZZ_SIZE  ; (10h is used by resume (??))
    86     SEH64_ALLOCATE_STACK 10h + STACK_FUZZ_SIZE
     60    sub     rsp, 10h                    ; (10h is used by resume (??))
     61    SEH64_ALLOCATE_STACK 10h
    8762SEH64_END_PROLOGUE
    8863    mov     r8, rdx                     ; pvUser1 (save it like MSC)
     
    126101
    127102    ;
    128     ; If we're not in a ring-3 call, call pfn and return.
    129     ;
    130     test    byte [xDX + VMMR0JMPBUF.fInRing3Call], 1
    131     jnz     .resume
    132 
    133 .different_call_continue:
     103    ; Save the call then make it.
     104    ;
    134105    mov     [xDX + VMMR0JMPBUF.pfn], r11
    135106    mov     [xDX + VMMR0JMPBUF.pvUser1], r8
    136107    mov     [xDX + VMMR0JMPBUF.pvUser2], r9
    137 
    138  %ifdef VMM_R0_SWITCH_STACK
    139     mov     r15, [xDX + VMMR0JMPBUF.pvSavedStack]
    140     test    r15, r15
    141     jz      .entry_error
    142   %ifdef VBOX_STRICT
    143     cmp     dword [r15], 0h
    144     jne     .entry_error
    145     mov     rdi, r15
    146     mov     rcx, VMM_STACK_SIZE / 8
    147     mov     rax, qword 0eeeeeeeffeeeeeeeh
    148     repne stosq
    149     mov     [rdi - 10h], rbx
    150   %endif
    151 
    152     ; New RSP
    153   %ifdef WITHOUT_SUPR0STACKINFO
    154     lea     r15, [r15 + VMM_STACK_SIZE]
    155   %else
    156     lea     r15, [r15 + VMM_STACK_SIZE - SUPR0STACKINFO_size]
    157 
    158     ; Plant SUPR0 stack info.
    159     mov     [r15 + SUPR0STACKINFO.pResumeKernelStack], rsp
    160     mov     [r15 + SUPR0STACKINFO.pSelf], r15
    161     mov     dword [r15 + SUPR0STACKINFO.magic0], SUPR0STACKINFO_MAGIC0
    162     mov     dword [r15 + SUPR0STACKINFO.magic1], SUPR0STACKINFO_MAGIC1
    163     mov     dword [r15 + SUPR0STACKINFO.magic2], SUPR0STACKINFO_MAGIC2
    164     mov     dword [r15 + SUPR0STACKINFO.magic3], SUPR0STACKINFO_MAGIC3
    165 
    166   %endif
    167 
    168     ; Switch stack!
    169   %ifdef ASM_CALL64_MSC
    170     lea     rsp, [r15 - 20h]
    171   %else
    172     mov     rsp, r15
    173   %endif
    174  %endif ; VMM_R0_SWITCH_STACK
    175108
    176109    mov     r12, rdx                    ; Save pJmpBuf.
     
    184117    call    r11
    185118    mov     rdx, r12                    ; Restore pJmpBuf
    186 
    187  %ifdef VMM_R0_SWITCH_STACK
    188     ; Reset the debug mark and the stack info header.
    189     mov     r15, [xDX + VMMR0JMPBUF.pvSavedStack]
    190   %ifndef WITHOUT_SUPR0STACKINFO
    191     mov     qword [r15 + VMM_STACK_SIZE - SUPR0STACKINFO_size + SUPR0STACKINFO.magic0], 0h
    192   %endif
    193   %ifdef VBOX_STRICT
    194     mov     dword [r15], 0h             ; Reset the marker
    195   %endif
    196  %endif
    197119
    198120    ;
     
    227149    popf
    228150    leave
    229     ret
    230 
    231 .entry_error:
    232     mov     eax, VERR_VMM_SET_JMP_ERROR
    233     jmp     .proper_return
    234 
    235 .stack_overflow:
    236     mov     eax, VERR_VMM_SET_JMP_STACK_OVERFLOW
    237     jmp     .proper_return
    238 
    239     ;
    240     ; Aborting resume.
    241     ; Note! No need to restore XMM registers here since we haven't touched them yet.
    242     ;
    243 .bad:
    244     and     qword [xDX + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
    245     mov     rbx, [xDX + VMMR0JMPBUF.rbx]
    246  %ifdef ASM_CALL64_MSC
    247     mov     rsi, [xDX + VMMR0JMPBUF.rsi]
    248     mov     rdi, [xDX + VMMR0JMPBUF.rdi]
    249  %endif
    250     mov     r12, [xDX + VMMR0JMPBUF.r12]
    251     mov     r13, [xDX + VMMR0JMPBUF.r13]
    252     mov     r14, [xDX + VMMR0JMPBUF.r14]
    253     mov     r15, [xDX + VMMR0JMPBUF.r15]
    254     mov     eax, VERR_VMM_SET_JMP_ABORTED_RESUME
    255     leave
    256     ret
    257 
    258     ;
    259     ; Not the same call as went to ring-3.
    260     ;
    261 .different_call:
    262     mov     byte [xDX + VMMR0JMPBUF.fInRing3Call], 0
    263     ;; @todo or should we fail here instead?
    264     jmp     .different_call_continue
    265 
    266     ;
    267     ; Resume VMMRZCallRing3 the call.
    268     ;
    269 .resume:
    270     ; Check if it's actually the same call, if not just continue with it
    271     ; as a regular call (ring-0 assert, then VM destroy).
    272     cmp     [xDX + VMMR0JMPBUF.pfn], r11
    273     jne     .different_call
    274     cmp     [xDX + VMMR0JMPBUF.pvUser1], r8
    275     jne     .different_call
    276     cmp     [xDX + VMMR0JMPBUF.pvUser2], r9
    277     jne     .different_call
    278 
    279  %ifndef VMM_R0_SWITCH_STACK
    280     ; Sanity checks incoming stack, applying fuzz if needed.
    281     sub     r10, [xDX + VMMR0JMPBUF.SpCheck]
    282     jz      .resume_stack_checked_out
    283     add     r10, STACK_FUZZ_SIZE        ; plus/minus STACK_FUZZ_SIZE is fine.
    284     cmp     r10, STACK_FUZZ_SIZE * 2
    285     ja      .bad
    286 
    287     mov     r10, [xDX + VMMR0JMPBUF.SpCheck]
    288     mov     [xDX + VMMR0JMPBUF.rsp], r10 ; Must be update in case of another long jump (used for save calc).
    289 
    290 .resume_stack_checked_out:
    291     mov     ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
    292     cmp     rcx, VMM_STACK_SIZE
    293     ja      .bad
    294     test    rcx, 7
    295     jnz     .bad
    296     mov     rdi, [xDX + VMMR0JMPBUF.SpCheck]
    297     sub     rdi, [xDX + VMMR0JMPBUF.SpResume]
    298     cmp     rcx, rdi
    299     jne     .bad
    300  %endif
    301 
    302 %ifdef VMM_R0_SWITCH_STACK
    303     ; Update the signature in case the kernel stack moved.
    304     mov     r15, [xDX + VMMR0JMPBUF.pvSavedStack]
    305     test    r15, r15
    306     jz      .entry_error
    307  %ifndef WITHOUT_SUPR0STACKINFO
    308     lea     r15, [r15 + VMM_STACK_SIZE - SUPR0STACKINFO_size]
    309 
    310     mov     [r15 + SUPR0STACKINFO.pResumeKernelStack], rsp
    311     mov     [r15 + SUPR0STACKINFO.pSelf], r15
    312     mov     dword [r15 + SUPR0STACKINFO.magic0], SUPR0STACKINFO_MAGIC0
    313     mov     dword [r15 + SUPR0STACKINFO.magic1], SUPR0STACKINFO_MAGIC1
    314     mov     dword [r15 + SUPR0STACKINFO.magic2], SUPR0STACKINFO_MAGIC2
    315     mov     dword [r15 + SUPR0STACKINFO.magic3], SUPR0STACKINFO_MAGIC3
    316  %endif
    317 
    318     ; Switch stack.
    319     mov     rsp, [xDX + VMMR0JMPBUF.SpResume]
    320 %else
    321     ; Restore the stack.
    322     mov     ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
    323     shr     ecx, 3
    324     mov     rsi, [xDX + VMMR0JMPBUF.pvSavedStack]
    325     mov     rdi, [xDX + VMMR0JMPBUF.SpResume]
    326     mov     rsp, rdi
    327     rep movsq
    328 %endif ; !VMM_R0_SWITCH_STACK
    329     mov     byte [xDX + VMMR0JMPBUF.fInRing3Call], 0
    330 
    331     ;
    332     ; Continue where we left off.
    333     ;
    334 %ifdef VBOX_STRICT
    335     pop     rax                         ; magic
    336     cmp     rax, RESUME_MAGIC
    337     je      .magic_ok
    338     mov     ecx, 0123h
    339     mov     [ecx], edx
    340 .magic_ok:
    341 %endif
    342 %ifdef RT_OS_WINDOWS
    343     movdqa  xmm6,  [rsp + 000h]
    344     movdqa  xmm7,  [rsp + 010h]
    345     movdqa  xmm8,  [rsp + 020h]
    346     movdqa  xmm9,  [rsp + 030h]
    347     movdqa  xmm10, [rsp + 040h]
    348     movdqa  xmm11, [rsp + 050h]
    349     movdqa  xmm12, [rsp + 060h]
    350     movdqa  xmm13, [rsp + 070h]
    351     movdqa  xmm14, [rsp + 080h]
    352     movdqa  xmm15, [rsp + 090h]
    353     add     rsp, 0a0h
    354 %endif
    355     popf
    356     pop     rbx
    357 %ifdef ASM_CALL64_MSC
    358     pop     rsi
    359     pop     rdi
    360 %endif
    361     pop     r12
    362     pop     r13
    363     pop     r14
    364     pop     r15
    365     pop     rbp
    366     xor     eax, eax                    ; VINF_SUCCESS
    367151    ret
    368152ENDPROC vmmR0CallRing3SetJmp
     
    416200    movdqa  [rsp + 090h], xmm15
    417201%endif
    418 %ifdef VBOX_STRICT
    419     push    RESUME_MAGIC
    420     SEH64_ALLOCATE_STACK 8
    421 %endif
    422202SEH64_END_PROLOGUE
    423203
     
    440220
    441221    ;
    442     ; Sanity checks.
    443     ;
    444     mov     rdi, [xDX + VMMR0JMPBUF.pvSavedStack]
    445     test    rdi, rdi                    ; darwin may set this to 0.
    446     jz      .nok
    447     mov     [xDX + VMMR0JMPBUF.SpResume], rsp
    448  %ifndef VMM_R0_SWITCH_STACK
     222    ; Also check that the stack is in the vicinity of the RSP we entered
     223    ; on so the stack mirroring below doesn't go wild.
     224    ;
    449225    mov     rsi, rsp
    450226    mov     rcx, [xDX + VMMR0JMPBUF.rsp]
    451227    sub     rcx, rsi
    452 
    453     ; two sanity checks on the size.
    454     cmp     rcx, VMM_STACK_SIZE         ; check max size.
     228    cmp     rcx, _64K
    455229    jnbe    .nok
    456230
    457231    ;
    458     ; Copy the stack
    459     ;
    460     test    ecx, 7                      ; check alignment
    461     jnz     .nok
    462     mov     [xDX + VMMR0JMPBUF.cbSavedStack], ecx
    463     shr     ecx, 3
    464     rep movsq
    465 
    466  %endif ; !VMM_R0_SWITCH_STACK
    467 
    468232    ; Save a PC and return PC here to assist unwinding.
     233    ;
    469234.unwind_point:
    470235    lea     rcx, [.unwind_point wrt RIP]
    471     mov     [xDX + VMMR0JMPBUF.SavedEipForUnwind], rcx
     236    mov     [xDX + VMMR0JMPBUF.UnwindPc], rcx
    472237    mov     rcx, [xDX + VMMR0JMPBUF.rbp]
    473238    lea     rcx, [rcx + 8]
     
    477242
    478243    ; Save RSP & RBP to enable stack dumps
     244    mov     [xDX + VMMR0JMPBUF.UnwindSp], rsp
    479245    mov     rcx, rbp
    480     mov     [xDX + VMMR0JMPBUF.SavedEbp], rcx
     246    mov     [xDX + VMMR0JMPBUF.UnwindBp], rcx
    481247    sub     rcx, 8
    482     mov     [xDX + VMMR0JMPBUF.SavedEsp], rcx
    483 
    484     ; store the last pieces of info.
     248    mov     [xDX + VMMR0JMPBUF.UnwindRetSp], rcx
     249
     250    ;
     251    ; Make sure the direction flag is clear before we do any rep movsb below.
     252    ;
     253    cld
     254
     255    ;
     256    ; Mirror the stack.
     257    ;
     258    xor     ebx, ebx
     259
     260    mov     rdi, [xDX + VMMR0JMPBUF.pvStackBuf]
     261    or      rdi, rdi
     262    jz      .skip_stack_mirroring
     263
     264    mov     ebx, [xDX + VMMR0JMPBUF.cbStackBuf]
     265    or      ebx, ebx
     266    jz      .skip_stack_mirroring
     267
    485268    mov     rcx, [xDX + VMMR0JMPBUF.rsp]
    486     mov     [xDX + VMMR0JMPBUF.SpCheck], rcx
    487     mov     byte [xDX + VMMR0JMPBUF.fInRing3Call], 1
     269    sub     rcx, rsp
     270    and     rcx, ~0fffh                 ; copy up to the page boundrary
     271
     272    cmp     rcx, rbx                    ; rbx = rcx = RT_MIN(rbx, rcx);
     273    jbe     .do_stack_buffer_big_enough
     274    mov     ecx, ebx                    ; too much to copy, limit to ebx
     275    jmp     .do_stack_copying
     276.do_stack_buffer_big_enough:
     277    mov     ebx, ecx                    ; ecx is smaller, update ebx for cbStackValid
     278
     279.do_stack_copying:
     280    mov     rsi, rsp
     281    rep movsb
     282
     283.skip_stack_mirroring:
     284    mov     [xDX + VMMR0JMPBUF.cbStackValid], ebx
     285
     286    ;
     287    ; Do buffer mirroring.
     288    ;
     289    mov     rdi, [xDX + VMMR0JMPBUF.pMirrorBuf]
     290    or      rdi, rdi
     291    jz      .skip_buffer_mirroring
     292    mov     rsi, rdx
     293    mov     ecx, VMMR0JMPBUF_size
     294    rep movsb
     295.skip_buffer_mirroring:
    488296
    489297    ;
     
    522330    ;
    523331.nok:
    524 %ifdef VBOX_STRICT
    525     pop     rax                         ; magic
    526     cmp     rax, RESUME_MAGIC
    527     je      .magic_ok
    528     mov     ecx, 0123h
    529     mov     [rcx], edx
    530 .magic_ok:
    531 %endif
    532332    mov     eax, VERR_VMM_LONG_JMP_ERROR
    533333%ifdef RT_OS_WINDOWS
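
For orientation, the fields the reworked assembly touches (UnwindPc/UnwindSp/UnwindBp/UnwindRetSp for stack dumps; pMirrorBuf, pvStackBuf, cbStackBuf and cbStackValid for mirroring the jump buffer and a slice of the ring-0 stack into ring-3 visible memory) suggest a jump buffer along these lines. This is a sketch for reading the diff only; the authoritative layout is the VMMR0JMPBUF structure and its .mac counterpart, and the member order here is guessed:

    #include <stdint.h>

    typedef struct VMMR0JMPBUF_sketch
    {
        /* Callee-saved register snapshot taken by vmmR0CallRing3SetJmp. */
        uintptr_t rbx, rsp, rbp, r12, r13, r14, r15, rip;   /* eip on x86 */
        /* Unwind assistance consumed by ring-3 (see VMM.cpp below). */
        uintptr_t UnwindSp, UnwindRetSp, UnwindBp, UnwindPc;
        uintptr_t UnwindRetPcValue, UnwindRetPcLocation;
        /* Mirroring targets wired up in VMMR0.cpp (vmm.s.AssertJmpBuf/abAssertStack). */
        struct VMMR0JMPBUF_sketch *pMirrorBuf;
        void     *pvStackBuf;
        uint32_t  cbStackBuf;       /* capacity of the ring-3 visible stack copy */
        uint32_t  cbStackValid;     /* bytes actually copied by vmmR0CallRing3LongJmp */
    } VMMR0JMPBUF_sketch;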
  • trunk/src/VBox/VMM/VMMR3/VMM.cpp

    r92392 r92408  
    165165*   Internal Functions                                                                                                           *
    166166*********************************************************************************************************************************/
    167 static int                  vmmR3InitStacks(PVM pVM);
    168167static void                 vmmR3InitRegisterStats(PVM pVM);
    169168static DECLCALLBACK(int)    vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
     
    280279        return rc;
    281280
    282     /*
    283      * Init various sub-components.
    284      */
    285     rc = vmmR3InitStacks(pVM);
     281#ifdef VBOX_WITH_NMI
     282    /*
     283     * Allocate mapping for the host APIC.
     284     */
     285    rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
     286    AssertRC(rc);
     287#endif
    286288    if (RT_SUCCESS(rc))
    287289    {
    288 #ifdef VBOX_WITH_NMI
    289290        /*
    290          * Allocate mapping for the host APIC.
     291         * Start the log flusher thread.
    291292         */
    292         rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
    293         AssertRC(rc);
    294 #endif
     293        rc = RTThreadCreate(&pVM->vmm.s.hLogFlusherThread, vmmR3LogFlusher, pVM, 0 /*cbStack*/,
     294                            RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "R0LogWrk");
    295295        if (RT_SUCCESS(rc))
    296296        {
     297
    297298            /*
    298              * Start the log flusher thread.
     299             * Debug info and statistics.
    299300             */
    300             rc = RTThreadCreate(&pVM->vmm.s.hLogFlusherThread, vmmR3LogFlusher, pVM, 0 /*cbStack*/,
    301                                 RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "R0LogWrk");
    302             if (RT_SUCCESS(rc))
    303             {
    304 
    305                 /*
    306                  * Debug info and statistics.
    307                  */
    308                 DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF);
    309                 vmmR3InitRegisterStats(pVM);
    310                 vmmInitFormatTypes();
    311 
    312                 return VINF_SUCCESS;
    313             }
     301            DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF);
     302            vmmR3InitRegisterStats(pVM);
     303            vmmInitFormatTypes();
     304
     305            return VINF_SUCCESS;
    314306        }
    315307    }
    316308    /** @todo Need failure cleanup? */
    317 
    318     return rc;
    319 }
    320 
    321 
    322 /**
    323  * Allocate & setup the VMM RC stack(s) (for EMTs).
    324  *
    325  * The stacks are also used for long jumps in Ring-0.
    326  *
    327  * @returns VBox status code.
    328  * @param   pVM     The cross context VM structure.
    329  *
    330  * @remarks The optional guard page gets it protection setup up during R3 init
    331  *          completion because of init order issues.
    332  */
    333 static int vmmR3InitStacks(PVM pVM)
    334 {
    335     int rc = VINF_SUCCESS;
    336 #ifdef VMM_R0_SWITCH_STACK
    337     uint32_t fFlags = MMHYPER_AONR_FLAGS_KERNEL_MAPPING;
    338 #else
    339     uint32_t fFlags = 0;
    340 #endif
    341 
    342     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    343     {
    344         PVMCPU pVCpu = pVM->apCpusR3[idCpu];
    345 
    346 #ifdef VBOX_STRICT_VMM_STACK
    347         rc = MMR3HyperAllocOnceNoRelEx(pVM, PAGE_SIZE + VMM_STACK_SIZE + PAGE_SIZE,
    348 #else
    349         rc = MMR3HyperAllocOnceNoRelEx(pVM, VMM_STACK_SIZE,
    350 #endif
    351                                        PAGE_SIZE, MM_TAG_VMM, fFlags, (void **)&pVCpu->vmm.s.pbEMTStackR3);
    352         if (RT_SUCCESS(rc))
    353         {
    354 #ifdef VBOX_STRICT_VMM_STACK
    355             pVCpu->vmm.s.pbEMTStackR3 += PAGE_SIZE;
    356 #endif
    357             pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
    358 
    359         }
    360     }
    361309
    362310    return rc;
     
    433381    STAMR3Register(pVM, &pVM->vmm.s.StatLogFlusherNoWakeUp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, "/VMM/LogFlush/00-NoWakups", STAMUNIT_OCCURENCES, "Times the flusher thread didn't need waking up.");
    434382
    435 #ifdef VBOX_WITH_STATISTICS
    436     for (VMCPUID i = 0; i < pVM->cCpus; i++)
    437     {
    438         PVMCPU pVCpu = pVM->apCpusR3[i];
    439         STAMR3RegisterF(pVM, &pVCpu->vmm.s.CallRing3JmpBufR0.cbUsedMax,  STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,      "Max amount of stack used.", "/VMM/Stack/CPU%u/Max", i);
    440         STAMR3RegisterF(pVM, &pVCpu->vmm.s.CallRing3JmpBufR0.cbUsedAvg,  STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,      "Average stack usage.",      "/VMM/Stack/CPU%u/Avg", i);
    441         STAMR3RegisterF(pVM, &pVCpu->vmm.s.CallRing3JmpBufR0.cUsedTotal, STAMTYPE_U64,       STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of stack usages.",   "/VMM/Stack/CPU%u/Uses", i);
    442     }
    443 #endif
    444383    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    445384    {
     
    22652204    AssertReturn(cbRead < ~(size_t)0 / 2, VERR_INVALID_PARAMETER);
    22662205
    2267     int rc;
    2268 #ifdef VMM_R0_SWITCH_STACK
    2269     RTHCUINTPTR off = R0Addr - MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
    2270 #else
    2271     RTHCUINTPTR off = pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack - (pVCpu->vmm.s.CallRing3JmpBufR0.SpCheck - R0Addr);
    2272 #endif
    2273     if (   off < VMM_STACK_SIZE
    2274         && off + cbRead <= VMM_STACK_SIZE)
    2275     {
    2276         memcpy(pvBuf, &pVCpu->vmm.s.pbEMTStackR3[off], cbRead);
    2277         rc = VINF_SUCCESS;
     2206    /*
     2207     * Hopefully we've got all the requested bits.  If not supply what we
     2208     * can and zero the remaining stuff.
     2209     */
     2210    RTHCUINTPTR off = R0Addr - pVCpu->vmm.s.AssertJmpBuf.UnwindSp;
     2211    if (off < pVCpu->vmm.s.AssertJmpBuf.cbStackValid)
     2212    {
     2213        size_t const cbValid = pVCpu->vmm.s.AssertJmpBuf.cbStackValid - off;
     2214        if (cbRead <= cbValid)
     2215        {
     2216            memcpy(pvBuf, &pVCpu->vmm.s.abAssertStack[off], cbRead);
     2217            return VINF_SUCCESS;
     2218        }
     2219
     2220        memcpy(pvBuf, &pVCpu->vmm.s.abAssertStack[off], cbValid);
     2221        RT_BZERO((uint8_t *)pvBuf + cbValid, cbRead - cbValid);
    22782222    }
    22792223    else
    2280         rc = VERR_INVALID_POINTER;
    2281 
    2282     /* Supply the setjmp return RIP/EIP.  */
    2283     if (   pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation + sizeof(RTR0UINTPTR) > R0Addr
    2284         && pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation < R0Addr + cbRead)
    2285     {
    2286         uint8_t const  *pbSrc  = (uint8_t const *)&pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcValue;
    2287         size_t          cbSrc  = sizeof(pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcValue);
     2224        RT_BZERO(pvBuf, cbRead);
     2225
     2226    /*
     2227     * Supply the setjmp return RIP/EIP if requested.
     2228     */
     2229    if (   pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation + sizeof(RTR0UINTPTR) > R0Addr
     2230        && pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation < R0Addr + cbRead)
     2231    {
     2232        uint8_t const  *pbSrc  = (uint8_t const *)&pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue;
     2233        size_t          cbSrc  = sizeof(pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue);
    22882234        size_t          offDst = 0;
    2289         if (R0Addr < pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation)
    2290             offDst = pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation - R0Addr;
    2291         else if (R0Addr > pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation)
    2292         {
    2293             size_t offSrc = R0Addr - pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation;
     2235        if (R0Addr < pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation)
     2236            offDst = pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation - R0Addr;
     2237        else if (R0Addr > pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation)
     2238        {
     2239            size_t offSrc = R0Addr - pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation;
    22942240            Assert(offSrc < cbSrc);
    22952241            pbSrc -= offSrc;
     
    23002246        memcpy((uint8_t *)pvBuf + offDst, pbSrc, cbSrc);
    23012247
    2302         if (cbSrc == cbRead)
    2303             rc = VINF_SUCCESS;
    2304     }
    2305 
    2306     return rc;
     2248        //if (cbSrc == cbRead)
     2249        //    rc = VINF_SUCCESS;
     2250    }
     2251
     2252    return VINF_SUCCESS;
    23072253}
    23082254
     
    23212267
    23222268    /*
     2269     * This is all we really need here if we had proper unwind info (win64 only)...
     2270     */
     2271    pState->u.x86.auRegs[X86_GREG_xBP] = pVCpu->vmm.s.AssertJmpBuf.UnwindBp;
     2272    pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindSp;
     2273    pState->uPc                        = pVCpu->vmm.s.AssertJmpBuf.UnwindPc;
     2274
     2275    /*
    23232276     * Locate the resume point on the stack.
    23242277     */
    2325 #ifdef VMM_R0_SWITCH_STACK
    2326     uintptr_t off = pVCpu->vmm.s.CallRing3JmpBufR0.SpResume - MMHyperCCToR0(pVCpu->pVMR3, pVCpu->vmm.s.pbEMTStackR3);
    2327     AssertReturnVoid(off < VMM_STACK_SIZE);
    2328 #else
    23292278    uintptr_t off = 0;
    2330 #endif
    23312279
    23322280#ifdef RT_ARCH_AMD64
    23332281    /*
    2334      * This code must match the .resume stuff in VMMR0JmpA-amd64.asm exactly.
    2335      */
    2336 # ifdef VBOX_STRICT
    2337     Assert(*(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off] == UINT32_C(0x7eadf00d));
    2338     off += 8; /* RESUME_MAGIC */
    2339 # endif
     2282     * This code must match the vmmR0CallRing3LongJmp stack frame setup in VMMR0JmpA-amd64.asm exactly.
     2283     */
    23402284# ifdef RT_OS_WINDOWS
    23412285    off += 0xa0; /* XMM6 thru XMM15 */
    23422286# endif
    2343     pState->u.x86.uRFlags              = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2287    pState->u.x86.uRFlags              = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23442288    off += 8;
    2345     pState->u.x86.auRegs[X86_GREG_xBX] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2289    pState->u.x86.auRegs[X86_GREG_xBX] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23462290    off += 8;
    23472291# ifdef RT_OS_WINDOWS
    2348     pState->u.x86.auRegs[X86_GREG_xSI] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2292    pState->u.x86.auRegs[X86_GREG_xSI] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23492293    off += 8;
    2350     pState->u.x86.auRegs[X86_GREG_xDI] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2294    pState->u.x86.auRegs[X86_GREG_xDI] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23512295    off += 8;
    23522296# endif
    2353     pState->u.x86.auRegs[X86_GREG_x12] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2297    pState->u.x86.auRegs[X86_GREG_x12] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23542298    off += 8;
    2355     pState->u.x86.auRegs[X86_GREG_x13] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2299    pState->u.x86.auRegs[X86_GREG_x13] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23562300    off += 8;
    2357     pState->u.x86.auRegs[X86_GREG_x14] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2301    pState->u.x86.auRegs[X86_GREG_x14] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23582302    off += 8;
    2359     pState->u.x86.auRegs[X86_GREG_x15] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2303    pState->u.x86.auRegs[X86_GREG_x15] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23602304    off += 8;
    2361     pState->u.x86.auRegs[X86_GREG_xBP] = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2305    pState->u.x86.auRegs[X86_GREG_xBP] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23622306    off += 8;
    2363     pState->uPc                        = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
    2364     off += 8;
     2307    pState->uPc                        = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
     2308    pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp;
    23652309
    23662310#elif defined(RT_ARCH_X86)
    23672311    /*
    2368      * This code must match the .resume stuff in VMMR0JmpA-x86.asm exactly.
    2369      */
    2370 # ifdef VBOX_STRICT
    2371     Assert(*(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off] == UINT32_C(0x7eadf00d));
    2372     off += 4; /* RESUME_MAGIC */
    2373 # endif
    2374     pState->u.x86.uRFlags              = *(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2312     * This code must match the vmmR0CallRing3LongJmp stack frame setup in VMMR0JmpA-x86.asm exactly.
     2313     */
     2314    pState->u.x86.uRFlags              = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23752315    off += 4;
    2376     pState->u.x86.auRegs[X86_GREG_xBX] = *(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2316    pState->u.x86.auRegs[X86_GREG_xBX] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23772317    off += 4;
    2378     pState->u.x86.auRegs[X86_GREG_xSI] = *(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2318    pState->u.x86.auRegs[X86_GREG_xSI] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23792319    off += 4;
    2380     pState->u.x86.auRegs[X86_GREG_xDI] = *(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2320    pState->u.x86.auRegs[X86_GREG_xDI] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23812321    off += 4;
    2382     pState->u.x86.auRegs[X86_GREG_xBP] = *(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
     2322    pState->u.x86.auRegs[X86_GREG_xBP] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
    23832323    off += 4;
    2384     pState->uPc                        = *(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
    2385     off += 4;
     2324    pState->uPc                        = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
     2325    pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp;
    23862326#else
    23872327# error "Port me"
    23882328#endif
    2389 
    2390     /*
    2391      * This is all we really need here, though the above helps if the assembly
    2392      * doesn't contain unwind info (currently only on win/64, so that is useful).
    2393      */
    2394     pState->u.x86.auRegs[X86_GREG_xBP] = pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp;
    2395     pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.CallRing3JmpBufR0.SpResume;
    23962329}
    23972330
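
The hunk above recovers the interrupted context from the head of the mirrored stack by reading back the callee-saved register frame in exactly the order vmmR0CallRing3LongJmp pushed it (hence the "must match ... exactly" comments). A minimal sketch of the same idea for the non-Windows AMD64 layout shown above (illustrative, assuming that push order):

    #include <stdint.h>

    /* Frame layout per the offsets above (non-Windows AMD64: no RSI/RDI or
       XMM area).  Illustrative only. */
    typedef struct EXAMPLEFRAME
    {
        uint64_t uRFlags;
        uint64_t uRbx;
        uint64_t uR12, uR13, uR14, uR15;
        uint64_t uRbp;
        uint64_t uRip;   /* Return address into the code that long-jumped. */
    } EXAMPLEFRAME;

    static void exampleRecoverFrame(uint8_t const *pbStackCopy, uint64_t uUnwindRetSp,
                                    EXAMPLEFRAME *pOut, uint64_t *puSp)
    {
        /* The stack copy starts at the frame, so a straight struct read works
           here; the real code steps through with an explicit offset instead. */
        *pOut = *(EXAMPLEFRAME const *)pbStackCopy;
        *puSp = uUnwindRetSp;   /* Stack pointer to report, per UnwindRetSp above. */
    }
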
     
    24642397static int vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu)
    24652398{
    2466     /*
    2467      * Signal a ring 0 hypervisor assertion.
    2468      * Cancel the longjmp operation that's in progress.
    2469      */
    2470     pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false;
    2471 #ifdef RT_ARCH_X86
    2472     pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0;
    2473 #else
    2474     pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0;
    2475 #endif
    2476 #ifdef VMM_R0_SWITCH_STACK
    2477     *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker  */
    2478 #endif
     2399    RT_NOREF(pVCpu);
    24792400    LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
    24802401    LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
  • trunk/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp

    r90829 r92408  
    362362        case VINF_EM_TRIPLE_FAULT:
    363363        case VERR_VMM_HYPER_CR3_MISMATCH:
    364         case VERR_VMM_SET_JMP_ERROR:
    365         case VERR_VMM_SET_JMP_ABORTED_RESUME:
    366         case VERR_VMM_SET_JMP_STACK_OVERFLOW:
    367364        case VERR_VMM_LONG_JMP_ERROR:
    368365        {
     
    398395             * Dump the relevant hypervisor registers and stack.
    399396             */
    400             if (   rcErr == VERR_VMM_RING0_ASSERTION /* fInRing3Call has already been cleared here. */
    401                 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
     397            if (rcErr == VERR_VMM_RING0_ASSERTION)
    402398            {
    403399                /* Dump the jmpbuf.  */
    404400                pHlp->pfnPrintf(pHlp,
    405401                                "!!\n"
    406                                 "!! CallRing3JmpBuf:\n"
     402                                "!! AssertJmpBuf:\n"
    407403                                "!!\n");
    408404                pHlp->pfnPrintf(pHlp,
    409                                 "SavedEsp=%RHv SavedEbp=%RHv SpResume=%RHv SpCheck=%RHv\n",
    410                                 pVCpu->vmm.s.CallRing3JmpBufR0.SavedEsp,
    411                                 pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp,
    412                                 pVCpu->vmm.s.CallRing3JmpBufR0.SpResume,
    413                                 pVCpu->vmm.s.CallRing3JmpBufR0.SpCheck);
    414                 pHlp->pfnPrintf(pHlp,
    415                                 "pvSavedStack=%RHv cbSavedStack=%#x  fInRing3Call=%RTbool\n",
    416                                 pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack,
    417                                 pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack,
    418                                 pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call);
    419                 pHlp->pfnPrintf(pHlp,
    420                                 "cbUsedMax=%#x cbUsedAvg=%#x cbUsedTotal=%#llx cUsedTotal=%#llx\n",
    421                                 pVCpu->vmm.s.CallRing3JmpBufR0.cbUsedMax,
    422                                 pVCpu->vmm.s.CallRing3JmpBufR0.cbUsedAvg,
    423                                 pVCpu->vmm.s.CallRing3JmpBufR0.cbUsedTotal,
    424                                 pVCpu->vmm.s.CallRing3JmpBufR0.cUsedTotal);
     405                                "UnwindSp=%RHv UnwindRetSp=%RHv UnwindBp=%RHv UnwindPc=%RHv\n",
     406                                pVCpu->vmm.s.AssertJmpBuf.UnwindSp,
     407                                pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp,
     408                                pVCpu->vmm.s.AssertJmpBuf.UnwindBp,
     409                                pVCpu->vmm.s.AssertJmpBuf.UnwindPc);
     410                pHlp->pfnPrintf(pHlp,
     411                                "UnwindRetPcValue=%RHv UnwindRetPcLocation=%RHv\n",
     412                                pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue,
     413                                pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation);
    425414                pHlp->pfnPrintf(pHlp,
    426415                                "pfn=%RHv pvUser1=%RHv pvUser2=%RHv\n",
    427                                 pVCpu->vmm.s.CallRing3JmpBufR0.pfn,
    428                                 pVCpu->vmm.s.CallRing3JmpBufR0.pvUser1,
    429                                 pVCpu->vmm.s.CallRing3JmpBufR0.pvUser2);
     416                                pVCpu->vmm.s.AssertJmpBuf.pfn,
     417                                pVCpu->vmm.s.AssertJmpBuf.pvUser1,
     418                                pVCpu->vmm.s.AssertJmpBuf.pvUser2);
    430419
    431420                /* Dump the resume register frame on the stack. */
    432                 PRTHCUINTPTR pBP;
    433 #ifdef VMM_R0_SWITCH_STACK
    434                 pBP = (PRTHCUINTPTR)&pVCpu->vmm.s.pbEMTStackR3[  pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp
    435                                                                - MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3)];
    436 #else
    437                 pBP = (PRTHCUINTPTR)&pVCpu->vmm.s.pbEMTStackR3[  pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack
    438                                                                - pVCpu->vmm.s.CallRing3JmpBufR0.SpCheck
    439                                                                + pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp];
    440 #endif
     421                PRTHCUINTPTR const pBP = (PRTHCUINTPTR)&pVCpu->vmm.s.abAssertStack[  pVCpu->vmm.s.AssertJmpBuf.UnwindBp
     422                                                                                   - pVCpu->vmm.s.AssertJmpBuf.UnwindSp];
    441423#if HC_ARCH_BITS == 32
    442424                pHlp->pfnPrintf(pHlp,
     
    445427                                ,
    446428                                pBP[-3], pBP[-2], pBP[-1],
    447                                 pBP[1], pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp - 8, pBP[0], pBP[-4]);
     429                                pBP[1], pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp, pBP[0], pBP[-4]);
    448430#else
    449431# ifdef RT_OS_WINDOWS
     
    459441                                pBP[-4], pBP[-3],
    460442                                pBP[-2], pBP[-1],
    461                                 pBP[1], pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp - 16, pBP[0], pBP[-8]);
     443                                pBP[1], pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp, pBP[0], pBP[-8]);
    462444# else
    463445                pHlp->pfnPrintf(pHlp,
     
    471453                                pBP[-4], pBP[-3],
    472454                                pBP[-2], pBP[-1],
    473                                 pBP[1], pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp - 16, pBP[0], pBP[-6]);
     455                                pBP[1], pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp, pBP[0], pBP[-6]);
    474456# endif
    475457#endif
     
    479461                PCDBGFSTACKFRAME pFirstFrame;
    480462                rc2 = DBGFR3StackWalkBeginEx(pVM->pUVM, pVCpu->idCpu, DBGFCODETYPE_RING0,
    481                                              DBGFR3AddrFromHostR0(&AddrBp, pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp),
    482                                              DBGFR3AddrFromHostR0(&AddrSp, pVCpu->vmm.s.CallRing3JmpBufR0.SpResume),
    483                                              DBGFR3AddrFromHostR0(&AddrPc, pVCpu->vmm.s.CallRing3JmpBufR0.SavedEipForUnwind),
     463                                             DBGFR3AddrFromHostR0(&AddrBp, pVCpu->vmm.s.AssertJmpBuf.UnwindBp),
     464                                             DBGFR3AddrFromHostR0(&AddrSp, pVCpu->vmm.s.AssertJmpBuf.UnwindSp),
     465                                             DBGFR3AddrFromHostR0(&AddrPc, pVCpu->vmm.s.AssertJmpBuf.UnwindPc),
    484466                                             RTDBGRETURNTYPE_INVALID, &pFirstFrame);
    485467                if (RT_SUCCESS(rc2))
     
    548530
    549531                /* Symbols on the stack. */
    550 #ifdef VMM_R0_SWITCH_STACK
    551                 uint32_t const   iLast   = VMM_STACK_SIZE / sizeof(uintptr_t);
    552                 uint32_t         iAddr   = (uint32_t)(  pVCpu->vmm.s.CallRing3JmpBufR0.SavedEsp
    553                                                       - MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3)) / sizeof(uintptr_t);
    554                 if (iAddr > iLast)
    555                     iAddr = 0;
    556 #else
    557                 uint32_t const   iLast   = RT_MIN(pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack, VMM_STACK_SIZE)
    558                                          / sizeof(uintptr_t);
    559                 uint32_t         iAddr   = 0;
    560 #endif
     532                uint32_t const          cbRawStack = RT_MIN(pVCpu->vmm.s.AssertJmpBuf.cbStackValid, sizeof(pVCpu->vmm.s.abAssertStack));
     533                uintptr_t const * const pauAddr    = (uintptr_t const *)&pVCpu->vmm.s.abAssertStack[0];
     534                uint32_t const          iEnd       = cbRawStack / sizeof(uintptr_t);
     535                uint32_t                iAddr      = 0;
    561536                pHlp->pfnPrintf(pHlp,
    562537                                "!!\n"
    563                                 "!! Addresses on the stack (iAddr=%#x, iLast=%#x)\n"
     538                                "!! Addresses on the stack (iAddr=%#x, iEnd=%#x)\n"
    564539                                "!!\n",
    565                                 iAddr, iLast);
    566                 uintptr_t const *paAddr  = (uintptr_t const *)pVCpu->vmm.s.pbEMTStackR3;
    567                 while (iAddr < iLast)
     540                                iAddr, iEnd);
     541                while (iAddr < iEnd)
    568542                {
    569                     uintptr_t const uAddr = paAddr[iAddr];
     543                    uintptr_t const uAddr = pauAddr[iAddr];
    570544                    if (uAddr > X86_PAGE_SIZE)
    571545                    {
    572546                        DBGFADDRESS  Addr;
    573547                        DBGFR3AddrFromFlat(pVM->pUVM, &Addr, uAddr);
    574                         RTGCINTPTR   offDisp = 0;
    575                         PRTDBGSYMBOL pSym  = DBGFR3AsSymbolByAddrA(pVM->pUVM, DBGF_AS_R0, &Addr,
    576                                                                    RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
    577                                                                    &offDisp, NULL);
    578                         RTGCINTPTR   offLineDisp;
    579                         PRTDBGLINE   pLine = DBGFR3AsLineByAddrA(pVM->pUVM, DBGF_AS_R0, &Addr, &offLineDisp, NULL);
     548                        RTGCINTPTR   offDisp     = 0;
     549                        RTGCINTPTR   offLineDisp = 0;
     550                        PRTDBGSYMBOL pSym        = DBGFR3AsSymbolByAddrA(pVM->pUVM, DBGF_AS_R0, &Addr,
     551                                                                           RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL
     552                                                                         | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
     553                                                                         &offDisp, NULL);
     554                        PRTDBGLINE   pLine       = DBGFR3AsLineByAddrA(pVM->pUVM, DBGF_AS_R0, &Addr, &offLineDisp, NULL);
    580555                        if (pLine || pSym)
    581556                        {
     
    599574                                "!!\n"
    600575                                "!! Raw stack (mind the direction).\n"
    601                                 "!! pbEMTStackR0=%RHv pbEMTStackBottomR0=%RHv VMM_STACK_SIZE=%#x\n"
     576                                "!! pbEMTStackR0=%RHv cbRawStack=%#x\n"
    602577                                "!! pbEmtStackR3=%p\n"
    603578                                "!!\n"
    604579                                "%.*Rhxd\n",
    605                                 MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3),
    606                                 MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3) + VMM_STACK_SIZE,
    607                                 VMM_STACK_SIZE,
    608                                 pVCpu->vmm.s.pbEMTStackR3,
    609                                 VMM_STACK_SIZE, pVCpu->vmm.s.pbEMTStackR3);
     580                                pVCpu->vmm.s.AssertJmpBuf.UnwindSp, cbRawStack,
     581                                &pVCpu->vmm.s.abAssertStack[0],
     582                                cbRawStack, &pVCpu->vmm.s.abAssertStack[0]);
    610583            }
    611584            else
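
The frame-pointer rebasing above is the key index math in the new dump: the stack copy in abAssertStack starts at the ring-0 address UnwindSp, so UnwindBp - UnwindSp is the frame's byte offset within the copy. A bounds-checked sketch of the same calculation (illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Rebase a ring-0 frame pointer into the ring-3 stack copy. */
    static uintptr_t const *exampleFrameInCopy(uint8_t const *pbCopy, size_t cbCopy,
                                               uintptr_t uUnwindSp, uintptr_t uUnwindBp)
    {
        size_t const off = uUnwindBp - uUnwindSp;
        if (uUnwindBp >= uUnwindSp && off + sizeof(uintptr_t) <= cbCopy)
            return (uintptr_t const *)&pbCopy[off];
        return NULL;   /* Frame pointer outside the mirrored range. */
    }
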
  • trunk/src/VBox/VMM/VMMRZ/VMMRZ.cpp

    r92395 r92408  
    4444#endif
    4545
    46     Assert(pVCpu->vmm.s.cCallRing3Disabled < 16);
    47     if (ASMAtomicUoIncU32(&pVCpu->vmm.s.cCallRing3Disabled) == 1)
     46    Assert(pVCpu->vmmr0.s.cCallRing3Disabled < 16);
     47    if (ASMAtomicUoIncU32(&pVCpu->vmmr0.s.cCallRing3Disabled) == 1)
    4848    {
    4949#ifdef IN_RC
     
    7373#endif
    7474
    75     Assert(pVCpu->vmm.s.cCallRing3Disabled > 0);
    76     if (ASMAtomicUoDecU32(&pVCpu->vmm.s.cCallRing3Disabled) == 0)
     75    Assert(pVCpu->vmmr0.s.cCallRing3Disabled > 0);
     76    if (ASMAtomicUoDecU32(&pVCpu->vmmr0.s.cCallRing3Disabled) == 0)
    7777    {
    7878#ifdef IN_RC
     
    9898{
    9999    VMCPU_ASSERT_EMT(pVCpu);
    100     Assert(pVCpu->vmm.s.cCallRing3Disabled <= 16);
    101     return pVCpu->vmm.s.cCallRing3Disabled == 0;
     100    Assert(pVCpu->vmmr0.s.cCallRing3Disabled <= 16);
     101    return pVCpu->vmmr0.s.cCallRing3Disabled == 0;
    102102}
    103103
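
The counter moved from the shared VMMCPU data into the ring-0-private VMMR0PERVCPU block; the pattern itself is unchanged: a nesting count where only the 0-to-1 and 1-to-0 transitions do real work. A generic sketch of that pattern (illustrative):

    #include <iprt/asm.h>   /* ASMAtomicUoIncU32 / ASMAtomicUoDecU32 */

    static uint32_t volatile g_cDisabled;

    static void exampleDisable(void)
    {
        if (ASMAtomicUoIncU32(&g_cDisabled) == 1)
        {
            /* Outermost disable: perform the actual state change here. */
        }
    }

    static void exampleEnable(void)
    {
        if (ASMAtomicUoDecU32(&g_cDisabled) == 0)
        {
            /* Outermost enable: undo the state change here. */
        }
    }
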
  • trunk/src/VBox/VMM/include/VMMInternal.h

    r92392 r92408  
    138138
    139139
     140/** Pointer to a ring-0 jump buffer. */
     141typedef struct VMMR0JMPBUF *PVMMR0JMPBUF;
    140142/**
    141143 * Jump buffer for the setjmp/longjmp like constructs used to
     
    184186    /** @} */
    185187
    186     /** Flag that indicates that we've done a ring-3 call. */
    187     bool                        fInRing3Call;
    188     /** The number of bytes we've saved. */
    189     uint32_t                    cbSavedStack;
    190     /** Pointer to the buffer used to save the stack.
    191      * This is assumed to be 8KB. */
    192     RTR0PTR                     pvSavedStack;
    193     /** Esp we we match against esp on resume to make sure the stack wasn't relocated. */
    194     /** Esp we match against esp on resume to make sure the stack wasn't relocated. */
    195     /** The esp we should resume execution with after the restore. */
    196     RTHCUINTREG                 SpResume;
    197     /** ESP/RSP at the time of the jump to ring 3. */
    198     RTHCUINTREG                 SavedEsp;
    199     /** EBP/RBP at the time of the jump to ring 3. */
    200     RTHCUINTREG                 SavedEbp;
    201     /** EIP/RIP within vmmR0CallRing3LongJmp for assisting unwinding. */
    202     RTHCUINTREG                 SavedEipForUnwind;
     188    /** RSP/ESP at the time of the stack mirroring (what pvStackBuf starts with). */
     189    RTHCUINTREG                 UnwindSp;
     190    /** RSP/ESP at the time of the long jump call. */
     191    RTHCUINTREG                 UnwindRetSp;
     192    /** RBP/EBP inside the vmmR0CallRing3LongJmp frame. */
     193    RTHCUINTREG                 UnwindBp;
     194    /** RIP/EIP within vmmR0CallRing3LongJmp for assisting unwinding. */
     195    RTHCUINTREG                 UnwindPc;
    203196    /** Unwind: The vmmR0CallRing3SetJmp return address value. */
    204197    RTHCUINTREG                 UnwindRetPcValue;
     
    213206    RTHCUINTREG                 pvUser2;
    214207
    215 #if HC_ARCH_BITS == 32
    216     /** Alignment padding. */
    217     uint32_t                    uPadding;
    218 #endif
    219 
    220     /** Stats: Max amount of stack used. */
    221     uint32_t                    cbUsedMax;
    222     /** Stats: Average stack usage. (Avg = cbUsedTotal / cUsedTotal) */
    223     uint32_t                    cbUsedAvg;
    224     /** Stats: Total amount of stack used. */
    225     uint64_t                    cbUsedTotal;
    226     /** Stats: Number of stack usages. */
    227     uint64_t                    cUsedTotal;
     208    /** Number of valid bytes in pvStackBuf.  */
     209    uint32_t                    cbStackValid;
     210    /** Size of buffer pvStackBuf points to. */
     211    uint32_t                    cbStackBuf;
     212    /** Pointer to buffer for mirroring the stack. Optional. */
     213    RTR0PTR                     pvStackBuf;
     214    /** Pointer to a ring-3 accessible jump buffer structure for automatic
     215     *  mirroring on longjmp. Optional. */
     216    R0PTRTYPE(PVMMR0JMPBUF)     pMirrorBuf;
    228217} VMMR0JMPBUF;
    229 /** Pointer to a ring-0 jump buffer. */
    230 typedef VMMR0JMPBUF *PVMMR0JMPBUF;
    231218
    232219
     
    429416    uint32_t                    u32Padding0;
    430417
    431     /** VMM stack, pointer to the top of the stack in R3.
    432      * Stack is allocated from the hypervisor heap and is page aligned
    433      * and always writable in RC. */
    434     R3PTRTYPE(uint8_t *)        pbEMTStackR3;
    435 
    436418    /** @name Rendezvous
    437419     * @{ */
     
    465447    /** @} */
    466448
    467     /** @name Call Ring-3
    468      * Formerly known as host calls.
    469      * @{ */
    470     /** The disable counter. */
    471     uint32_t                    cCallRing3Disabled;
    472     uint32_t                    u32Padding3;
    473     /** Ring-0 assertion notification callback. */
    474     R0PTRTYPE(PFNVMMR0ASSERTIONNOTIFICATION) pfnRing0AssertCallback;
    475     /** Argument for pfnRing0AssertionNotificationCallback. */
    476     R0PTRTYPE(void *)           pvRing0AssertCallbackUser;
    477     /** The Ring-0 jmp buffer.
    478      * @remarks The size of this type isn't stable in assembly, so don't put
    479      *          anything that needs to be accessed from assembly after it. */
    480     VMMR0JMPBUF                 CallRing3JmpBufR0;
     449    /** @name Ring-0 assertion info for this EMT.
     450     * @{ */
     451    /** Copy of the ring-0 jmp buffer after an assertion. */
     452    VMMR0JMPBUF                 AssertJmpBuf;
     453    /** Copy of the assertion stack. */
     454    uint8_t                     abAssertStack[8192];
    481455    /** @} */
    482456
     
    540514     * @note Cannot be put on the stack as the location may change and upset the
    541515     *       validation of resume-after-ring-3-call logic.
     516     * @todo This no longer needs to be here now that we don't call ring-3 and mess
     517     *       around with stack restoring/switching.
    542518     * @{ */
    543519    PGVM                                pGVM;
     
    547523    uint64_t                            u64Arg;
    548524    PSUPDRVSESSION                      pSession;
     525    /** @} */
     526
     527    /** @name Ring-0 setjmp / assertion handling.
     528     * @{ */
     529    /** The ring-0 setjmp buffer. */
     530    VMMR0JMPBUF                         AssertJmpBuf;
     531    /** The disable counter. */
     532    uint32_t                            cCallRing3Disabled;
     533    uint32_t                            u32Padding3;
     534    /** Ring-0 assertion notification callback. */
     535    R0PTRTYPE(PFNVMMR0ASSERTIONNOTIFICATION) pfnAssertCallback;
     536    /** Argument for pfnAssertCallback. */
     537    R0PTRTYPE(void *)                   pvAssertCallbackUser;
    549538    /** @} */
    550539
     
    569558AssertCompile(RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.s.RelLogger)
    570559              == RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.aLoggers) + sizeof(VMMR0PERVCPULOGGER) * VMMLOGGER_IDX_RELEASE);
     560AssertCompileMemberAlignment(VMMR0PERVCPU, AssertJmpBuf, 64);
    571561/** Pointer to VMM ring-0 VMCPU instance data. */
    572562typedef VMMR0PERVCPU *PVMMR0PERVCPU;
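
Taken together, the new fields describe an optional mirror-on-longjmp: pvStackBuf/cbStackBuf receive a copy of the live stack range, cbStackValid records how much of it is meaningful, and pMirrorBuf points at a ring-3 accessible VMMR0JMPBUF to clone. Conceptually the long jump path would do something like the following before unwinding (a sketch under those assumptions; the real work is done in assembly, VMMR0JmpA-*.asm):

    #include <string.h>
    #include "VMMInternal.h"   /* VMMR0JMPBUF, as declared above */

    /* Sketch: mirror the jump buffer and the active stack range to the
       ring-3 visible buffers before longjmp'ing on an assertion. */
    static void exampleMirrorOnLongJmp(VMMR0JMPBUF *pJmp, uintptr_t uCurSp)
    {
        if (pJmp->pvStackBuf && pJmp->cbStackBuf)
        {
            uint32_t cbToCopy = (uint32_t)(pJmp->UnwindRetSp - uCurSp);
            if (cbToCopy > pJmp->cbStackBuf)
                cbToCopy = pJmp->cbStackBuf;
            memcpy((void *)pJmp->pvStackBuf, (void const *)uCurSp, cbToCopy);
            pJmp->cbStackValid = cbToCopy;
        }
        /* Clone last so cbStackValid is part of the mirrored structure. */
        if (pJmp->pMirrorBuf)
            memcpy((void *)pJmp->pMirrorBuf, pJmp, sizeof(*pJmp));
    }
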
  • trunk/src/VBox/VMM/include/VMMInternal.mac

    r92392 r92408  
    1919%include "VBox/sup.mac"
    2020
    21 ;
    22 ; Determine the default stack switching unless specified explicitly.
    23 ;
    24 %ifndef VMM_R0_SWITCH_STACK
    25  %ifndef VMM_R0_NO_SWITCH_STACK
    26   %ifdef RT_OS_DARWIN
    27    %define VMM_R0_SWITCH_STACK
    28   %endif
     21
     22struc VMMR0JMPBUF
     23        ;
     24        ; traditional jmp_buf
     25        ;
     26%ifdef RT_ARCH_X86
     27        .ebx                    resd 1
     28        .esi                    resd 1
     29        .edi                    resd 1
     30        .ebp                    resd 1
     31        .esp                    resd 1
     32        .eip                    resd 1
     33        .eflags                 resd 1
     34%endif
     35%ifdef RT_ARCH_AMD64
     36        .rbx                    resq 1
     37 %ifdef RT_OS_WINDOWS
     38        .rsi                    resq 1
     39        .rdi                    resq 1
    2940 %endif
     41        .rbp                    resq 1
     42        .r12                    resq 1
     43        .r13                    resq 1
     44        .r14                    resq 1
     45        .r15                    resq 1
     46        .rsp                    resq 1
     47        .rip                    resq 1
     48 %ifdef RT_OS_WINDOWS
     49        .xmm6                   resq 2
     50        .xmm7                   resq 2
     51        .xmm8                   resq 2
     52        .xmm9                   resq 2
     53        .xmm10                  resq 2
     54        .xmm11                  resq 2
     55        .xmm12                  resq 2
     56        .xmm13                  resq 2
     57        .xmm14                  resq 2
     58        .xmm15                  resq 2
     59 %endif
     60        .rflags                 resq 1
    3061%endif
    3162
     63        ;
     64        ; Additional state and stack info for unwinding.
     65        ;
     66        .UnwindSp               RTR0PTR_RES 1
     67        .UnwindRetSp            RTR0PTR_RES 1
     68        .UnwindBp               RTR0PTR_RES 1
     69        .UnwindPc               RTR0PTR_RES 1
     70        .UnwindRetPcValue       RTR0PTR_RES 1
     71        .UnwindRetPcLocation    RTR0PTR_RES 1
    3272
    33 struc VMMR0JMPBUF
    34 %ifdef RT_ARCH_X86
    35     ; traditional jmp_buf
    36     .ebx            resd 1
    37     .esi            resd 1
    38     .edi            resd 1
    39     .ebp            resd 1
    40     .esp            resd 1
    41     .eip            resd 1
    42     .eflags         resd 1
     73        ;
     74        ; Info about what we were doing in case it's helpful.
     75        ;
     76        .pfn                    RTR0PTR_RES 1
     77        .pvUser1                RTR0PTR_RES 1
     78        .pvUser2                RTR0PTR_RES 1
    4379
    44     ; additional state and stack info.
    45     .fInRing3Call   resd 1
    46     .cbSavedStack   resd 1
    47     .pvSavedStack   resd 1
    48     .SpCheck        resd 1
    49     .SpResume       resd 1
    50     .SavedEsp       resd 1
    51     .SavedEbp       resd 1
    52     .SavedEipForUnwind      resd 1
    53     .UnwindRetPcValue       resd 1
    54     .UnwindRetPcLocation    resd 1
    55     .pfn            resd 1
    56     .pvUser1        resd 1
    57     .pvUser2        resd 1
    58 %endif
    59 %ifdef RT_ARCH_AMD64
    60     ; traditional jmp_buf
    61     .rbx            resq 1
    62  %ifdef RT_OS_WINDOWS
    63     .rsi            resq 1
    64     .rdi            resq 1
    65  %endif
    66     .rbp            resq 1
    67     .r12            resq 1
    68     .r13            resq 1
    69     .r14            resq 1
    70     .r15            resq 1
    71     .rsp            resq 1
    72     .rip            resq 1
    73  %ifdef RT_OS_WINDOWS
    74     .xmm6           resq 2
    75     .xmm7           resq 2
    76     .xmm8           resq 2
    77     .xmm9           resq 2
    78     .xmm10          resq 2
    79     .xmm11          resq 2
    80     .xmm12          resq 2
    81     .xmm13          resq 2
    82     .xmm14          resq 2
    83     .xmm15          resq 2
    84  %endif
    85     .rflags         resq 1
    86 
    87     ; additional state and stack info.
    88     .fInRing3Call   resd 1
    89     .cbSavedStack   resd 1
    90     .pvSavedStack   resq 1
    91     .SpCheck        resq 1
    92     .SpResume       resq 1
    93     .SavedEsp       resq 1
    94     .SavedEbp       resq 1
    95     .SavedEipForUnwind      resq 1
    96     .UnwindRetPcValue       resq 1
    97     .UnwindRetPcLocation    resq 1
    98     .pfn            resq 1
    99     .pvUser1        resq 1
    100     .pvUser2        resq 1
    101 %endif
    102 
    103     ; Statistics
    104     alignb 8
    105     .cbUsedMax      resd 1
    106     .cbUsedAvg      resd 1
    107     .cbUsedTotal    resq 1
    108     .cUsedTotal     resq 1
     80        ;
     81        ; For mirroring the jump buffer and stack to ring-3 for unwinding and analysis.
     82        ;
     83        .cbStackValid           resd        1
     84        .cbStackBuf             resd        1
     85        .pvStackBuf             RTR0PTR_RES 1
     86        .pMirrorBuf             RTR0PTR_RES 1
    10987endstruc
    11088
     
    11492        .iLastGZRc              resd 1
    11593        alignb 8
    116         .pbEMTStackR3           RTR3PTR_RES 1
    11794
    11895        .fInRendezvous          resb 1
     
    127104        .TracerCtx              resb SUPDRVTRACERUSRCTX64_size
    128105
    129         .cCallRing3Disabled     resd 1
    130106        alignb 8
    131         .pfnRing0AssertCallback RTR0PTR_RES 1
    132         .pvRing0AssertCallbackUser RTR0PTR_RES 1
    133         alignb 16
    134         .CallRing3JmpBufR0      resb 1
     107        .AssertJmpBuf           resb 1
    135108endstruc
    136109
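
Since the struc above must track the C VMMR0JMPBUF field for field, mismatches of this kind are usually caught with compile-time offset assertions on the C side. A sketch of that pattern (the VMMR0JMPBUF_OFF_* constants are hypothetical stand-ins, platform-dependent and not in-tree):

    #include <iprt/assertcompile.h>

    /* Hypothetical offsets that would be kept in a header shared with the
       assembler; the values below are illustrative placeholders only. */
    #define VMMR0JMPBUF_OFF_UNWIND_SP    0x48
    #define VMMR0JMPBUF_OFF_PV_STACK_BUF 0x98

    AssertCompileMemberOffset(VMMR0JMPBUF, UnwindSp,   VMMR0JMPBUF_OFF_UNWIND_SP);
    AssertCompileMemberOffset(VMMR0JMPBUF, pvStackBuf, VMMR0JMPBUF_OFF_PV_STACK_BUF);
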
  • trunk/src/VBox/VMM/testcase/Makefile.kmk

    r91775 r92408  
    7171        tstSSM \
    7272        tstVMMR0CallHost-1 \
    73         tstVMMR0CallHost-2 \
    7473        tstX86-FpuSaveRestore
    7574  ifn1of ($(KBUILD_TARGET).$(KBUILD_TARGET_ARCH), solaris.x86 solaris.amd64 win.amd64 ) ## TODO: Fix the code.
     
    300299
    301300#
    302 # Two testcases for checking the ring-3 "long jump" code.
      301# Testcase for checking the ring-0 setjmp/longjmp code.
    303302#
    304303tstVMMR0CallHost-1_TEMPLATE = VBOXR3TSTEXE
    305 tstVMMR0CallHost-1_DEFS = VMM_R0_NO_SWITCH_STACK
    306304tstVMMR0CallHost-1_INCS = $(VBOX_PATH_VMM_SRC)/include
    307305tstVMMR0CallHost-1_SOURCES = \
     
    312310        $(VBOX_PATH_VMM_SRC)/VMMR0/VMMR0JmpA-x86.asm
    313311
    314 tstVMMR0CallHost-2_EXTENDS = tstVMMR0CallHost-1
    315 tstVMMR0CallHost-2_DEFS = VMM_R0_SWITCH_STACK
    316 tstVMMR0CallHost-2_SOURCES.amd64 = \
    317         $(tstVMMR0CallHost-1_SOURCES.amd64) \
    318         tstVMMR0CallHost-2A.asm
    319312
    320313#
  • trunk/src/VBox/VMM/testcase/tstVMMR0CallHost-1.cpp

    r91806 r92408  
    3636
    3737/*********************************************************************************************************************************
    38 *   Defined Constants And Macros                                                                                                 *
    39 *********************************************************************************************************************************/
    40 #if !defined(VMM_R0_SWITCH_STACK) && !defined(VMM_R0_NO_SWITCH_STACK)
    41 # error "VMM_R0_SWITCH_STACK or VMM_R0_NO_SWITCH_STACK has to be defined."
    42 #endif
    43 
    44 
    45 /*********************************************************************************************************************************
    4638*   Global Variables                                                                                                             *
    4739*********************************************************************************************************************************/
    4840/** The jump buffer. */
    4941static VMMR0JMPBUF          g_Jmp;
     42/** The mirror jump buffer. */
     43static VMMR0JMPBUF          g_JmpMirror;
    5044/** The number of jumps we've done. */
    5145static unsigned volatile    g_cJmps;
     
    6761    char  *pv = (char *)alloca(cb);
    6862    RTStrPrintf(pv, cb, "i=%d%*s\n", i, cb, "");
    69 #ifdef VMM_R0_SWITCH_STACK
    70     g_cbFooUsed = VMM_STACK_SIZE - ((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack);
    71     RTTESTI_CHECK_MSG_RET(g_cbFooUsed < (intptr_t)VMM_STACK_SIZE - 128, ("%#x - (%p - %p) -> %#x; cb=%#x i=%d\n", VMM_STACK_SIZE, pv, g_Jmp.pvSavedStack, g_cbFooUsed, cb, i), -15);
    72 #elif defined(RT_ARCH_AMD64)
     63#if defined(RT_ARCH_AMD64)
    7364    g_cbFooUsed = (uintptr_t)g_Jmp.rsp - (uintptr_t)pv;
    7465    RTTESTI_CHECK_MSG_RET(g_cbFooUsed < VMM_STACK_SIZE - 128, ("%p - %p -> %#x; cb=%#x i=%d\n", g_Jmp.rsp, pv, g_cbFooUsed, cb, i), -15);
     
    122113void tst(int iFrom, int iTo, int iInc)
    123114{
    124 #ifdef VMM_R0_SWITCH_STACK
    125     int const cIterations = iFrom > iTo ? iFrom - iTo : iTo - iFrom;
    126     void   *pvPrev = alloca(1);
    127 #endif
    128 
    129     RTR0PTR R0PtrSaved = g_Jmp.pvSavedStack;
    130     RT_ZERO(g_Jmp);
    131     g_Jmp.pvSavedStack = R0PtrSaved;
    132     memset((void *)g_Jmp.pvSavedStack, '\0', VMM_STACK_SIZE);
     115    RT_BZERO(&g_Jmp, RT_UOFFSETOF(VMMR0JMPBUF, cbStackBuf));
     116    g_Jmp.cbStackValid = _1M;
     117    memset((void *)g_Jmp.pvStackBuf, '\0', g_Jmp.cbStackBuf);
    133118    g_cbFoo = 0;
    134119    g_cJmps = 0;
     
    136121    g_fInLongJmp = false;
    137122
    138     int iOrg = iFrom;
    139123    for (int i = iFrom, iItr = 0; i != iTo; i += iInc, iItr++)
    140124    {
    141         if (!g_fInLongJmp)
    142             iOrg = i;
    143         int rc = stackRandom(&g_Jmp, (PFNVMMR0SETJMP)(uintptr_t)tst2, (PVM)(uintptr_t)iOrg, 0);
     125        g_fInLongJmp = false;
     126        int rc = stackRandom(&g_Jmp, (PFNVMMR0SETJMP)(uintptr_t)tst2, (PVM)(uintptr_t)i, 0);
    144127        RTTESTI_CHECK_MSG_RETV(rc == (g_fInLongJmp ? 42 : 0),
    145                                ("i=%d iOrg=%d rc=%d setjmp; cbFoo=%#x cbFooUsed=%#x fInLongJmp=%d\n",
    146                                 i, iOrg, rc, g_cbFoo, g_cbFooUsed, g_fInLongJmp));
     128                               ("i=%d rc=%d setjmp; cbFoo=%#x cbFooUsed=%#x fInLongJmp=%d\n",
     129                                i, rc, g_cbFoo, g_cbFooUsed, g_fInLongJmp));
    147130
    148 #ifdef VMM_R0_SWITCH_STACK
    149         /* Make the stack pointer slide for the second half of the calls. */
    150         if (iItr >= cIterations / 2)
    151         {
    152             /* Note! gcc does funny rounding up of alloca(). */
    153 # if !defined(VBOX_WITH_GCC_SANITIZER) && !defined(__MSVC_RUNTIME_CHECKS)
    154             void  *pv2 = alloca((i % 63) | 1);
    155             size_t cb2 = (uintptr_t)pvPrev - (uintptr_t)pv2;
    156 # else
    157             size_t cb2 = ((i % 3) + 1) * 16; /* We get what we ask for here, and it's not at RSP/ESP due to guards. */
    158             void  *pv2 = alloca(cb2);
    159 # endif
    160             RTTESTI_CHECK_MSG(cb2 >= 16 && cb2 <= 128, ("cb2=%zu pv2=%p pvPrev=%p iAlloca=%d\n", cb2, pv2, pvPrev, iItr));
    161             memset(pv2, 0xff, cb2);
    162             memset(pvPrev, 0xee, 1);
    163             pvPrev = pv2;
    164         }
    165 #endif
    166131    }
    167132    RTTESTI_CHECK_MSG_RETV(g_cJmps, ("No jumps!"));
    168     if (g_Jmp.cbUsedAvg || g_Jmp.cUsedTotal)
    169         RTTestIPrintf(RTTESTLVL_ALWAYS, "cbUsedAvg=%#x cbUsedMax=%#x cUsedTotal=%#llx\n",
    170                       g_Jmp.cbUsedAvg, g_Jmp.cbUsedMax, g_Jmp.cUsedTotal);
    171133}
    172 
    173 
    174 #if defined(VMM_R0_SWITCH_STACK) && defined(RT_ARCH_AMD64)
    175 /*
    176  * Stack switch back tests.
    177  */
    178 RT_C_DECLS_BEGIN
    179 DECLCALLBACK(int) tstWrapped4(         PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3,  uintptr_t u4);
    180 DECLCALLBACK(int) StkBack_tstWrapped4( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3,  uintptr_t u4);
    181 DECLCALLBACK(int) tstWrapped5(         PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3,  uintptr_t u4, uintptr_t u5);
    182 DECLCALLBACK(int) StkBack_tstWrapped5( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3,  uintptr_t u4, uintptr_t u5);
    183 DECLCALLBACK(int) tstWrapped6(         PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3,  uintptr_t u4, uintptr_t u5, uintptr_t u6);
    184 DECLCALLBACK(int) StkBack_tstWrapped6( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3,  uintptr_t u4, uintptr_t u5, uintptr_t u6);
    185 DECLCALLBACK(int) tstWrapped7(         PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3,  uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7);
    186 DECLCALLBACK(int) StkBack_tstWrapped7( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3,  uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7);
    187 DECLCALLBACK(int) tstWrapped8(         PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3,  uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8);
    188 DECLCALLBACK(int) StkBack_tstWrapped8( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3,  uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8);
    189 DECLCALLBACK(int) tstWrapped9(         PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9);
    190 DECLCALLBACK(int) StkBack_tstWrapped9( PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9);
    191 DECLCALLBACK(int) tstWrapped10(        PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10);
    192 DECLCALLBACK(int) StkBack_tstWrapped10(PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10);
    193 DECLCALLBACK(int) tstWrapped16(        PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10, uintptr_t u11, uintptr_t u12, uintptr_t u13, uintptr_t u14, uintptr_t u15, uintptr_t u16);
    194 DECLCALLBACK(int) StkBack_tstWrapped16(PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10, uintptr_t u11, uintptr_t u12, uintptr_t u13, uintptr_t u14, uintptr_t u15, uintptr_t u16);
    195 DECLCALLBACK(int) tstWrapped20(        PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10, uintptr_t u11, uintptr_t u12, uintptr_t u13, uintptr_t u14, uintptr_t u15, uintptr_t u16, uintptr_t u17, uintptr_t u18, uintptr_t u19, uintptr_t u20);
    196 DECLCALLBACK(int) StkBack_tstWrapped20(PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10, uintptr_t u11, uintptr_t u12, uintptr_t u13, uintptr_t u14, uintptr_t u15, uintptr_t u16, uintptr_t u17, uintptr_t u18, uintptr_t u19, uintptr_t u20);
    197 
    198 DECLCALLBACK(int) tstWrappedThin(PVMMR0JMPBUF pJmp);
    199 DECLCALLBACK(int) StkBack_tstWrappedThin(PVMMR0JMPBUF pJmp);
    200 RT_C_DECLS_END
    201 
    202 
    203 
    204 DECLCALLBACK(int) StkBack_tstWrapped4(PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4)
    205 {
    206     RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);
    207     RTTESTI_CHECK_RET(u2 == (uintptr_t)2U, -2);
    208     RTTESTI_CHECK_RET(u3 == (uintptr_t)3U, -3);
    209     RTTESTI_CHECK_RET(u4 == (uintptr_t)4U, -4);
    210 
    211     void *pv = alloca(32);
    212     memset(pv, 'a', 32);
    213     RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);
    214 
    215     return 42;
    216 }
    217 
    218 
    219 DECLCALLBACK(int) StkBack_tstWrapped5(PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5)
    220 {
    221     RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);
    222     RTTESTI_CHECK_RET(u2 == ~(uintptr_t)2U, -2);
    223     RTTESTI_CHECK_RET(u3 == ~(uintptr_t)3U, -3);
    224     RTTESTI_CHECK_RET(u4 == ~(uintptr_t)4U, -4);
    225     RTTESTI_CHECK_RET(u5 == ~(uintptr_t)5U, -5);
    226 
    227     void *pv = alloca(32);
    228     memset(pv, 'a', 32);
    229     RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);
    230 
    231     return 42;
    232 }
    233 
    234 
    235 DECLCALLBACK(int) StkBack_tstWrapped6(PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6)
    236 {
    237     RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);
    238     RTTESTI_CHECK_RET(u2 ==  (uintptr_t)2U, -2);
    239     RTTESTI_CHECK_RET(u3 ==  (uintptr_t)3U, -3);
    240     RTTESTI_CHECK_RET(u4 ==  (uintptr_t)4U, -4);
    241     RTTESTI_CHECK_RET(u5 ==  (uintptr_t)5U, -5);
    242     RTTESTI_CHECK_RET(u6 ==  (uintptr_t)6U, -6);
    243 
    244     void *pv = alloca(32);
    245     memset(pv, 'a', 32);
    246     RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);
    247 
    248     return 42;
    249 }
    250 
    251 
    252 DECLCALLBACK(int) StkBack_tstWrapped7(PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7)
    253 {
    254     RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);
    255     RTTESTI_CHECK_RET(u2 == ~(uintptr_t)2U, -2);
    256     RTTESTI_CHECK_RET(u3 == ~(uintptr_t)3U, -3);
    257     RTTESTI_CHECK_RET(u4 == ~(uintptr_t)4U, -4);
    258     RTTESTI_CHECK_RET(u5 == ~(uintptr_t)5U, -5);
    259     RTTESTI_CHECK_RET(u6 == ~(uintptr_t)6U, -6);
    260     RTTESTI_CHECK_RET(u7 == ~(uintptr_t)7U, -7);
    261 
    262     void *pv = alloca(32);
    263     memset(pv, 'a', 32);
    264     RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);
    265 
    266     return 42;
    267 }
    268 
    269 
    270 DECLCALLBACK(int) StkBack_tstWrapped8(PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8)
    271 {
    272     RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);
    273     RTTESTI_CHECK_RET(u2 ==  (uintptr_t)2U, -2);
    274     RTTESTI_CHECK_RET(u3 ==  (uintptr_t)3U, -3);
    275     RTTESTI_CHECK_RET(u4 ==  (uintptr_t)4U, -4);
    276     RTTESTI_CHECK_RET(u5 ==  (uintptr_t)5U, -5);
    277     RTTESTI_CHECK_RET(u6 ==  (uintptr_t)6U, -6);
    278     RTTESTI_CHECK_RET(u7 ==  (uintptr_t)7U, -7);
    279     RTTESTI_CHECK_RET(u8 ==  (uintptr_t)8U, -8);
    280 
    281     void *pv = alloca(32);
    282     memset(pv, 'a', 32);
    283     RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);
    284 
    285     return 42;
    286 }
    287 
    288 DECLCALLBACK(int) StkBack_tstWrapped9(PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9)
    289 {
    290     RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);
    291     RTTESTI_CHECK_RET(u2 == ~(uintptr_t)2U, -2);
    292     RTTESTI_CHECK_RET(u3 == ~(uintptr_t)3U, -3);
    293     RTTESTI_CHECK_RET(u4 == ~(uintptr_t)4U, -4);
    294     RTTESTI_CHECK_RET(u5 == ~(uintptr_t)5U, -5);
    295     RTTESTI_CHECK_RET(u6 == ~(uintptr_t)6U, -6);
    296     RTTESTI_CHECK_RET(u7 == ~(uintptr_t)7U, -7);
    297     RTTESTI_CHECK_RET(u8 == ~(uintptr_t)8U, -8);
    298     RTTESTI_CHECK_RET(u9 == ~(uintptr_t)9U, -9);
    299 
    300     void *pv = alloca(32);
    301     memset(pv, 'a', 32);
    302     RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);
    303 
    304     return 42;
    305 }
    306 
    307 
    308 DECLCALLBACK(int) StkBack_tstWrapped10(PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10)
    309 {
    310     RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);
    311     RTTESTI_CHECK_RET(u2 ==  (uintptr_t)2U, -2);
    312     RTTESTI_CHECK_RET(u3 ==  (uintptr_t)3U, -3);
    313     RTTESTI_CHECK_RET(u4 ==  (uintptr_t)4U, -4);
    314     RTTESTI_CHECK_RET(u5 ==  (uintptr_t)5U, -5);
    315     RTTESTI_CHECK_RET(u6 ==  (uintptr_t)6U, -6);
    316     RTTESTI_CHECK_RET(u7 ==  (uintptr_t)7U, -7);
    317     RTTESTI_CHECK_RET(u8 ==  (uintptr_t)8U, -8);
    318     RTTESTI_CHECK_RET(u9 ==  (uintptr_t)9U, -9);
    319     RTTESTI_CHECK_RET(u10 == (uintptr_t)10U, -10);
    320 
    321     void *pv = alloca(32);
    322     memset(pv, 'a', 32);
    323     RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);
    324 
    325     return 42;
    326 }
    327 
    328 
    329 DECLCALLBACK(int) StkBack_tstWrapped16(PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10, uintptr_t u11, uintptr_t u12, uintptr_t u13, uintptr_t u14, uintptr_t u15, uintptr_t u16)
    330 {
    331     RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);
    332     RTTESTI_CHECK_RET(u2 ==  (uintptr_t)2U, -2);
    333     RTTESTI_CHECK_RET(u3 ==  (uintptr_t)3U, -3);
    334     RTTESTI_CHECK_RET(u4 ==  (uintptr_t)4U, -4);
    335     RTTESTI_CHECK_RET(u5 ==  (uintptr_t)5U, -5);
    336     RTTESTI_CHECK_RET(u6 ==  (uintptr_t)6U, -6);
    337     RTTESTI_CHECK_RET(u7 ==  (uintptr_t)7U, -7);
    338     RTTESTI_CHECK_RET(u8 ==  (uintptr_t)8U, -8);
    339     RTTESTI_CHECK_RET(u9 ==  (uintptr_t)9U, -9);
    340     RTTESTI_CHECK_RET(u10 == (uintptr_t)10U, -10);
    341     RTTESTI_CHECK_RET(u11 == (uintptr_t)11U, -11);
    342     RTTESTI_CHECK_RET(u12 == (uintptr_t)12U, -12);
    343     RTTESTI_CHECK_RET(u13 == (uintptr_t)13U, -13);
    344     RTTESTI_CHECK_RET(u14 == (uintptr_t)14U, -14);
    345     RTTESTI_CHECK_RET(u15 == (uintptr_t)15U, -15);
    346     RTTESTI_CHECK_RET(u16 == (uintptr_t)16U, -16);
    347 
    348     void *pv = alloca(32);
    349     memset(pv, 'a', 32);
    350     RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);
    351 
    352     return 42;
    353 }
    354 
    355 
    356 DECLCALLBACK(int) StkBack_tstWrapped20(PVMMR0JMPBUF pJmp, uintptr_t u2,  uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10, uintptr_t u11, uintptr_t u12, uintptr_t u13, uintptr_t u14, uintptr_t u15, uintptr_t u16, uintptr_t u17, uintptr_t u18, uintptr_t u19, uintptr_t u20)
    357 {
    358     RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);
    359     RTTESTI_CHECK_RET(u2 ==  (uintptr_t)2U, -2);
    360     RTTESTI_CHECK_RET(u3 ==  (uintptr_t)3U, -3);
    361     RTTESTI_CHECK_RET(u4 ==  (uintptr_t)4U, -4);
    362     RTTESTI_CHECK_RET(u5 ==  (uintptr_t)5U, -5);
    363     RTTESTI_CHECK_RET(u6 ==  (uintptr_t)6U, -6);
    364     RTTESTI_CHECK_RET(u7 ==  (uintptr_t)7U, -7);
    365     RTTESTI_CHECK_RET(u8 ==  (uintptr_t)8U, -8);
    366     RTTESTI_CHECK_RET(u9 ==  (uintptr_t)9U, -9);
    367     RTTESTI_CHECK_RET(u10 == (uintptr_t)10U, -10);
    368     RTTESTI_CHECK_RET(u11 == (uintptr_t)11U, -11);
    369     RTTESTI_CHECK_RET(u12 == (uintptr_t)12U, -12);
    370     RTTESTI_CHECK_RET(u13 == (uintptr_t)13U, -13);
    371     RTTESTI_CHECK_RET(u14 == (uintptr_t)14U, -14);
    372     RTTESTI_CHECK_RET(u15 == (uintptr_t)15U, -15);
    373     RTTESTI_CHECK_RET(u16 == (uintptr_t)16U, -16);
    374     RTTESTI_CHECK_RET(u17 == (uintptr_t)17U, -17);
    375     RTTESTI_CHECK_RET(u18 == (uintptr_t)18U, -18);
    376     RTTESTI_CHECK_RET(u19 == (uintptr_t)19U, -19);
    377     RTTESTI_CHECK_RET(u20 == (uintptr_t)20U, -20);
    378 
    379     void *pv = alloca(32);
    380     memset(pv, 'a', 32);
    381     RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);
    382 
    383     return 42;
    384 }
    385 
    386 
    387 DECLCALLBACK(int) tstSwitchBackInner(intptr_t i1, intptr_t i2)
    388 {
    389     RTTESTI_CHECK_RET(i1 == -42, -20);
    390     RTTESTI_CHECK_RET(i2 == (intptr_t)&g_Jmp, -21);
    391 
    392     void *pv = alloca(32);
    393     memset(pv, 'b', 32);
    394     RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack < VMM_STACK_SIZE, -22);
    395 
    396     int rc;
    397     rc = tstWrapped4(&g_Jmp,  (uintptr_t)2U,  (uintptr_t)3U,  (uintptr_t)4U);
    398     RTTESTI_CHECK_RET(rc == 42, -23);
    399 
    400     rc = tstWrapped5(&g_Jmp, ~(uintptr_t)2U, ~(uintptr_t)3U, ~(uintptr_t)4U, ~(uintptr_t)5U);
    401     RTTESTI_CHECK_RET(rc == 42, -23);
    402 
    403     rc = tstWrapped6(&g_Jmp,  (uintptr_t)2U,  (uintptr_t)3U,  (uintptr_t)4U,  (uintptr_t)5U,  (uintptr_t)6U);
    404     RTTESTI_CHECK_RET(rc == 42, -23);
    405 
    406     rc = tstWrapped7(&g_Jmp, ~(uintptr_t)2U, ~(uintptr_t)3U, ~(uintptr_t)4U, ~(uintptr_t)5U, ~(uintptr_t)6U, ~(uintptr_t)7U);
    407     RTTESTI_CHECK_RET(rc == 42, -23);
    408 
    409     rc = tstWrapped8(&g_Jmp,  (uintptr_t)2U,  (uintptr_t)3U,  (uintptr_t)4U,  (uintptr_t)5U,  (uintptr_t)6U,  (uintptr_t)7U,  (uintptr_t)8U);
    410     RTTESTI_CHECK_RET(rc == 42, -23);
    411 
    412     rc = tstWrapped9(&g_Jmp, ~(uintptr_t)2U, ~(uintptr_t)3U, ~(uintptr_t)4U, ~(uintptr_t)5U, ~(uintptr_t)6U, ~(uintptr_t)7U, ~(uintptr_t)8U, ~(uintptr_t)9U);
    413     RTTESTI_CHECK_RET(rc == 42, -23);
    414 
    415     rc = tstWrapped10(&g_Jmp, (uintptr_t)2U,  (uintptr_t)3U,  (uintptr_t)4U,  (uintptr_t)5U,  (uintptr_t)6U,  (uintptr_t)7U,  (uintptr_t)8U,  (uintptr_t)9U,  (uintptr_t)10);
    416     RTTESTI_CHECK_RET(rc == 42, -23);
    417 
    418     rc = tstWrapped16(&g_Jmp, (uintptr_t)2U,  (uintptr_t)3U,  (uintptr_t)4U,  (uintptr_t)5U,  (uintptr_t)6U,  (uintptr_t)7U,  (uintptr_t)8U,  (uintptr_t)9U,  (uintptr_t)10,  (uintptr_t)11,  (uintptr_t)12,  (uintptr_t)13,  (uintptr_t)14,  (uintptr_t)15,  (uintptr_t)16);
    419     RTTESTI_CHECK_RET(rc == 42, -23);
    420 
    421     rc = tstWrapped20(&g_Jmp, (uintptr_t)2U,  (uintptr_t)3U,  (uintptr_t)4U,  (uintptr_t)5U,  (uintptr_t)6U,  (uintptr_t)7U,  (uintptr_t)8U,  (uintptr_t)9U,  (uintptr_t)10,  (uintptr_t)11,  (uintptr_t)12,  (uintptr_t)13,  (uintptr_t)14,  (uintptr_t)15,  (uintptr_t)16,  (uintptr_t)17,  (uintptr_t)18,  (uintptr_t)19,  (uintptr_t)20);
    422     RTTESTI_CHECK_RET(rc == 42, -23);
    423     return rc;
    424 }
    425 
    426 
    427 DECLCALLBACK(int) StkBack_tstWrappedThin(PVMMR0JMPBUF pJmp)
    428 {
    429     RTTESTI_CHECK_RET(pJmp == &g_Jmp, -31);
    430 
    431     void *pv = alloca(32);
    432     memset(pv, 'c', 32);
    433     RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -32);
    434 
    435     return 42;
    436 }
    437 
    438 DECLCALLBACK(int) tstSwitchBackInnerThin(intptr_t i1, intptr_t i2)
    439 {
    440     RT_NOREF(i1);
    441     return tstWrappedThin((PVMMR0JMPBUF)i2);
    442 }
    443 
    444 
    445 void tstSwitchBack(void)
    446 {
    447     RTR0PTR R0PtrSaved = g_Jmp.pvSavedStack;
    448     RT_ZERO(g_Jmp);
    449     g_Jmp.pvSavedStack = R0PtrSaved;
    450     memset((void *)g_Jmp.pvSavedStack, '\0', VMM_STACK_SIZE);
    451     g_cbFoo = 0;
    452     g_cJmps = 0;
    453     g_cbFooUsed = 0;
    454     g_fInLongJmp = false;
    455 
    456     //for (int i = iFrom, iItr = 0; i != iTo; i += iInc, iItr++)
    457     {
    458         int rc = stackRandom(&g_Jmp, (PFNVMMR0SETJMP)(uintptr_t)tstSwitchBackInner, (PVM)(intptr_t)-42, (PVMCPU)&g_Jmp);
    459         RTTESTI_CHECK_MSG_RETV(rc == 42,
    460                                ("i=%d iOrg=%d rc=%d setjmp; cbFoo=%#x cbFooUsed=%#x fInLongJmp=%d\n",
    461                                 0, 0 /*i, iOrg*/, rc, g_cbFoo, g_cbFooUsed, g_fInLongJmp));
    462 
    463         rc = stackRandom(&g_Jmp, (PFNVMMR0SETJMP)(uintptr_t)tstSwitchBackInnerThin, NULL, (PVMCPU)&g_Jmp);
    464         RTTESTI_CHECK_MSG_RETV(rc == 42,
    465                                ("i=%d iOrg=%d rc=%d setjmp; cbFoo=%#x cbFooUsed=%#x fInLongJmp=%d\n",
    466                                 0, 0 /*i, iOrg*/, rc, g_cbFoo, g_cbFooUsed, g_fInLongJmp));
    467 
    468     }
    469     //RTTESTI_CHECK_MSG_RETV(g_cJmps, ("No jumps!"));
    470 }
    471 
    472 #endif
    473134
    474135
     
    479140     */
    480141    RTTEST hTest;
    481 #ifdef VMM_R0_NO_SWITCH_STACK
    482142    RTEXITCODE rcExit = RTTestInitAndCreate("tstVMMR0CallHost-1", &hTest);
    483 #else
    484     RTEXITCODE rcExit = RTTestInitAndCreate("tstVMMR0CallHost-2", &hTest);
    485 #endif
    486143    if (rcExit != RTEXITCODE_SUCCESS)
    487144        return rcExit;
    488145    RTTestBanner(hTest);
    489146
    490     g_Jmp.pvSavedStack = (RTR0PTR)RTTestGuardedAllocTail(hTest, VMM_STACK_SIZE);
     147    g_Jmp.cbStackBuf = PAGE_SIZE;
     148    g_Jmp.pvStackBuf = (uintptr_t)RTTestGuardedAllocTail(hTest, g_Jmp.cbStackBuf);
     149    g_Jmp.pMirrorBuf = (uintptr_t)&g_JmpMirror;
    491150
    492151    /*
     
    497156    RTTestSub(hTest, "Decreasing stack usage");
    498157    tst(7599, 0, -1);
    499 #if defined(VMM_R0_SWITCH_STACK) && defined(RT_ARCH_AMD64)
    500     RTTestSub(hTest, "Switch back");
    501     tstSwitchBack();
    502 #endif
    503158
    504159    return RTTestSummaryAndDestroy(hTest);
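
The testcase setup above replaces the old saved-stack allocation with a guarded mirror buffer: RTTestGuardedAllocTail places the buffer flush against a guard page, so any write past cbStackBuf faults immediately instead of silently corrupting memory. A condensed sketch of that setup (an illustrative wrapper around what main() does above):

    #include <iprt/test.h>
    #include <iprt/errcore.h>
    #include <iprt/param.h>     /* PAGE_SIZE */
    #include "VMMInternal.h"    /* VMMR0JMPBUF */

    /* Point a jump buffer at a page-sized, tail-guarded stack mirror and a
       ring-3 mirror structure, as the testcase does above. */
    static int exampleInitMirror(RTTEST hTest, VMMR0JMPBUF *pJmp, VMMR0JMPBUF *pMirror)
    {
        pJmp->cbStackBuf = PAGE_SIZE;
        pJmp->pvStackBuf = (uintptr_t)RTTestGuardedAllocTail(hTest, pJmp->cbStackBuf);
        pJmp->pMirrorBuf = (uintptr_t)pMirror;
        return pJmp->pvStackBuf ? VINF_SUCCESS : VERR_NO_MEMORY;
    }
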
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r92392 r92408  
    265265    PVM pVM = NULL; NOREF(pVM);
    266266
    267 #if defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
    268     CHECK_MEMBER_ALIGNMENT(VMCPU, vmm.s.CallRing3JmpBufR0, 16);
    269     CHECK_MEMBER_ALIGNMENT(VMCPU, vmm.s.CallRing3JmpBufR0.xmm6, 16);
    270 #endif
    271 
    272267    /* the VMCPUs are page aligned TLB hit reasons. */
    273268    CHECK_SIZE_ALIGNMENT(VMCPU, 4096);