Changeset 92408 in vbox
- Timestamp: Nov 12, 2021 9:49:06 PM
- Location: trunk
- Files: 2 deleted, 24 edited
- Config.kmk (modified) (1 diff)
- include/VBox/err.h (modified) (1 diff)
- include/VBox/err.mac (modified) (1 diff)
- include/VBox/vmm/gvm.h (modified) (2 diffs)
- include/VBox/vmm/gvm.mac (modified) (1 diff)
- include/VBox/vmm/vm.h (modified) (1 diff)
- include/VBox/vmm/vm.mac (modified) (1 diff)
- include/VBox/vmm/vmm.h (modified) (2 diffs)
- src/VBox/HostDrivers/Support/Makefile.kmk (modified) (2 diffs)
- src/VBox/VMM/Makefile.kmk (modified) (3 diffs)
- src/VBox/VMM/VMMAll/PDMAllCritSect.cpp (modified) (6 diffs)
- src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp (modified) (5 diffs)
- src/VBox/VMM/VMMAll/VMMAll.cpp (modified) (1 diff)
- src/VBox/VMM/VMMR0/PGMR0Pool.cpp (modified) (2 diffs)
- src/VBox/VMM/VMMR0/VMMR0.cpp (modified) (13 diffs)
- src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm (modified) (10 diffs)
- src/VBox/VMM/VMMR0/VMMR0StackBack-darwin.asm (deleted)
- src/VBox/VMM/VMMR3/VMM.cpp (modified) (7 diffs)
- src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp (modified) (8 diffs)
- src/VBox/VMM/VMMRZ/VMMRZ.cpp (modified) (3 diffs)
- src/VBox/VMM/include/VMMInternal.h (modified) (8 diffs)
- src/VBox/VMM/include/VMMInternal.mac (modified) (3 diffs)
- src/VBox/VMM/testcase/Makefile.kmk (modified) (3 diffs)
- src/VBox/VMM/testcase/tstVMMR0CallHost-1.cpp (modified) (6 diffs)
- src/VBox/VMM/testcase/tstVMMR0CallHost-2A.asm (deleted)
- src/VBox/VMM/testcase/tstVMStructSize.cpp (modified) (1 diff)
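
Taken together, the edits below retire the resumable ring-3 call machinery: the VMM_R0_SWITCH_STACK stack switching and the Darwin stack-back thunks are removed, the VERR_VMM_SET_JMP_* and ring-3 call status codes go away, and the old CallRing3JmpBufR0 becomes an assertion-only AssertJmpBuf that mirrors the jump buffer and the top of the ring-0 stack into ring-3 visible per-VCPU buffers (abAssertStack, cbStackValid, UnwindSp) so VMM.cpp and VMMGuruMeditation.cpp can unwind and dump it after a VERR_VMM_RING0_ASSERTION. The C sketch below only illustrates that mirroring pattern under simplified assumptions; the names MYASSERTJMPBUF, myRunWithAssertJmp and myAssertLongJmp are invented for the example and are not part of the VirtualBox sources.

    #include <setjmp.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical assertion jump buffer: enough unwind state plus a mirror of
       the top of the ring-0 stack that ring-3 code is allowed to read. */
    typedef struct MYASSERTJMPBUF
    {
        jmp_buf     Core;            /* longjmp target armed by myRunWithAssertJmp */
        uintptr_t   ArmedSp;         /* approximate stack pointer when armed */
        uintptr_t   UnwindSp;        /* stack pointer at the time of the long jump */
        uint32_t    cbStackValid;    /* number of valid bytes in abStack */
        uint8_t     abStack[8192];   /* mirrored stack content for later dumping */
    } MYASSERTJMPBUF;

    /* Arm the buffer and run pfn(pvUser); returns pfn's status, or the status
       passed to myAssertLongJmp() if an assertion fired inside pfn. */
    static int myRunWithAssertJmp(MYASSERTJMPBUF *pJmpBuf, int (*pfn)(void *), void *pvUser)
    {
        uint8_t uAnchor;
        int     rc;
        pJmpBuf->ArmedSp = (uintptr_t)&uAnchor;
        rc = setjmp(pJmpBuf->Core);  /* returns twice: 0 when armed, rcAssert on long jump */
        if (rc == 0)
            rc = pfn(pvUser);
        return rc;
    }

    /* Called from an assertion handler while pfn is still on the stack: mirror
       the stack region between here and the arming point, then bail out. */
    static void myAssertLongJmp(MYASSERTJMPBUF *pJmpBuf, int rcAssert)
    {
        uint8_t   uAnchor;
        uintptr_t uSp = (uintptr_t)&uAnchor;
        size_t    cb  = pJmpBuf->ArmedSp > uSp ? pJmpBuf->ArmedSp - uSp : 0;
        if (cb > sizeof(pJmpBuf->abStack))
            cb = sizeof(pJmpBuf->abStack);
        memcpy(pJmpBuf->abStack, (const void *)uSp, cb); /* stacks grow down on x86/amd64 */
        pJmpBuf->UnwindSp     = uSp;
        pJmpBuf->cbStackValid = (uint32_t)cb;
        longjmp(pJmpBuf->Core, rcAssert != 0 ? rcAssert : -1);
    }

In the changeset itself the same idea is implemented in assembly (vmmR0CallRing3SetJmp / vmmR0CallRing3LongJmp in VMMR0JmpA-amd64.asm), with the mirror buffers living in VMMR0PERVCPU::AssertJmpBuf and VMMCPU::abAssertStack.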
trunk/Config.kmk
r92372 → r92408

  2003  2003     VBOX_LIB_XPCOM_X86 = $(PATH_STAGE_BIN)/VBoxXPCOM-x86.dylib
  2004  2004     LIB_DDU = $(PATH_STAGE_BIN)/VBoxDDU.dylib
  2005        -  VBOX_LIB_SUPR0 = $(PATH_STAGE_LIB)/SUPR0$(VBOX_SUFF_LIB)
        2005 +  VBOX_LIB_SUPR0 :=
  2006  2006     endif
  2007  2007     if1of ($(KBUILD_TARGET), freebsd haiku linux netbsd openbsd solaris)
trunk/include/VBox/err.h
r92392 → r92408

  1273  1273     * complete or try with a clean build. */
  1274  1274     #define VERR_VMM_RC_VERSION_MISMATCH (-2705)
  1275        -  /** VMM set jump error. */
  1276        -  #define VERR_VMM_SET_JMP_ERROR (-2706)
  1277        -  /** VMM set jump stack overflow error. */
  1278        -  #define VERR_VMM_SET_JMP_STACK_OVERFLOW (-2707)
  1279        -  /** VMM set jump resume error. */
  1280        -  #define VERR_VMM_SET_JMP_ABORTED_RESUME (-2708)
  1281  1275     /** VMM long jump error. */
  1282  1276     #define VERR_VMM_LONG_JMP_ERROR (-2709)
  1283        -  /** Unknown ring-3 call attempted. */
  1284        -  #define VERR_VMM_UNKNOWN_RING3_CALL (-2710)
  1285        -  /** The ring-3 call didn't set an RC. */
  1286        -  #define VERR_VMM_RING3_CALL_NO_RC (-2711)
  1287  1277     /** Reason for leaving RC: Caller the tracer in ring-0. */
  1288  1278     #define VINF_VMM_CALL_TRACER (2712)
trunk/include/VBox/err.mac
r92392 → r92408

   493   493     %define VERR_VMM_R0_VERSION_MISMATCH (-2704)
   494   494     %define VERR_VMM_RC_VERSION_MISMATCH (-2705)
   495         -  %define VERR_VMM_SET_JMP_ERROR (-2706)
   496         -  %define VERR_VMM_SET_JMP_STACK_OVERFLOW (-2707)
   497         -  %define VERR_VMM_SET_JMP_ABORTED_RESUME (-2708)
   498   495     %define VERR_VMM_LONG_JMP_ERROR (-2709)
   499         -  %define VERR_VMM_UNKNOWN_RING3_CALL (-2710)
   500         -  %define VERR_VMM_RING3_CALL_NO_RC (-2711)
   501   496     %define VINF_VMM_CALL_TRACER (2712)
   502   497     %define VERR_VMM_SWITCHER_IPE_1 (-2713)
trunk/include/VBox/vmm/gvm.h
r91250 → r92408

   120   120     struct VMMR0PERVCPU s;
   121   121     #endif
   122         -  uint8_t padding[512];
         122  +  uint8_t padding[896];
   123   123     } vmmr0;
   124   124
   …     …
   133   133     /** Padding the structure size to page boundrary. */
   134   134     #ifdef VBOX_WITH_NEM_R0
   135         -  uint8_t abPadding3[4096 - 64*2 - 64 - 1024 - 64 - 512 - 64];
         135  +  uint8_t abPadding3[4096 - 64*2 - 64 - 1024 - 64 - 896 - 64];
   136   136     #else
   137         -  uint8_t abPadding3[4096 - 64*2 - 64 - 1024 - 512 - 64];
         137  +  uint8_t abPadding3[4096 - 64*2 - 64 - 1024 - 896 - 64];
   138   138     #endif
   139   139     } GVMCPU;
trunk/include/VBox/vmm/gvm.mac
r91250 → r92408

    51    51     %endif
    52    52     alignb 64
    53         -  .vmmr0 resb 512
          53 +  .vmmr0 resb 896
    54    54     alignb 64
    55    55     .pgmr0 resb 64
trunk/include/VBox/vmm/vm.h
r92358 → r92408

   219   219     struct VMMCPU s;
   220   220     #endif
   221         -  uint8_t padding[1344]; /* multiple of 64 */
         221  +  uint8_t padding[9536]; /* multiple of 64 */
   222   222     } vmm;
   223   223
trunk/include/VBox/vmm/vm.mac
r92362 → r92408

    69    69     .tm resb 5760
    70    70     alignb 64
    71         -  .vmm resb 1344
          71 +  .vmm resb 9536
    72    72     alignb 64
    73    73     .pdm resb 256
trunk/include/VBox/vmm/vmm.h
r92392 → r92408

   203   203     VMMR3DECL(PVMCPUCC) VMMR3GetCpuByIdU(PUVM pVM, VMCPUID idCpu);
   204   204     VMM_INT_DECL(uint32_t) VMMGetSvnRev(void);
   205         -  VMM_INT_DECL(bool) VMMIsInRing3Call(PVMCPUCC pVCpu);
   206   205     VMM_INT_DECL(void) VMMTrashVolatileXMMRegs(void);
   207   206
   …     …
   488   487     VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM);
   489   488     VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu);
   490         -  VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu);
   491   489     VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu);
   492   490     VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu);
trunk/src/VBox/HostDrivers/Support/Makefile.kmk
r91802 → r92408

    49    49     endif
    50    50     if !defined(VBOX_ONLY_DOCS)
    51         -  if1of ($(VBOX_LDR_FMT), pe lx macho)
          51 +  if1of ($(VBOX_LDR_FMT), pe lx)
    52    52     LIBRARIES += SUPR0
    53    53     endif
   …     …
   543   543     endif
   544   544     endif
   545         -
   546         -  else ifeq ($(VBOX_LDR_FMT),macho)
   547         -  $(call KB_FN_DO_PASS0_ON_TARGET,SUPR0) # Defines SUPR0_0_OUTDIR so we can use it in SUPR0_VBOX_FILES w/o needing $$.
   548         -  SUPR0_VBOX_KMK_FILE = $(SUPR0_0_OUTDIR)/files.kmk
   549         -  include $(SUPR0_VBOX_KMK_FILE)
   550         -  SUPR0_SOURCES = $(SUPR0_VBOX_FILES)
   551         -  SUPR0_CLEAN = $(SUPR0_VBOX_FILES) $(SUPR0_0_OUTDIR)/SUPR0.asm $(SUPR0_VBOX_KMK_FILE) $(SUPR0_VBOX_KMK_FILE).ts
   552         -
   553         -  # Generate a make include file which lists the wrapper source files.
   554         -  # $ (call KB_FN_AUTO_CMD_DEPS,$(SUPR0_VBOX_KMK_FILE).ts)
   555         -  $(SUPR0_VBOX_KMK_FILE).ts \
   556         -  +| $(SUPR0_VBOX_KMK_FILE): \
   557         -  $(PATH_SUB_CURRENT)/SUPDrv.cpp \
   558         -  $(PATH_SUB_CURRENT)/SUPR0-asm-files.sed
   559         -  # $(call KB_FN_AUTO_CMD_DEPS_COMMANDS)
   560         -  $(call MSG_GENERATE,,$(SUPR0_VBOX_KMK_FILE))
   561         -  $(QUIET)$(RM) -f -- "$@"
   562         -  $(QUIET)$(MKDIR) -p -- "$(@D)"
   563         -  $(QUIET)$(SED) --output "$@" -f "$(VBOX_PATH_SUP_SRC)/SUPR0-asm-files.sed" "$(VBOX_PATH_SUP_SRC)/SUPDrv.cpp"
   564         -  $(QUIET)$(CP) --changed -fv "$@" $(SUPR0_VBOX_KMK_FILE)
   565         -
   566         -  $(SUPR0_0_OUTDIR)/SUPR0.asm +| $(SUPR0_VBOX_FILES): \
   567         -  $(PATH_SUB_CURRENT)/SUPDrv.cpp \
   568         -  $(PATH_SUB_CURRENT)/SUPR0-asm.sed \
   569         -  | $$(dir $$@) $(VBOX_FILESPLIT)
   570         -  # $(call KB_FN_AUTO_CMD_DEPS_COMMANDS)
   571         -  $(call MSG_GENERATE,,SUPR0.asm and friends)
   572         -  $(QUIET)$(RM) -f -- "$@"
   573         -  $(QUIET)$(SED) --output "$@" -f "$(VBOX_PATH_SUP_SRC)/SUPR0-asm.sed" "$(VBOX_PATH_SUP_SRC)/SUPDrv.cpp"
   574         -  $(VBOX_FILESPLIT) "$@" "$(dir $@)"
   575         -
   576   545     endif
   577   546
trunk/src/VBox/VMM/Makefile.kmk
r92351 → r92408

    78    78     VBoxVMM_DEFS += VBOX_WITH_DBGF_FLOW_TRACING
    79    79     endif
    80         -  ifdef VBOX_WITH_VMM_R0_SWITCH_STACK
    81         -  VBoxVMM_DEFS += VMM_R0_SWITCH_STACK
    82         -  endif
    83    80     if "$(KBUILD_TYPE)" == "debug" && "$(USERNAME)" == "bird" && 0
    84    81     VBoxVMM_DEFS += RTMEM_WRAP_TO_EF_APIS
    85    82     endif
    86         -  VBoxVMM_DEFS.darwin = VMM_R0_SWITCH_STACK
    87    83
    88    84     VBoxVMM_INCS = \
   …     …
   466   462     VMMR0_DEFS += VBOX_WITH_DBGF_TRACING
   467   463     endif
   468         -  ifdef VBOX_WITH_VMM_R0_SWITCH_STACK
   469         -  VMMR0_DEFS += VMM_R0_SWITCH_STACK
   470         -  endif
   471   464     if1of ($(KBUILD_TARGET), darwin linux win)
   472   465     VMMR0_DEFS += VMM_R0_TOUCH_FPU
   473   466     endif
   474         -  VMMR0_DEFS.darwin = VMM_R0_SWITCH_STACK
   475   467     VMMR0_DEFS.win.amd64 = VBOX_WITH_KERNEL_USING_XMM
   476   468
   …     …
   572   564     VMMR0_SOURCES.x86 = \
   573   565     VMMR0/VMMR0JmpA-x86.asm
   574         -  VMMR0_SOURCES.darwin.amd64 = \
   575         -  VMMR0/VMMR0StackBack-darwin.asm
   576   566
   577   567     VMMR0_LIBS = \
trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp
r92204 r92408 422 422 423 423 424 #if defined(VMM_R0_SWITCH_STACK) && defined(IN_RING0)425 /**426 * We must be on kernel stack before disabling preemption, thus this wrapper.427 */428 DECLASM(int) StkBack_pdmR0CritSectEnterContendedOnKrnlStk(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECT pCritSect,429 RTNATIVETHREAD hNativeSelf, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)430 {431 VMMR0EMTBLOCKCTX Ctx;432 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);433 if (rc == VINF_SUCCESS)434 {435 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));436 437 rc = pdmR3R0CritSectEnterContended(pVM, pVCpu, pCritSect, hNativeSelf, pSrcPos, rcBusy);438 439 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);440 }441 else442 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);443 return rc;444 }445 DECLASM(int) pdmR0CritSectEnterContendedOnKrnlStk(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECT pCritSect,446 RTNATIVETHREAD hNativeSelf, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos);447 #endif448 449 450 424 /** 451 425 * Common worker for the debug and normal APIs. … … 548 522 if (pVCpu) 549 523 { 550 # ifndef VMM_R0_SWITCH_STACK551 524 VMMR0EMTBLOCKCTX Ctx; 552 525 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx); … … 562 535 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy); 563 536 return rc; 564 # else565 return pdmR0CritSectEnterContendedOnKrnlStk(pVM, pVCpu, pCritSect, hNativeSelf, rcBusy, pSrcPos);566 # endif567 537 } 568 538 … … 812 782 #endif /* IN_RING3 */ 813 783 814 815 #if defined(VMM_R0_SWITCH_STACK) && defined(IN_RING0)816 /**817 * We must be on kernel stack before disabling preemption, thus this wrapper.818 */819 DECLASM(int) StkBack_pdmR0CritSectLeaveSignallingOnKrnlStk(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECT pCritSect,820 int32_t const cLockers, SUPSEMEVENT const hEventToSignal)821 {822 VMMR0EMTBLOCKCTX Ctx;823 bool fLeaveCtx = false;824 if (cLockers < 0)825 AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));826 else827 {828 /* Someone is waiting, wake up one of them. 
*/829 Assert(cLockers < _8K);830 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;831 if (!RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)832 {833 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);834 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);835 fLeaveCtx = true;836 }837 int rc = SUPSemEventSignal(pVM->pSession, hEvent);838 AssertRC(rc);839 }840 841 /*842 * Signal exit event.843 */844 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))845 { /* likely */ }846 else847 {848 if (!fLeaveCtx && pVCpu != NULL && !RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)849 {850 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);851 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);852 fLeaveCtx = true;853 }854 Log8(("Signalling %#p\n", hEventToSignal));855 int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);856 AssertRC(rc);857 }858 859 /*860 * Restore HM context if needed.861 */862 if (!fLeaveCtx)863 { /* contention should be unlikely */ }864 else865 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);866 867 # ifdef DEBUG_bird868 VMMTrashVolatileXMMRegs();869 # endif870 return VINF_SUCCESS;871 }872 DECLASM(int) pdmR0CritSectLeaveSignallingOnKrnlStk(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECT pCritSect,873 int32_t const cLockers, SUPSEMEVENT const hEventToSignal);874 #endif875 784 876 785 /** … … 1029 938 if (!fQueueIt) 1030 939 { 1031 # ifndef VMM_R0_SWITCH_STACK1032 940 VMMR0EMTBLOCKCTX Ctx; 1033 941 bool fLeaveCtx = false; … … 1075 983 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx); 1076 984 1077 # ifdef DEBUG_bird985 # ifdef DEBUG_bird 1078 986 VMMTrashVolatileXMMRegs(); 1079 # endif987 # endif 1080 988 return VINF_SUCCESS; 1081 # else /* VMM_R0_SWITCH_STACK */1082 return pdmR0CritSectLeaveSignallingOnKrnlStk(pVM, pVCpu, pCritSect, cLockers, hEventToSignal);1083 # endif /* VMM_R0_SWITCH_STACK */1084 989 } 1085 990 -
trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp
r92204 r92408 101 101 * Internal Functions * 102 102 *********************************************************************************************************************************/ 103 #if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)104 103 static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal); 105 #else106 DECLASM(int) pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);107 DECLASM(int) StkBack_pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);108 #endif109 104 110 105 … … 556 551 * @param fNoVal No validation records. 557 552 */ 558 #if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)559 553 static int pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, 560 554 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal) 561 #else562 DECLASM(int) pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,563 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal);564 DECLASM(int) StkBack_pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,565 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)566 #endif567 555 { 568 556 /* … … 899 887 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared. 900 888 */ 901 #if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)902 889 static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal) 903 #else904 DECLASM(int) StkBack_pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)905 #endif906 890 { 907 891 /* … … 1321 1305 * @param fNoVal No validation records. 1322 1306 */ 1323 #if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)1324 1307 static int pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, 1325 1308 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal) 1326 #else1327 DECLASM(int) pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,1328 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal);1329 DECLASM(int) StkBack_pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,1330 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)1331 #endif1332 1309 { 1333 1310 /* … … 1696 1673 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl. 1697 1674 */ 1698 #if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)1699 1675 static int pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal) 1700 #else1701 DECLASM(int) pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);1702 DECLASM(int) StkBack_pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)1703 #endif1704 1676 { 1705 1677 /* -
trunk/src/VBox/VMM/VMMAll/VMMAll.cpp
r90598 → r92408

   269   269
   270   270     /**
   271         -   * Checks whether we're in a ring-3 call or not.
   272         -   *
   273         -   * @returns true / false.
   274         -   * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
   275         -   * @thread  EMT
   276         -   */
   277         -  VMM_INT_DECL(bool) VMMIsInRing3Call(PVMCPU pVCpu)
   278         -  {
   279         -  #ifdef RT_ARCH_X86
   280         -      return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
   281         -  #else
   282         -      return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
   283         -  #endif
   284         -  }
   285         -
   286         -
   287         -  /**
   288   271      * Returns the build type for matching components.
   289   272      *
trunk/src/VBox/VMM/VMMR0/PGMR0Pool.cpp
r91821 r92408 140 140 141 141 /** 142 * Move this back to PGMR0PoolGrow when VMM_R0_SWITCH_STACK is gonne.143 */144 #ifndef VMM_R0_SWITCH_STACK145 static int pgmR0PoolGrowOnKrnlStk(PGVM pGVM, PGVMCPU pGVCpu, PPGMPOOL pPool)146 #else147 DECLASM(int) pgmR0PoolGrowOnKrnlStk(PGVM pGVM, PGVMCPU pGVCpu, PPGMPOOL pPool);148 DECLASM(int) StkBack_pgmR0PoolGrowOnKrnlStk(PGVM pGVM, PGVMCPU pGVCpu, PPGMPOOL pPool)149 #endif150 {151 /*152 * Enter the grow critical section and call worker.153 */154 STAM_REL_PROFILE_START(&pPool->StatGrow, a);155 156 VMMR0EMTBLOCKCTX Ctx;157 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, &pGVM->pgmr0.s.PoolGrowCritSect, &Ctx);158 AssertRCReturn(rc, rc);159 160 rc = RTCritSectEnter(&pGVM->pgmr0.s.PoolGrowCritSect);161 AssertRCReturn(rc, rc);162 163 rc = pgmR0PoolGrowInner(pGVM, pPool);164 165 STAM_REL_PROFILE_STOP(&pPool->StatGrow, a);166 RTCritSectLeave(&pGVM->pgmr0.s.PoolGrowCritSect);167 168 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);169 return rc;170 }171 172 173 /**174 142 * Grows the shadow page pool. 175 143 * … … 194 162 PGVMCPU const pGVCpu = &pGVM->aCpus[idCpu]; 195 163 196 return pgmR0PoolGrowOnKrnlStk(pGVM, pGVCpu, pPool); 164 /* 165 * Enter the grow critical section and call worker. 166 */ 167 STAM_REL_PROFILE_START(&pPool->StatGrow, a); 168 169 VMMR0EMTBLOCKCTX Ctx; 170 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, &pGVM->pgmr0.s.PoolGrowCritSect, &Ctx); 171 AssertRCReturn(rc, rc); 172 173 rc = RTCritSectEnter(&pGVM->pgmr0.s.PoolGrowCritSect); 174 AssertRCReturn(rc, rc); 175 176 rc = pgmR0PoolGrowInner(pGVM, pPool); 177 178 STAM_REL_PROFILE_STOP(&pPool->StatGrow, a); 179 RTCritSectLeave(&pGVM->pgmr0.s.PoolGrowCritSect); 180 181 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx); 182 return rc; 197 183 } 198 184 -
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r92392 r92408 323 323 pGVCpu->vmmr0.s.pPreemptState = NULL; 324 324 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK; 325 pGVCpu->vmmr0.s.AssertJmpBuf.pMirrorBuf = &pGVCpu->vmm.s.AssertJmpBuf; 326 pGVCpu->vmmr0.s.AssertJmpBuf.pvStackBuf = &pGVCpu->vmm.s.abAssertStack[0]; 327 pGVCpu->vmmr0.s.AssertJmpBuf.cbStackBuf = sizeof(pGVCpu->vmm.s.abAssertStack); 328 325 329 for (size_t iLogger = 0; iLogger < RT_ELEMENTS(pGVCpu->vmmr0.s.u.aLoggers); iLogger++) 326 330 pGVCpu->vmmr0.s.u.aLoggers[iLogger].hEventFlushWait = NIL_RTSEMEVENT; … … 1441 1445 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode). 1442 1446 */ 1443 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm .s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);1447 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmmr0.s.AssertJmpBuf, HMR0RunGuestCode, pGVM, pGVCpu); 1444 1448 1445 1449 /* … … 1570 1574 */ 1571 1575 # ifdef VBOXSTRICTRC_STRICT_ENABLED 1572 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm .s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);1576 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu); 1573 1577 # else 1574 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm .s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);1578 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, NEMR0RunGuestCode, pGVM, idCpu); 1575 1579 # endif 1576 1580 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC); … … 2357 2361 } 2358 2362 2359 #ifndef VMM_R0_SWITCH_STACK /* Not safe unless we disable preemption first. */ 2363 2360 2364 /** 2361 2365 * This is just a longjmp wrapper function for VMMR0EntryEx calls. … … 2374 2378 pGVCpu->vmmr0.s.pSession); 2375 2379 } 2376 #endif2377 2380 2378 2381 … … 2394 2397 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession) 2395 2398 { 2396 #ifndef VMM_R0_SWITCH_STACK /* Not safe unless we disable preemption first. */2397 2399 /* 2398 2400 * Requests that should only happen on the EMT thread will be 2399 * wrapped in a setjmp so we can assert without causing t rouble.2401 * wrapped in a setjmp so we can assert without causing too much trouble. 2400 2402 */ 2401 2403 if ( pVM != NULL … … 2404 2406 && idCpu < pGVM->cCpus 2405 2407 && pGVM->pSession == pSession 2406 && pGVM->pSelf == pVM) 2407 { 2408 switch (enmOperation) 2409 { 2410 /* These might/will be called before VMMR3Init. */ 2411 case VMMR0_DO_GMM_INITIAL_RESERVATION: 2412 case VMMR0_DO_GMM_UPDATE_RESERVATION: 2413 case VMMR0_DO_GMM_ALLOCATE_PAGES: 2414 case VMMR0_DO_GMM_FREE_PAGES: 2415 case VMMR0_DO_GMM_BALLOONED_PAGES: 2416 /* On the mac we might not have a valid jmp buf, so check these as well. 
*/ 2417 case VMMR0_DO_VMMR0_INIT: 2418 case VMMR0_DO_VMMR0_TERM: 2419 2420 case VMMR0_DO_PDM_DEVICE_CREATE: 2421 case VMMR0_DO_PDM_DEVICE_GEN_CALL: 2422 case VMMR0_DO_IOM_GROW_IO_PORTS: 2423 case VMMR0_DO_IOM_GROW_IO_PORT_STATS: 2424 case VMMR0_DO_DBGF_BP_INIT: 2425 case VMMR0_DO_DBGF_BP_CHUNK_ALLOC: 2426 case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC: 2427 { 2428 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 2429 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf(); 2430 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread 2431 && pGVCpu->hNativeThreadR0 == hNativeThread)) 2432 { 2433 if (!pGVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack) 2434 break; 2435 2436 pGVCpu->vmmr0.s.pGVM = pGVM; 2437 pGVCpu->vmmr0.s.idCpu = idCpu; 2438 pGVCpu->vmmr0.s.enmOperation = enmOperation; 2439 pGVCpu->vmmr0.s.pReq = pReq; 2440 pGVCpu->vmmr0.s.u64Arg = u64Arg; 2441 pGVCpu->vmmr0.s.pSession = pSession; 2442 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, pGVCpu, 2443 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation); 2444 } 2445 return VERR_VM_THREAD_NOT_EMT; 2446 } 2447 2448 default: 2449 case VMMR0_DO_PGM_POOL_GROW: 2450 break; 2451 } 2452 } 2453 #else 2454 RT_NOREF(pVM); 2455 #endif 2408 && pGVM->pSelf == pGVM 2409 && enmOperation != VMMR0_DO_GVMM_DESTROY_VM 2410 && enmOperation != VMMR0_DO_GVMM_SCHED_WAKE_UP /* idCpu is not caller but target. Sigh. */ /** @todo fix*/ 2411 ) 2412 { 2413 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 2414 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf(); 2415 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread 2416 && pGVCpu->hNativeThreadR0 == hNativeThread)) 2417 { 2418 pGVCpu->vmmr0.s.pGVM = pGVM; 2419 pGVCpu->vmmr0.s.idCpu = idCpu; 2420 pGVCpu->vmmr0.s.enmOperation = enmOperation; 2421 pGVCpu->vmmr0.s.pReq = pReq; 2422 pGVCpu->vmmr0.s.u64Arg = u64Arg; 2423 pGVCpu->vmmr0.s.pSession = pSession; 2424 return vmmR0CallRing3SetJmpEx(&pGVCpu->vmmr0.s.AssertJmpBuf, vmmR0EntryExWrapper, pGVCpu, 2425 ((uintptr_t)u64Arg << 16) | (uintptr_t)enmOperation); 2426 } 2427 return VERR_VM_THREAD_NOT_EMT; 2428 } 2456 2429 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession); 2457 2430 } … … 2473 2446 { 2474 2447 #ifdef RT_ARCH_X86 2475 return pVCpu->vmm.s.CallRing3JmpBufR0.eip 2476 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call; 2448 return pVCpu->vmmr0.s.AssertJmpBuf.eip != 0; 2477 2449 #else 2478 return pVCpu->vmm.s.CallRing3JmpBufR0.rip 2479 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call; 2480 #endif 2481 } 2482 2483 2484 /** 2485 * Checks whether we've done a ring-3 long jump. 2486 * 2487 * @returns @c true / @c false 2488 * @param pVCpu The cross context virtual CPU structure. 2489 * @thread EMT 2490 */ 2491 VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu) 2492 { 2493 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call; 2450 return pVCpu->vmmr0.s.AssertJmpBuf.rip != 0; 2451 #endif 2494 2452 } 2495 2453 … … 3005 2963 * Inner worker for vmmR0LoggerFlushCommon. 
3006 2964 */ 3007 #ifndef VMM_R0_SWITCH_STACK3008 2965 static bool vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush) 3009 #else3010 DECLASM(bool) vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush);3011 DECLASM(bool) StkBack_vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush)3012 #endif3013 2966 { 3014 2967 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger]; … … 3545 3498 AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER); 3546 3499 3547 if (!pVCpu->vmm .s.pfnRing0AssertCallback)3548 { 3549 pVCpu->vmm .s.pfnRing0AssertCallback = pfnCallback;3550 pVCpu->vmm .s.pvRing0AssertCallbackUser = pvUser;3500 if (!pVCpu->vmmr0.s.pfnAssertCallback) 3501 { 3502 pVCpu->vmmr0.s.pfnAssertCallback = pfnCallback; 3503 pVCpu->vmmr0.s.pvAssertCallbackUser = pvUser; 3551 3504 return VINF_SUCCESS; 3552 3505 } … … 3562 3515 VMMR0_INT_DECL(void) VMMR0AssertionRemoveNotification(PVMCPUCC pVCpu) 3563 3516 { 3564 pVCpu->vmm .s.pfnRing0AssertCallback = NULL;3565 pVCpu->vmm .s.pvRing0AssertCallbackUser = NULL;3517 pVCpu->vmmr0.s.pfnAssertCallback = NULL; 3518 pVCpu->vmmr0.s.pvAssertCallbackUser = NULL; 3566 3519 } 3567 3520 … … 3575 3528 VMMR0_INT_DECL(bool) VMMR0AssertionIsNotificationSet(PVMCPUCC pVCpu) 3576 3529 { 3577 return pVCpu->vmm .s.pfnRing0AssertCallback != NULL;3530 return pVCpu->vmmr0.s.pfnAssertCallback != NULL; 3578 3531 } 3579 3532 … … 3597 3550 { 3598 3551 # ifdef RT_ARCH_X86 3599 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip 3600 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call) 3552 if (pVCpu->vmmr0.s.AssertJmpBuf.eip) 3601 3553 # else 3602 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip 3603 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call) 3554 if (pVCpu->vmmr0.s.AssertJmpBuf.rip) 3604 3555 # endif 3605 3556 { 3606 if (pVCpu->vmm .s.pfnRing0AssertCallback)3607 pVCpu->vmm .s.pfnRing0AssertCallback(pVCpu, pVCpu->vmm.s.pvRing0AssertCallbackUser);3608 int rc = vmmR0CallRing3LongJmp(&pVCpu->vmm .s.CallRing3JmpBufR0, VERR_VMM_RING0_ASSERTION);3557 if (pVCpu->vmmr0.s.pfnAssertCallback) 3558 pVCpu->vmmr0.s.pfnAssertCallback(pVCpu, pVCpu->vmmr0.s.pvAssertCallbackUser); 3559 int rc = vmmR0CallRing3LongJmp(&pVCpu->vmmr0.s.AssertJmpBuf, VERR_VMM_RING0_ASSERTION); 3609 3560 return RT_FAILURE_NP(rc); 3610 3561 } -
trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm
r91806 r92408 16 16 ; 17 17 18 ;******************************************************************************* 19 ;* Header Files*20 ;******************************************************************************* 18 ;********************************************************************************************************************************* 19 ;* Header Files * 20 ;********************************************************************************************************************************* 21 21 %define RT_ASM_WITH_SEH64_ALT 22 22 %include "VBox/asmdefs.mac" … … 24 24 %include "VBox/err.mac" 25 25 %include "VBox/param.mac" 26 %ifdef VMM_R0_SWITCH_STACK27 %include "VBox/SUPR0StackWrapper.mac"28 %endif29 30 31 ;*******************************************************************************32 ;* Defined Constants And Macros *33 ;*******************************************************************************34 %define RESUME_MAGIC 07eadf00dh35 %define STACK_PADDING 0eeeeeeeeeeeeeeeeh36 37 ;; Workaround for linux 4.6 fast/slow syscall stack depth difference.38 ;; Update: This got worse with linux 5.13 and CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT.39 ;; The x86 arch_exit_to_user_mode_prepare code limits the offset to 255,40 ;; while the generic limit is 1023. See bugref:10064 for details.41 %ifdef VMM_R0_SWITCH_STACK42 %define STACK_FUZZ_SIZE 043 %else44 %ifdef RT_OS_LINUX45 %define STACK_FUZZ_SIZE 38446 %else47 %define STACK_FUZZ_SIZE 12848 %endif49 %endif50 26 51 27 52 28 BEGINCODE 53 54 29 55 30 ;; … … 77 52 SEH64_SET_FRAME_xBP 0 78 53 %ifdef ASM_CALL64_MSC 79 sub rsp, 30h + STACK_FUZZ_SIZE; (10h is used by resume (??), 20h for callee spill area)80 SEH64_ALLOCATE_STACK 30h + STACK_FUZZ_SIZE54 sub rsp, 30h ; (10h is used by resume (??), 20h for callee spill area) 55 SEH64_ALLOCATE_STACK 30h 81 56 SEH64_END_PROLOGUE 82 57 mov r11, rdx ; pfn 83 58 mov rdx, rcx ; pJmpBuf; 84 59 %else 85 sub rsp, 10h + STACK_FUZZ_SIZE; (10h is used by resume (??))86 SEH64_ALLOCATE_STACK 10h + STACK_FUZZ_SIZE60 sub rsp, 10h ; (10h is used by resume (??)) 61 SEH64_ALLOCATE_STACK 10h 87 62 SEH64_END_PROLOGUE 88 63 mov r8, rdx ; pvUser1 (save it like MSC) … … 126 101 127 102 ; 128 ; If we're not in a ring-3 call, call pfn and return. 129 ; 130 test byte [xDX + VMMR0JMPBUF.fInRing3Call], 1 131 jnz .resume 132 133 .different_call_continue: 103 ; Save the call then make it. 
104 ; 134 105 mov [xDX + VMMR0JMPBUF.pfn], r11 135 106 mov [xDX + VMMR0JMPBUF.pvUser1], r8 136 107 mov [xDX + VMMR0JMPBUF.pvUser2], r9 137 138 %ifdef VMM_R0_SWITCH_STACK139 mov r15, [xDX + VMMR0JMPBUF.pvSavedStack]140 test r15, r15141 jz .entry_error142 %ifdef VBOX_STRICT143 cmp dword [r15], 0h144 jne .entry_error145 mov rdi, r15146 mov rcx, VMM_STACK_SIZE / 8147 mov rax, qword 0eeeeeeeffeeeeeeeh148 repne stosq149 mov [rdi - 10h], rbx150 %endif151 152 ; New RSP153 %ifdef WITHOUT_SUPR0STACKINFO154 lea r15, [r15 + VMM_STACK_SIZE]155 %else156 lea r15, [r15 + VMM_STACK_SIZE - SUPR0STACKINFO_size]157 158 ; Plant SUPR0 stack info.159 mov [r15 + SUPR0STACKINFO.pResumeKernelStack], rsp160 mov [r15 + SUPR0STACKINFO.pSelf], r15161 mov dword [r15 + SUPR0STACKINFO.magic0], SUPR0STACKINFO_MAGIC0162 mov dword [r15 + SUPR0STACKINFO.magic1], SUPR0STACKINFO_MAGIC1163 mov dword [r15 + SUPR0STACKINFO.magic2], SUPR0STACKINFO_MAGIC2164 mov dword [r15 + SUPR0STACKINFO.magic3], SUPR0STACKINFO_MAGIC3165 166 %endif167 168 ; Switch stack!169 %ifdef ASM_CALL64_MSC170 lea rsp, [r15 - 20h]171 %else172 mov rsp, r15173 %endif174 %endif ; VMM_R0_SWITCH_STACK175 108 176 109 mov r12, rdx ; Save pJmpBuf. … … 184 117 call r11 185 118 mov rdx, r12 ; Restore pJmpBuf 186 187 %ifdef VMM_R0_SWITCH_STACK188 ; Reset the debug mark and the stack info header.189 mov r15, [xDX + VMMR0JMPBUF.pvSavedStack]190 %ifndef WITHOUT_SUPR0STACKINFO191 mov qword [r15 + VMM_STACK_SIZE - SUPR0STACKINFO_size + SUPR0STACKINFO.magic0], 0h192 %endif193 %ifdef VBOX_STRICT194 mov dword [r15], 0h ; Reset the marker195 %endif196 %endif197 119 198 120 ; … … 227 149 popf 228 150 leave 229 ret230 231 .entry_error:232 mov eax, VERR_VMM_SET_JMP_ERROR233 jmp .proper_return234 235 .stack_overflow:236 mov eax, VERR_VMM_SET_JMP_STACK_OVERFLOW237 jmp .proper_return238 239 ;240 ; Aborting resume.241 ; Note! 
No need to restore XMM registers here since we haven't touched them yet.242 ;243 .bad:244 and qword [xDX + VMMR0JMPBUF.rip], byte 0 ; used for valid check.245 mov rbx, [xDX + VMMR0JMPBUF.rbx]246 %ifdef ASM_CALL64_MSC247 mov rsi, [xDX + VMMR0JMPBUF.rsi]248 mov rdi, [xDX + VMMR0JMPBUF.rdi]249 %endif250 mov r12, [xDX + VMMR0JMPBUF.r12]251 mov r13, [xDX + VMMR0JMPBUF.r13]252 mov r14, [xDX + VMMR0JMPBUF.r14]253 mov r15, [xDX + VMMR0JMPBUF.r15]254 mov eax, VERR_VMM_SET_JMP_ABORTED_RESUME255 leave256 ret257 258 ;259 ; Not the same call as went to ring-3.260 ;261 .different_call:262 mov byte [xDX + VMMR0JMPBUF.fInRing3Call], 0263 ;; @todo or should we fail here instead?264 jmp .different_call_continue265 266 ;267 ; Resume VMMRZCallRing3 the call.268 ;269 .resume:270 ; Check if it's actually the same call, if not just continue with it271 ; as a regular call (ring-0 assert, then VM destroy).272 cmp [xDX + VMMR0JMPBUF.pfn], r11273 jne .different_call274 cmp [xDX + VMMR0JMPBUF.pvUser1], r8275 jne .different_call276 cmp [xDX + VMMR0JMPBUF.pvUser2], r9277 jne .different_call278 279 %ifndef VMM_R0_SWITCH_STACK280 ; Sanity checks incoming stack, applying fuzz if needed.281 sub r10, [xDX + VMMR0JMPBUF.SpCheck]282 jz .resume_stack_checked_out283 add r10, STACK_FUZZ_SIZE ; plus/minus STACK_FUZZ_SIZE is fine.284 cmp r10, STACK_FUZZ_SIZE * 2285 ja .bad286 287 mov r10, [xDX + VMMR0JMPBUF.SpCheck]288 mov [xDX + VMMR0JMPBUF.rsp], r10 ; Must be update in case of another long jump (used for save calc).289 290 .resume_stack_checked_out:291 mov ecx, [xDX + VMMR0JMPBUF.cbSavedStack]292 cmp rcx, VMM_STACK_SIZE293 ja .bad294 test rcx, 7295 jnz .bad296 mov rdi, [xDX + VMMR0JMPBUF.SpCheck]297 sub rdi, [xDX + VMMR0JMPBUF.SpResume]298 cmp rcx, rdi299 jne .bad300 %endif301 302 %ifdef VMM_R0_SWITCH_STACK303 ; Update the signature in case the kernel stack moved.304 mov r15, [xDX + VMMR0JMPBUF.pvSavedStack]305 test r15, r15306 jz .entry_error307 %ifndef WITHOUT_SUPR0STACKINFO308 lea r15, [r15 + VMM_STACK_SIZE - SUPR0STACKINFO_size]309 310 mov [r15 + SUPR0STACKINFO.pResumeKernelStack], rsp311 mov [r15 + SUPR0STACKINFO.pSelf], r15312 mov dword [r15 + SUPR0STACKINFO.magic0], SUPR0STACKINFO_MAGIC0313 mov dword [r15 + SUPR0STACKINFO.magic1], SUPR0STACKINFO_MAGIC1314 mov dword [r15 + SUPR0STACKINFO.magic2], SUPR0STACKINFO_MAGIC2315 mov dword [r15 + SUPR0STACKINFO.magic3], SUPR0STACKINFO_MAGIC3316 %endif317 318 ; Switch stack.319 mov rsp, [xDX + VMMR0JMPBUF.SpResume]320 %else321 ; Restore the stack.322 mov ecx, [xDX + VMMR0JMPBUF.cbSavedStack]323 shr ecx, 3324 mov rsi, [xDX + VMMR0JMPBUF.pvSavedStack]325 mov rdi, [xDX + VMMR0JMPBUF.SpResume]326 mov rsp, rdi327 rep movsq328 %endif ; !VMM_R0_SWITCH_STACK329 mov byte [xDX + VMMR0JMPBUF.fInRing3Call], 0330 331 ;332 ; Continue where we left off.333 ;334 %ifdef VBOX_STRICT335 pop rax ; magic336 cmp rax, RESUME_MAGIC337 je .magic_ok338 mov ecx, 0123h339 mov [ecx], edx340 .magic_ok:341 %endif342 %ifdef RT_OS_WINDOWS343 movdqa xmm6, [rsp + 000h]344 movdqa xmm7, [rsp + 010h]345 movdqa xmm8, [rsp + 020h]346 movdqa xmm9, [rsp + 030h]347 movdqa xmm10, [rsp + 040h]348 movdqa xmm11, [rsp + 050h]349 movdqa xmm12, [rsp + 060h]350 movdqa xmm13, [rsp + 070h]351 movdqa xmm14, [rsp + 080h]352 movdqa xmm15, [rsp + 090h]353 add rsp, 0a0h354 %endif355 popf356 pop rbx357 %ifdef ASM_CALL64_MSC358 pop rsi359 pop rdi360 %endif361 pop r12362 pop r13363 pop r14364 pop r15365 pop rbp366 xor eax, eax ; VINF_SUCCESS367 151 ret 368 152 ENDPROC vmmR0CallRing3SetJmp … … 416 200 movdqa [rsp + 090h], xmm15 417 201 %endif 
418 %ifdef VBOX_STRICT419 push RESUME_MAGIC420 SEH64_ALLOCATE_STACK 8421 %endif422 202 SEH64_END_PROLOGUE 423 203 … … 440 220 441 221 ; 442 ; Sanity checks. 443 ; 444 mov rdi, [xDX + VMMR0JMPBUF.pvSavedStack] 445 test rdi, rdi ; darwin may set this to 0. 446 jz .nok 447 mov [xDX + VMMR0JMPBUF.SpResume], rsp 448 %ifndef VMM_R0_SWITCH_STACK 222 ; Also check that the stack is in the vicinity of the RSP we entered 223 ; on so the stack mirroring below doesn't go wild. 224 ; 449 225 mov rsi, rsp 450 226 mov rcx, [xDX + VMMR0JMPBUF.rsp] 451 227 sub rcx, rsi 452 453 ; two sanity checks on the size. 454 cmp rcx, VMM_STACK_SIZE ; check max size. 228 cmp rcx, _64K 455 229 jnbe .nok 456 230 457 231 ; 458 ; Copy the stack459 ;460 test ecx, 7 ; check alignment461 jnz .nok462 mov [xDX + VMMR0JMPBUF.cbSavedStack], ecx463 shr ecx, 3464 rep movsq465 466 %endif ; !VMM_R0_SWITCH_STACK467 468 232 ; Save a PC and return PC here to assist unwinding. 233 ; 469 234 .unwind_point: 470 235 lea rcx, [.unwind_point wrt RIP] 471 mov [xDX + VMMR0JMPBUF. SavedEipForUnwind], rcx236 mov [xDX + VMMR0JMPBUF.UnwindPc], rcx 472 237 mov rcx, [xDX + VMMR0JMPBUF.rbp] 473 238 lea rcx, [rcx + 8] … … 477 242 478 243 ; Save RSP & RBP to enable stack dumps 244 mov [xDX + VMMR0JMPBUF.UnwindSp], rsp 479 245 mov rcx, rbp 480 mov [xDX + VMMR0JMPBUF. SavedEbp], rcx246 mov [xDX + VMMR0JMPBUF.UnwindBp], rcx 481 247 sub rcx, 8 482 mov [xDX + VMMR0JMPBUF.SavedEsp], rcx 483 484 ; store the last pieces of info. 248 mov [xDX + VMMR0JMPBUF.UnwindRetSp], rcx 249 250 ; 251 ; Make sure the direction flag is clear before we do any rep movsb below. 252 ; 253 cld 254 255 ; 256 ; Mirror the stack. 257 ; 258 xor ebx, ebx 259 260 mov rdi, [xDX + VMMR0JMPBUF.pvStackBuf] 261 or rdi, rdi 262 jz .skip_stack_mirroring 263 264 mov ebx, [xDX + VMMR0JMPBUF.cbStackBuf] 265 or ebx, ebx 266 jz .skip_stack_mirroring 267 485 268 mov rcx, [xDX + VMMR0JMPBUF.rsp] 486 mov [xDX + VMMR0JMPBUF.SpCheck], rcx 487 mov byte [xDX + VMMR0JMPBUF.fInRing3Call], 1 269 sub rcx, rsp 270 and rcx, ~0fffh ; copy up to the page boundrary 271 272 cmp rcx, rbx ; rbx = rcx = RT_MIN(rbx, rcx); 273 jbe .do_stack_buffer_big_enough 274 mov ecx, ebx ; too much to copy, limit to ebx 275 jmp .do_stack_copying 276 .do_stack_buffer_big_enough: 277 mov ebx, ecx ; ecx is smaller, update ebx for cbStackValid 278 279 .do_stack_copying: 280 mov rsi, rsp 281 rep movsb 282 283 .skip_stack_mirroring: 284 mov [xDX + VMMR0JMPBUF.cbStackValid], ebx 285 286 ; 287 ; Do buffer mirroring. 288 ; 289 mov rdi, [xDX + VMMR0JMPBUF.pMirrorBuf] 290 or rdi, rdi 291 jz .skip_buffer_mirroring 292 mov rsi, rdx 293 mov ecx, VMMR0JMPBUF_size 294 rep movsb 295 .skip_buffer_mirroring: 488 296 489 297 ; … … 522 330 ; 523 331 .nok: 524 %ifdef VBOX_STRICT525 pop rax ; magic526 cmp rax, RESUME_MAGIC527 je .magic_ok528 mov ecx, 0123h529 mov [rcx], edx530 .magic_ok:531 %endif532 332 mov eax, VERR_VMM_LONG_JMP_ERROR 533 333 %ifdef RT_OS_WINDOWS -
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r92392 r92408 165 165 * Internal Functions * 166 166 *********************************************************************************************************************************/ 167 static int vmmR3InitStacks(PVM pVM);168 167 static void vmmR3InitRegisterStats(PVM pVM); 169 168 static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM); … … 280 279 return rc; 281 280 282 /* 283 * Init various sub-components. 284 */ 285 rc = vmmR3InitStacks(pVM); 281 #ifdef VBOX_WITH_NMI 282 /* 283 * Allocate mapping for the host APIC. 284 */ 285 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase); 286 AssertRC(rc); 287 #endif 286 288 if (RT_SUCCESS(rc)) 287 289 { 288 #ifdef VBOX_WITH_NMI289 290 /* 290 * Allocate mapping for the host APIC.291 * Start the log flusher thread. 291 292 */ 292 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase); 293 AssertRC(rc); 294 #endif 293 rc = RTThreadCreate(&pVM->vmm.s.hLogFlusherThread, vmmR3LogFlusher, pVM, 0 /*cbStack*/, 294 RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "R0LogWrk"); 295 295 if (RT_SUCCESS(rc)) 296 296 { 297 297 298 /* 298 * Start the log flusher thread.299 * Debug info and statistics. 299 300 */ 300 rc = RTThreadCreate(&pVM->vmm.s.hLogFlusherThread, vmmR3LogFlusher, pVM, 0 /*cbStack*/, 301 RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "R0LogWrk"); 302 if (RT_SUCCESS(rc)) 303 { 304 305 /* 306 * Debug info and statistics. 307 */ 308 DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF); 309 vmmR3InitRegisterStats(pVM); 310 vmmInitFormatTypes(); 311 312 return VINF_SUCCESS; 313 } 301 DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF); 302 vmmR3InitRegisterStats(pVM); 303 vmmInitFormatTypes(); 304 305 return VINF_SUCCESS; 314 306 } 315 307 } 316 308 /** @todo Need failure cleanup? 
*/ 317 318 return rc;319 }320 321 322 /**323 * Allocate & setup the VMM RC stack(s) (for EMTs).324 *325 * The stacks are also used for long jumps in Ring-0.326 *327 * @returns VBox status code.328 * @param pVM The cross context VM structure.329 *330 * @remarks The optional guard page gets it protection setup up during R3 init331 * completion because of init order issues.332 */333 static int vmmR3InitStacks(PVM pVM)334 {335 int rc = VINF_SUCCESS;336 #ifdef VMM_R0_SWITCH_STACK337 uint32_t fFlags = MMHYPER_AONR_FLAGS_KERNEL_MAPPING;338 #else339 uint32_t fFlags = 0;340 #endif341 342 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)343 {344 PVMCPU pVCpu = pVM->apCpusR3[idCpu];345 346 #ifdef VBOX_STRICT_VMM_STACK347 rc = MMR3HyperAllocOnceNoRelEx(pVM, PAGE_SIZE + VMM_STACK_SIZE + PAGE_SIZE,348 #else349 rc = MMR3HyperAllocOnceNoRelEx(pVM, VMM_STACK_SIZE,350 #endif351 PAGE_SIZE, MM_TAG_VMM, fFlags, (void **)&pVCpu->vmm.s.pbEMTStackR3);352 if (RT_SUCCESS(rc))353 {354 #ifdef VBOX_STRICT_VMM_STACK355 pVCpu->vmm.s.pbEMTStackR3 += PAGE_SIZE;356 #endif357 pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);358 359 }360 }361 309 362 310 return rc; … … 433 381 STAMR3Register(pVM, &pVM->vmm.s.StatLogFlusherNoWakeUp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, "/VMM/LogFlush/00-NoWakups", STAMUNIT_OCCURENCES, "Times the flusher thread didn't need waking up."); 434 382 435 #ifdef VBOX_WITH_STATISTICS436 for (VMCPUID i = 0; i < pVM->cCpus; i++)437 {438 PVMCPU pVCpu = pVM->apCpusR3[i];439 STAMR3RegisterF(pVM, &pVCpu->vmm.s.CallRing3JmpBufR0.cbUsedMax, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Max amount of stack used.", "/VMM/Stack/CPU%u/Max", i);440 STAMR3RegisterF(pVM, &pVCpu->vmm.s.CallRing3JmpBufR0.cbUsedAvg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Average stack usage.", "/VMM/Stack/CPU%u/Avg", i);441 STAMR3RegisterF(pVM, &pVCpu->vmm.s.CallRing3JmpBufR0.cUsedTotal, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of stack usages.", "/VMM/Stack/CPU%u/Uses", i);442 }443 #endif444 383 for (VMCPUID i = 0; i < pVM->cCpus; i++) 445 384 { … … 2265 2204 AssertReturn(cbRead < ~(size_t)0 / 2, VERR_INVALID_PARAMETER); 2266 2205 2267 int rc; 2268 #ifdef VMM_R0_SWITCH_STACK 2269 RTHCUINTPTR off = R0Addr - MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3); 2270 #else 2271 RTHCUINTPTR off = pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack - (pVCpu->vmm.s.CallRing3JmpBufR0.SpCheck - R0Addr); 2272 #endif 2273 if ( off < VMM_STACK_SIZE 2274 && off + cbRead <= VMM_STACK_SIZE) 2275 { 2276 memcpy(pvBuf, &pVCpu->vmm.s.pbEMTStackR3[off], cbRead); 2277 rc = VINF_SUCCESS; 2206 /* 2207 * Hopefully we've got all the requested bits. If not supply what we 2208 * can and zero the remaining stuff. 2209 */ 2210 RTHCUINTPTR off = R0Addr - pVCpu->vmm.s.AssertJmpBuf.UnwindSp; 2211 if (off < pVCpu->vmm.s.AssertJmpBuf.cbStackValid) 2212 { 2213 size_t const cbValid = pVCpu->vmm.s.AssertJmpBuf.cbStackValid - off; 2214 if (cbRead <= cbValid) 2215 { 2216 memcpy(pvBuf, &pVCpu->vmm.s.abAssertStack[off], cbRead); 2217 return VINF_SUCCESS; 2218 } 2219 2220 memcpy(pvBuf, &pVCpu->vmm.s.abAssertStack[off], cbValid); 2221 RT_BZERO((uint8_t *)pvBuf + cbValid, cbRead - cbValid); 2278 2222 } 2279 2223 else 2280 rc = VERR_INVALID_POINTER; 2281 2282 /* Supply the setjmp return RIP/EIP. 
*/ 2283 if ( pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation + sizeof(RTR0UINTPTR) > R0Addr 2284 && pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcLocation < R0Addr + cbRead) 2285 { 2286 uint8_t const *pbSrc = (uint8_t const *)&pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcValue; 2287 size_t cbSrc = sizeof(pVCpu->vmm.s.CallRing3JmpBufR0.UnwindRetPcValue); 2224 RT_BZERO(pvBuf, cbRead); 2225 2226 /* 2227 * Supply the setjmp return RIP/EIP if requested. 2228 */ 2229 if ( pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation + sizeof(RTR0UINTPTR) > R0Addr 2230 && pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation < R0Addr + cbRead) 2231 { 2232 uint8_t const *pbSrc = (uint8_t const *)&pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue; 2233 size_t cbSrc = sizeof(pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue); 2288 2234 size_t offDst = 0; 2289 if (R0Addr < pVCpu->vmm.s. CallRing3JmpBufR0.UnwindRetPcLocation)2290 offDst = pVCpu->vmm.s. CallRing3JmpBufR0.UnwindRetPcLocation - R0Addr;2291 else if (R0Addr > pVCpu->vmm.s. CallRing3JmpBufR0.UnwindRetPcLocation)2292 { 2293 size_t offSrc = R0Addr - pVCpu->vmm.s. CallRing3JmpBufR0.UnwindRetPcLocation;2235 if (R0Addr < pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation) 2236 offDst = pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation - R0Addr; 2237 else if (R0Addr > pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation) 2238 { 2239 size_t offSrc = R0Addr - pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation; 2294 2240 Assert(offSrc < cbSrc); 2295 2241 pbSrc -= offSrc; … … 2300 2246 memcpy((uint8_t *)pvBuf + offDst, pbSrc, cbSrc); 2301 2247 2302 if (cbSrc == cbRead)2303 rc = VINF_SUCCESS;2304 } 2305 2306 return rc;2248 //if (cbSrc == cbRead) 2249 // rc = VINF_SUCCESS; 2250 } 2251 2252 return VINF_SUCCESS; 2307 2253 } 2308 2254 … … 2321 2267 2322 2268 /* 2269 * This is all we really need here if we had proper unwind info (win64 only)... 2270 */ 2271 pState->u.x86.auRegs[X86_GREG_xBP] = pVCpu->vmm.s.AssertJmpBuf.UnwindBp; 2272 pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindSp; 2273 pState->uPc = pVCpu->vmm.s.AssertJmpBuf.UnwindPc; 2274 2275 /* 2323 2276 * Locate the resume point on the stack. 2324 2277 */ 2325 #ifdef VMM_R0_SWITCH_STACK2326 uintptr_t off = pVCpu->vmm.s.CallRing3JmpBufR0.SpResume - MMHyperCCToR0(pVCpu->pVMR3, pVCpu->vmm.s.pbEMTStackR3);2327 AssertReturnVoid(off < VMM_STACK_SIZE);2328 #else2329 2278 uintptr_t off = 0; 2330 #endif2331 2279 2332 2280 #ifdef RT_ARCH_AMD64 2333 2281 /* 2334 * This code must match the .resume stuff in VMMR0JmpA-amd64.asm exactly. 2335 */ 2336 # ifdef VBOX_STRICT 2337 Assert(*(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off] == UINT32_C(0x7eadf00d)); 2338 off += 8; /* RESUME_MAGIC */ 2339 # endif 2282 * This code must match the vmmR0CallRing3LongJmp stack frame setup in VMMR0JmpA-amd64.asm exactly. 2283 */ 2340 2284 # ifdef RT_OS_WINDOWS 2341 2285 off += 0xa0; /* XMM6 thru XMM15 */ 2342 2286 # endif 2343 pState->u.x86.uRFlags = *(uint64_t const *)&pVCpu->vmm.s. pbEMTStackR3[off];2287 pState->u.x86.uRFlags = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2344 2288 off += 8; 2345 pState->u.x86.auRegs[X86_GREG_xBX] = *(uint64_t const *)&pVCpu->vmm.s. pbEMTStackR3[off];2289 pState->u.x86.auRegs[X86_GREG_xBX] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2346 2290 off += 8; 2347 2291 # ifdef RT_OS_WINDOWS 2348 pState->u.x86.auRegs[X86_GREG_xSI] = *(uint64_t const *)&pVCpu->vmm.s. 
pbEMTStackR3[off];2292 pState->u.x86.auRegs[X86_GREG_xSI] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2349 2293 off += 8; 2350 pState->u.x86.auRegs[X86_GREG_xDI] = *(uint64_t const *)&pVCpu->vmm.s. pbEMTStackR3[off];2294 pState->u.x86.auRegs[X86_GREG_xDI] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2351 2295 off += 8; 2352 2296 # endif 2353 pState->u.x86.auRegs[X86_GREG_x12] = *(uint64_t const *)&pVCpu->vmm.s. pbEMTStackR3[off];2297 pState->u.x86.auRegs[X86_GREG_x12] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2354 2298 off += 8; 2355 pState->u.x86.auRegs[X86_GREG_x13] = *(uint64_t const *)&pVCpu->vmm.s. pbEMTStackR3[off];2299 pState->u.x86.auRegs[X86_GREG_x13] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2356 2300 off += 8; 2357 pState->u.x86.auRegs[X86_GREG_x14] = *(uint64_t const *)&pVCpu->vmm.s. pbEMTStackR3[off];2301 pState->u.x86.auRegs[X86_GREG_x14] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2358 2302 off += 8; 2359 pState->u.x86.auRegs[X86_GREG_x15] = *(uint64_t const *)&pVCpu->vmm.s. pbEMTStackR3[off];2303 pState->u.x86.auRegs[X86_GREG_x15] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2360 2304 off += 8; 2361 pState->u.x86.auRegs[X86_GREG_xBP] = *(uint64_t const *)&pVCpu->vmm.s. pbEMTStackR3[off];2305 pState->u.x86.auRegs[X86_GREG_xBP] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2362 2306 off += 8; 2363 pState->uPc = *(uint64_t const *)&pVCpu->vmm.s. pbEMTStackR3[off];2364 off += 8;2307 pState->uPc = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2308 pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp; 2365 2309 2366 2310 #elif defined(RT_ARCH_X86) 2367 2311 /* 2368 * This code must match the .resume stuff in VMMR0JmpA-x86.asm exactly. 2369 */ 2370 # ifdef VBOX_STRICT 2371 Assert(*(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off] == UINT32_C(0x7eadf00d)); 2372 off += 4; /* RESUME_MAGIC */ 2373 # endif 2374 pState->u.x86.uRFlags = *(uint32_t const *)&pVCpu->vmm.s.pbEMTStackR3[off]; 2312 * This code must match the vmmR0CallRing3LongJmp stack frame setup in VMMR0JmpA-x86.asm exactly. 2313 */ 2314 pState->u.x86.uRFlags = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2375 2315 off += 4; 2376 pState->u.x86.auRegs[X86_GREG_xBX] = *(uint32_t const *)&pVCpu->vmm.s. pbEMTStackR3[off];2316 pState->u.x86.auRegs[X86_GREG_xBX] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2377 2317 off += 4; 2378 pState->u.x86.auRegs[X86_GREG_xSI] = *(uint32_t const *)&pVCpu->vmm.s. pbEMTStackR3[off];2318 pState->u.x86.auRegs[X86_GREG_xSI] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2379 2319 off += 4; 2380 pState->u.x86.auRegs[X86_GREG_xDI] = *(uint32_t const *)&pVCpu->vmm.s. pbEMTStackR3[off];2320 pState->u.x86.auRegs[X86_GREG_xDI] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2381 2321 off += 4; 2382 pState->u.x86.auRegs[X86_GREG_xBP] = *(uint32_t const *)&pVCpu->vmm.s. pbEMTStackR3[off];2322 pState->u.x86.auRegs[X86_GREG_xBP] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2383 2323 off += 4; 2384 pState->uPc = *(uint32_t const *)&pVCpu->vmm.s. 
pbEMTStackR3[off];2385 off += 4;2324 pState->uPc = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off]; 2325 pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp; 2386 2326 #else 2387 2327 # error "Port me" 2388 2328 #endif 2389 2390 /*2391 * This is all we really need here, though the above helps if the assembly2392 * doesn't contain unwind info (currently only on win/64, so that is useful).2393 */2394 pState->u.x86.auRegs[X86_GREG_xBP] = pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp;2395 pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.CallRing3JmpBufR0.SpResume;2396 2329 } 2397 2330 … … 2464 2397 static int vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu) 2465 2398 { 2466 /* 2467 * Signal a ring 0 hypervisor assertion. 2468 * Cancel the longjmp operation that's in progress. 2469 */ 2470 pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false; 2471 #ifdef RT_ARCH_X86 2472 pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0; 2473 #else 2474 pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0; 2475 #endif 2476 #ifdef VMM_R0_SWITCH_STACK 2477 *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker */ 2478 #endif 2399 RT_NOREF(pVCpu); 2479 2400 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1)); 2480 2401 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2)); -
trunk/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp
r90829 r92408 362 362 case VINF_EM_TRIPLE_FAULT: 363 363 case VERR_VMM_HYPER_CR3_MISMATCH: 364 case VERR_VMM_SET_JMP_ERROR:365 case VERR_VMM_SET_JMP_ABORTED_RESUME:366 case VERR_VMM_SET_JMP_STACK_OVERFLOW:367 364 case VERR_VMM_LONG_JMP_ERROR: 368 365 { … … 398 395 * Dump the relevant hypervisor registers and stack. 399 396 */ 400 if ( rcErr == VERR_VMM_RING0_ASSERTION /* fInRing3Call has already been cleared here. */ 401 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call) 397 if (rcErr == VERR_VMM_RING0_ASSERTION) 402 398 { 403 399 /* Dump the jmpbuf. */ 404 400 pHlp->pfnPrintf(pHlp, 405 401 "!!\n" 406 "!! CallRing3JmpBuf:\n"402 "!! AssertJmpBuf:\n" 407 403 "!!\n"); 408 404 pHlp->pfnPrintf(pHlp, 409 "SavedEsp=%RHv SavedEbp=%RHv SpResume=%RHv SpCheck=%RHv\n", 410 pVCpu->vmm.s.CallRing3JmpBufR0.SavedEsp, 411 pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp, 412 pVCpu->vmm.s.CallRing3JmpBufR0.SpResume, 413 pVCpu->vmm.s.CallRing3JmpBufR0.SpCheck); 414 pHlp->pfnPrintf(pHlp, 415 "pvSavedStack=%RHv cbSavedStack=%#x fInRing3Call=%RTbool\n", 416 pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack, 417 pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack, 418 pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call); 419 pHlp->pfnPrintf(pHlp, 420 "cbUsedMax=%#x cbUsedAvg=%#x cbUsedTotal=%#llx cUsedTotal=%#llx\n", 421 pVCpu->vmm.s.CallRing3JmpBufR0.cbUsedMax, 422 pVCpu->vmm.s.CallRing3JmpBufR0.cbUsedAvg, 423 pVCpu->vmm.s.CallRing3JmpBufR0.cbUsedTotal, 424 pVCpu->vmm.s.CallRing3JmpBufR0.cUsedTotal); 405 "UnwindSp=%RHv UnwindRetSp=%RHv UnwindBp=%RHv UnwindPc=%RHv\n", 406 pVCpu->vmm.s.AssertJmpBuf.UnwindSp, 407 pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp, 408 pVCpu->vmm.s.AssertJmpBuf.UnwindBp, 409 pVCpu->vmm.s.AssertJmpBuf.UnwindPc); 410 pHlp->pfnPrintf(pHlp, 411 "UnwindRetPcValue=%RHv UnwindRetPcLocation=%RHv\n", 412 pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue, 413 pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation); 425 414 pHlp->pfnPrintf(pHlp, 426 415 "pfn=%RHv pvUser1=%RHv pvUser2=%RHv\n", 427 pVCpu->vmm.s. CallRing3JmpBufR0.pfn,428 pVCpu->vmm.s. CallRing3JmpBufR0.pvUser1,429 pVCpu->vmm.s. CallRing3JmpBufR0.pvUser2);416 pVCpu->vmm.s.AssertJmpBuf.pfn, 417 pVCpu->vmm.s.AssertJmpBuf.pvUser1, 418 pVCpu->vmm.s.AssertJmpBuf.pvUser2); 430 419 431 420 /* Dump the resume register frame on the stack. */ 432 PRTHCUINTPTR pBP; 433 #ifdef VMM_R0_SWITCH_STACK 434 pBP = (PRTHCUINTPTR)&pVCpu->vmm.s.pbEMTStackR3[ pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp 435 - MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3)]; 436 #else 437 pBP = (PRTHCUINTPTR)&pVCpu->vmm.s.pbEMTStackR3[ pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack 438 - pVCpu->vmm.s.CallRing3JmpBufR0.SpCheck 439 + pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp]; 440 #endif 421 PRTHCUINTPTR const pBP = (PRTHCUINTPTR)&pVCpu->vmm.s.abAssertStack[ pVCpu->vmm.s.AssertJmpBuf.UnwindBp 422 - pVCpu->vmm.s.AssertJmpBuf.UnwindSp]; 441 423 #if HC_ARCH_BITS == 32 442 424 pHlp->pfnPrintf(pHlp, … … 445 427 , 446 428 pBP[-3], pBP[-2], pBP[-1], 447 pBP[1], pVCpu->vmm.s. CallRing3JmpBufR0.SavedEbp - 8, pBP[0], pBP[-4]);429 pBP[1], pVCpu->vmm.s.AssertJmpBuf.SavedEbp - 8, pBP[0], pBP[-4]); 448 430 #else 449 431 # ifdef RT_OS_WINDOWS … … 459 441 pBP[-4], pBP[-3], 460 442 pBP[-2], pBP[-1], 461 pBP[1], pVCpu->vmm.s. CallRing3JmpBufR0.SavedEbp - 16, pBP[0], pBP[-8]);443 pBP[1], pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp, pBP[0], pBP[-8]); 462 444 # else 463 445 pHlp->pfnPrintf(pHlp, … … 471 453 pBP[-4], pBP[-3], 472 454 pBP[-2], pBP[-1], 473 pBP[1], pVCpu->vmm.s. 
CallRing3JmpBufR0.SavedEbp - 16, pBP[0], pBP[-6]);455 pBP[1], pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp, pBP[0], pBP[-6]); 474 456 # endif 475 457 #endif … … 479 461 PCDBGFSTACKFRAME pFirstFrame; 480 462 rc2 = DBGFR3StackWalkBeginEx(pVM->pUVM, pVCpu->idCpu, DBGFCODETYPE_RING0, 481 DBGFR3AddrFromHostR0(&AddrBp, pVCpu->vmm.s. CallRing3JmpBufR0.SavedEbp),482 DBGFR3AddrFromHostR0(&AddrSp, pVCpu->vmm.s. CallRing3JmpBufR0.SpResume),483 DBGFR3AddrFromHostR0(&AddrPc, pVCpu->vmm.s. CallRing3JmpBufR0.SavedEipForUnwind),463 DBGFR3AddrFromHostR0(&AddrBp, pVCpu->vmm.s.AssertJmpBuf.UnwindBp), 464 DBGFR3AddrFromHostR0(&AddrSp, pVCpu->vmm.s.AssertJmpBuf.UnwindSp), 465 DBGFR3AddrFromHostR0(&AddrPc, pVCpu->vmm.s.AssertJmpBuf.UnwindPc), 484 466 RTDBGRETURNTYPE_INVALID, &pFirstFrame); 485 467 if (RT_SUCCESS(rc2)) … … 548 530 549 531 /* Symbols on the stack. */ 550 #ifdef VMM_R0_SWITCH_STACK 551 uint32_t const iLast = VMM_STACK_SIZE / sizeof(uintptr_t); 552 uint32_t iAddr = (uint32_t)( pVCpu->vmm.s.CallRing3JmpBufR0.SavedEsp 553 - MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3)) / sizeof(uintptr_t); 554 if (iAddr > iLast) 555 iAddr = 0; 556 #else 557 uint32_t const iLast = RT_MIN(pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack, VMM_STACK_SIZE) 558 / sizeof(uintptr_t); 559 uint32_t iAddr = 0; 560 #endif 532 uint32_t const cbRawStack = RT_MIN(pVCpu->vmm.s.AssertJmpBuf.cbStackValid, sizeof(pVCpu->vmm.s.abAssertStack)); 533 uintptr_t const * const pauAddr = (uintptr_t const *)&pVCpu->vmm.s.abAssertStack[0]; 534 uint32_t const iEnd = cbRawStack / sizeof(uintptr_t); 535 uint32_t iAddr = 0; 561 536 pHlp->pfnPrintf(pHlp, 562 537 "!!\n" 563 "!! Addresses on the stack (iAddr=%#x, i Last=%#x)\n"538 "!! Addresses on the stack (iAddr=%#x, iEnd=%#x)\n" 564 539 "!!\n", 565 iAddr, iLast); 566 uintptr_t const *paAddr = (uintptr_t const *)pVCpu->vmm.s.pbEMTStackR3; 567 while (iAddr < iLast) 540 iAddr, iEnd); 541 while (iAddr < iEnd) 568 542 { 569 uintptr_t const uAddr = pa Addr[iAddr];543 uintptr_t const uAddr = pauAddr[iAddr]; 570 544 if (uAddr > X86_PAGE_SIZE) 571 545 { 572 546 DBGFADDRESS Addr; 573 547 DBGFR3AddrFromFlat(pVM->pUVM, &Addr, uAddr); 574 RTGCINTPTR offDisp = 0; 575 PRTDBGSYMBOL pSym = DBGFR3AsSymbolByAddrA(pVM->pUVM, DBGF_AS_R0, &Addr, 576 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED, 577 &offDisp, NULL); 578 RTGCINTPTR offLineDisp; 579 PRTDBGLINE pLine = DBGFR3AsLineByAddrA(pVM->pUVM, DBGF_AS_R0, &Addr, &offLineDisp, NULL); 548 RTGCINTPTR offDisp = 0; 549 RTGCINTPTR offLineDisp = 0; 550 PRTDBGSYMBOL pSym = DBGFR3AsSymbolByAddrA(pVM->pUVM, DBGF_AS_R0, &Addr, 551 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL 552 | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED, 553 &offDisp, NULL); 554 PRTDBGLINE pLine = DBGFR3AsLineByAddrA(pVM->pUVM, DBGF_AS_R0, &Addr, &offLineDisp, NULL); 580 555 if (pLine || pSym) 581 556 { … … 599 574 "!!\n" 600 575 "!! Raw stack (mind the direction).\n" 601 "!! pbEMTStackR0=%RHv pbEMTStackBottomR0=%RHv VMM_STACK_SIZE=%#x\n"576 "!! pbEMTStackR0=%RHv cbRawStack=%#x\n" 602 577 "!! pbEmtStackR3=%p\n" 603 578 "!!\n" 604 579 "%.*Rhxd\n", 605 MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3), 606 MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3) + VMM_STACK_SIZE, 607 VMM_STACK_SIZE, 608 pVCpu->vmm.s.pbEMTStackR3, 609 VMM_STACK_SIZE, pVCpu->vmm.s.pbEMTStackR3); 580 pVCpu->vmm.s.AssertJmpBuf.UnwindSp, cbRawStack, 581 &pVCpu->vmm.s.abAssertStack[0], 582 cbRawStack, &pVCpu->vmm.s.abAssertStack[0]); 610 583 } 611 584 else -
trunk/src/VBox/VMM/VMMRZ/VMMRZ.cpp
r92395 → r92408

    44    44     #endif
    45    45
    46         -      Assert(pVCpu->vmm.s.cCallRing3Disabled < 16);
    47         -      if (ASMAtomicUoIncU32(&pVCpu->vmm.s.cCallRing3Disabled) == 1)
          46 +      Assert(pVCpu->vmmr0.s.cCallRing3Disabled < 16);
          47 +      if (ASMAtomicUoIncU32(&pVCpu->vmmr0.s.cCallRing3Disabled) == 1)
    48    48         {
    49    49     #ifdef IN_RC
   …     …
    73    73     #endif
    74    74
    75         -      Assert(pVCpu->vmm.s.cCallRing3Disabled > 0);
    76         -      if (ASMAtomicUoDecU32(&pVCpu->vmm.s.cCallRing3Disabled) == 0)
          75 +      Assert(pVCpu->vmmr0.s.cCallRing3Disabled > 0);
          76 +      if (ASMAtomicUoDecU32(&pVCpu->vmmr0.s.cCallRing3Disabled) == 0)
    77    77         {
    78    78     #ifdef IN_RC
   …     …
    98    98     {
    99    99         VMCPU_ASSERT_EMT(pVCpu);
   100         -      Assert(pVCpu->vmm.s.cCallRing3Disabled <= 16);
   101         -      return pVCpu->vmm.s.cCallRing3Disabled == 0;
         100 +      Assert(pVCpu->vmmr0.s.cCallRing3Disabled <= 16);
         101 +      return pVCpu->vmmr0.s.cCallRing3Disabled == 0;
   102   102     }
   103   103
trunk/src/VBox/VMM/include/VMMInternal.h
r92392 r92408 138 138 139 139 140 /** Pointer to a ring-0 jump buffer. */ 141 typedef struct VMMR0JMPBUF *PVMMR0JMPBUF; 140 142 /** 141 143 * Jump buffer for the setjmp/longjmp like constructs used to … … 184 186 /** @} */ 185 187 186 /** Flag that indicates that we've done a ring-3 call. */ 187 bool fInRing3Call; 188 /** The number of bytes we've saved. */ 189 uint32_t cbSavedStack; 190 /** Pointer to the buffer used to save the stack. 191 * This is assumed to be 8KB. */ 192 RTR0PTR pvSavedStack; 193 /** Esp we we match against esp on resume to make sure the stack wasn't relocated. */ 194 RTHCUINTREG SpCheck; 195 /** The esp we should resume execution with after the restore. */ 196 RTHCUINTREG SpResume; 197 /** ESP/RSP at the time of the jump to ring 3. */ 198 RTHCUINTREG SavedEsp; 199 /** EBP/RBP at the time of the jump to ring 3. */ 200 RTHCUINTREG SavedEbp; 201 /** EIP/RIP within vmmR0CallRing3LongJmp for assisting unwinding. */ 202 RTHCUINTREG SavedEipForUnwind; 188 /** RSP/ESP at the time of the stack mirroring (what pvStackBuf starts with). */ 189 RTHCUINTREG UnwindSp; 190 /** RSP/ESP at the time of the long jump call. */ 191 RTHCUINTREG UnwindRetSp; 192 /** RBP/EBP inside the vmmR0CallRing3LongJmp frame. */ 193 RTHCUINTREG UnwindBp; 194 /** RIP/EIP within vmmR0CallRing3LongJmp for assisting unwinding. */ 195 RTHCUINTREG UnwindPc; 203 196 /** Unwind: The vmmR0CallRing3SetJmp return address value. */ 204 197 RTHCUINTREG UnwindRetPcValue; … … 213 206 RTHCUINTREG pvUser2; 214 207 215 #if HC_ARCH_BITS == 32 216 /** Alignment padding. */ 217 uint32_t uPadding; 218 #endif 219 220 /** Stats: Max amount of stack used. */ 221 uint32_t cbUsedMax; 222 /** Stats: Average stack usage. (Avg = cbUsedTotal / cUsedTotal) */ 223 uint32_t cbUsedAvg; 224 /** Stats: Total amount of stack used. */ 225 uint64_t cbUsedTotal; 226 /** Stats: Number of stack usages. */ 227 uint64_t cUsedTotal; 208 /** Number of valid bytes in pvStackBuf. */ 209 uint32_t cbStackValid; 210 /** Size of buffer pvStackBuf points to. */ 211 uint32_t cbStackBuf; 212 /** Pointer to buffer for mirroring the stack. Optional. */ 213 RTR0PTR pvStackBuf; 214 /** Pointer to a ring-3 accessible jump buffer structure for automatic 215 * mirroring on longjmp. Optional. */ 216 R0PTRTYPE(PVMMR0JMPBUF) pMirrorBuf; 228 217 } VMMR0JMPBUF; 229 /** Pointer to a ring-0 jump buffer. */230 typedef VMMR0JMPBUF *PVMMR0JMPBUF;231 218 232 219 … … 429 416 uint32_t u32Padding0; 430 417 431 /** VMM stack, pointer to the top of the stack in R3.432 * Stack is allocated from the hypervisor heap and is page aligned433 * and always writable in RC. */434 R3PTRTYPE(uint8_t *) pbEMTStackR3;435 436 418 /** @name Rendezvous 437 419 * @{ */ … … 465 447 /** @} */ 466 448 467 /** @name Call Ring-3 468 * Formerly known as host calls. 469 * @{ */ 470 /** The disable counter. */ 471 uint32_t cCallRing3Disabled; 472 uint32_t u32Padding3; 473 /** Ring-0 assertion notification callback. */ 474 R0PTRTYPE(PFNVMMR0ASSERTIONNOTIFICATION) pfnRing0AssertCallback; 475 /** Argument for pfnRing0AssertionNotificationCallback. */ 476 R0PTRTYPE(void *) pvRing0AssertCallbackUser; 477 /** The Ring-0 jmp buffer. 478 * @remarks The size of this type isn't stable in assembly, so don't put 479 * anything that needs to be accessed from assembly after it. */ 480 VMMR0JMPBUF CallRing3JmpBufR0; 449 /** @name Ring-0 assertion info for this EMT. 450 * @{ */ 451 /** Copy of the ring-0 jmp buffer after an assertion. */ 452 VMMR0JMPBUF AssertJmpBuf; 453 /** Copy of the assertion stack. 
*/ 454 uint8_t abAssertStack[8192]; 481 455 /** @} */ 482 456 … … 540 514 * @note Cannot be put on the stack as the location may change and upset the 541 515 * validation of resume-after-ring-3-call logic. 516 * @todo This no longer needs to be here now that we don't call ring-3 and mess 517 * around with stack restoring/switching. 542 518 * @{ */ 543 519 PGVM pGVM; … … 547 523 uint64_t u64Arg; 548 524 PSUPDRVSESSION pSession; 525 /** @} */ 526 527 /** @name Ring-0 setjmp / assertion handling. 528 * @{ */ 529 /** The ring-0 setjmp buffer. */ 530 VMMR0JMPBUF AssertJmpBuf; 531 /** The disable counter. */ 532 uint32_t cCallRing3Disabled; 533 uint32_t u32Padding3; 534 /** Ring-0 assertion notification callback. */ 535 R0PTRTYPE(PFNVMMR0ASSERTIONNOTIFICATION) pfnAssertCallback; 536 /** Argument for pfnRing0AssertionNotificationCallback. */ 537 R0PTRTYPE(void *) pvAssertCallbackUser; 549 538 /** @} */ 550 539 … … 569 558 AssertCompile(RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.s.RelLogger) 570 559 == RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.aLoggers) + sizeof(VMMR0PERVCPULOGGER) * VMMLOGGER_IDX_RELEASE); 560 AssertCompileMemberAlignment(VMMR0PERVCPU, AssertJmpBuf, 64); 571 561 /** Pointer to VMM ring-0 VMCPU instance data. */ 572 562 typedef VMMR0PERVCPU *PVMMR0PERVCPU; -
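The reworked VMMR0JMPBUF keeps only the traditional register save area, the unwind hints and an optional stack/jump-buffer mirror, while the ring-3-call state and the stack-usage statistics are gone. Below is a minimal sketch of how the new mirror fields get wired up, modelled on the tstVMMR0CallHost-1 initialization further down in this changeset; it is not the actual VMM setup code, and the include set is an assumption:

    #include <iprt/param.h>     /* PAGE_SIZE */
    #include <iprt/string.h>    /* RT_ZERO */
    #include <iprt/test.h>      /* RTTEST, RTTestGuardedAllocTail */
    #include "VMMInternal.h"    /* VMMR0JMPBUF */

    static VMMR0JMPBUF g_JmpSketch;        /* the jump buffer being armed */
    static VMMR0JMPBUF g_JmpMirrorSketch;  /* receives a copy of the buffer for unwinding/analysis */

    static void sketchInitJmpBuf(RTTEST hTest)
    {
        RT_ZERO(g_JmpSketch);
        g_JmpSketch.cbStackBuf = PAGE_SIZE;                                           /* size of the optional stack mirror */
        g_JmpSketch.pvStackBuf = (uintptr_t)RTTestGuardedAllocTail(hTest, PAGE_SIZE); /* buffer the stack is copied into */
        g_JmpSketch.pMirrorBuf = (uintptr_t)&g_JmpMirrorSketch;                       /* where the jump buffer is mirrored */
    }

Both pvStackBuf and pMirrorBuf are marked optional in the structure comments, so a caller that only needs the setjmp/longjmp behaviour can leave them zero.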
trunk/src/VBox/VMM/include/VMMInternal.mac
r92392 r92408 19 19 %include "VBox/sup.mac" 20 20 21 ; 22 ; Determine the default stack switching unless specified explicitly. 23 ; 24 %ifndef VMM_R0_SWITCH_STACK 25 %ifndef VMM_R0_NO_SWITCH_STACK 26 %ifdef RT_OS_DARWIN 27 %define VMM_R0_SWITCH_STACK 28 %endif 21 22 struc VMMR0JMPBUF 23 ; 24 ; traditional jmp_buf 25 ; 26 %ifdef RT_ARCH_X86 27 .ebx resd 1 28 .esi resd 1 29 .edi resd 1 30 .ebp resd 1 31 .esp resd 1 32 .eip resd 1 33 .eflags resd 1 34 %endif 35 %ifdef RT_ARCH_AMD64 36 .rbx resq 1 37 %ifdef RT_OS_WINDOWS 38 .rsi resq 1 39 .rdi resq 1 29 40 %endif 41 .rbp resq 1 42 .r12 resq 1 43 .r13 resq 1 44 .r14 resq 1 45 .r15 resq 1 46 .rsp resq 1 47 .rip resq 1 48 %ifdef RT_OS_WINDOWS 49 .xmm6 resq 2 50 .xmm7 resq 2 51 .xmm8 resq 2 52 .xmm9 resq 2 53 .xmm10 resq 2 54 .xmm11 resq 2 55 .xmm12 resq 2 56 .xmm13 resq 2 57 .xmm14 resq 2 58 .xmm15 resq 2 59 %endif 60 .rflags resq 1 30 61 %endif 31 62 63 ; 64 ; Additional state and stack info for unwinding. 65 ; 66 .UnwindSp RTR0PTR_RES 1 67 .UnwindRetSp RTR0PTR_RES 1 68 .UnwindBp RTR0PTR_RES 1 69 .UnwindPc RTR0PTR_RES 1 70 .UnwindRetPcValue RTR0PTR_RES 1 71 .UnwindRetPcLocation RTR0PTR_RES 1 32 72 33 struc VMMR0JMPBUF 34 %ifdef RT_ARCH_X86 35 ; traditional jmp_buf 36 .ebx resd 1 37 .esi resd 1 38 .edi resd 1 39 .ebp resd 1 40 .esp resd 1 41 .eip resd 1 42 .eflags resd 1 73 ; 74 ; Info about what we were doing in case it's helpful. 75 ; 76 .pfn RTR0PTR_RES 1 77 .pvUser1 RTR0PTR_RES 1 78 .pvUser2 RTR0PTR_RES 1 43 79 44 ; additional state and stack info. 45 .fInRing3Call resd 1 46 .cbSavedStack resd 1 47 .pvSavedStack resd 1 48 .SpCheck resd 1 49 .SpResume resd 1 50 .SavedEsp resd 1 51 .SavedEbp resd 1 52 .SavedEipForUnwind resd 1 53 .UnwindRetPcValue resd 1 54 .UnwindRetPcLocation resd 1 55 .pfn resd 1 56 .pvUser1 resd 1 57 .pvUser2 resd 1 58 %endif 59 %ifdef RT_ARCH_AMD64 60 ; traditional jmp_buf 61 .rbx resq 1 62 %ifdef RT_OS_WINDOWS 63 .rsi resq 1 64 .rdi resq 1 65 %endif 66 .rbp resq 1 67 .r12 resq 1 68 .r13 resq 1 69 .r14 resq 1 70 .r15 resq 1 71 .rsp resq 1 72 .rip resq 1 73 %ifdef RT_OS_WINDOWS 74 .xmm6 resq 2 75 .xmm7 resq 2 76 .xmm8 resq 2 77 .xmm9 resq 2 78 .xmm10 resq 2 79 .xmm11 resq 2 80 .xmm12 resq 2 81 .xmm13 resq 2 82 .xmm14 resq 2 83 .xmm15 resq 2 84 %endif 85 .rflags resq 1 86 87 ; additional state and stack info. 88 .fInRing3Call resd 1 89 .cbSavedStack resd 1 90 .pvSavedStack resq 1 91 .SpCheck resq 1 92 .SpResume resq 1 93 .SavedEsp resq 1 94 .SavedEbp resq 1 95 .SavedEipForUnwind resq 1 96 .UnwindRetPcValue resq 1 97 .UnwindRetPcLocation resq 1 98 .pfn resq 1 99 .pvUser1 resq 1 100 .pvUser2 resq 1 101 %endif 102 103 ; Statistics 104 alignb 8 105 .cbUsedMax resd 1 106 .cbUsedAvg resd 1 107 .cbUsedTotal resq 1 108 .cUsedTotal resq 1 80 ; 81 ; For mirroring the jump buffer and stack to ring-3 for unwinding and analysis. 82 ; 83 .cbStackValid resd 1 84 .cbStackBuf resd 1 85 .pvStackBuf RTR0PTR_RES 1 86 .pMirrorBuf RTR0PTR_RES 1 109 87 endstruc 110 88 … … 114 92 .iLastGZRc resd 1 115 93 alignb 8 116 .pbEMTStackR3 RTR3PTR_RES 1117 94 118 95 .fInRendezvous resb 1 … … 127 104 .TracerCtx resb SUPDRVTRACERUSRCTX64_size 128 105 129 .cCallRing3Disabled resd 1130 106 alignb 8 131 .pfnRing0AssertCallback RTR0PTR_RES 1 132 .pvRing0AssertCallbackUser RTR0PTR_RES 1 133 alignb 16 134 .CallRing3JmpBufR0 resb 1 107 .AssertJmpBuf resb 1 135 108 endstruc 136 109 -
trunk/src/VBox/VMM/testcase/Makefile.kmk
r91775 r92408 71 71 tstSSM \ 72 72 tstVMMR0CallHost-1 \ 73 tstVMMR0CallHost-2 \74 73 tstX86-FpuSaveRestore 75 74 ifn1of ($(KBUILD_TARGET).$(KBUILD_TARGET_ARCH), solaris.x86 solaris.amd64 win.amd64 ) ## TODO: Fix the code. … … 300 299 301 300 # 302 # Two testcases for checking the ring- 3 "long jump"code.301 # Two testcases for checking the ring-0 setjmp/longjmp code. 303 302 # 304 303 tstVMMR0CallHost-1_TEMPLATE = VBOXR3TSTEXE 305 tstVMMR0CallHost-1_DEFS = VMM_R0_NO_SWITCH_STACK306 304 tstVMMR0CallHost-1_INCS = $(VBOX_PATH_VMM_SRC)/include 307 305 tstVMMR0CallHost-1_SOURCES = \ … … 312 310 $(VBOX_PATH_VMM_SRC)/VMMR0/VMMR0JmpA-x86.asm 313 311 314 tstVMMR0CallHost-2_EXTENDS = tstVMMR0CallHost-1315 tstVMMR0CallHost-2_DEFS = VMM_R0_SWITCH_STACK316 tstVMMR0CallHost-2_SOURCES.amd64 = \317 $(tstVMMR0CallHost-1_SOURCES.amd64) \318 tstVMMR0CallHost-2A.asm319 312 320 313 # -
trunk/src/VBox/VMM/testcase/tstVMMR0CallHost-1.cpp
r91806 r92408 36 36 37 37 /********************************************************************************************************************************* 38 * Defined Constants And Macros *39 *********************************************************************************************************************************/40 #if !defined(VMM_R0_SWITCH_STACK) && !defined(VMM_R0_NO_SWITCH_STACK)41 # error "VMM_R0_SWITCH_STACK or VMM_R0_NO_SWITCH_STACK has to be defined."42 #endif43 44 45 /*********************************************************************************************************************************46 38 * Global Variables * 47 39 *********************************************************************************************************************************/ 48 40 /** The jump buffer. */ 49 41 static VMMR0JMPBUF g_Jmp; 42 /** The mirror jump buffer. */ 43 static VMMR0JMPBUF g_JmpMirror; 50 44 /** The number of jumps we've done. */ 51 45 static unsigned volatile g_cJmps; … … 67 61 char *pv = (char *)alloca(cb); 68 62 RTStrPrintf(pv, cb, "i=%d%*s\n", i, cb, ""); 69 #ifdef VMM_R0_SWITCH_STACK 70 g_cbFooUsed = VMM_STACK_SIZE - ((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack); 71 RTTESTI_CHECK_MSG_RET(g_cbFooUsed < (intptr_t)VMM_STACK_SIZE - 128, ("%#x - (%p - %p) -> %#x; cb=%#x i=%d\n", VMM_STACK_SIZE, pv, g_Jmp.pvSavedStack, g_cbFooUsed, cb, i), -15); 72 #elif defined(RT_ARCH_AMD64) 63 #if defined(RT_ARCH_AMD64) 73 64 g_cbFooUsed = (uintptr_t)g_Jmp.rsp - (uintptr_t)pv; 74 65 RTTESTI_CHECK_MSG_RET(g_cbFooUsed < VMM_STACK_SIZE - 128, ("%p - %p -> %#x; cb=%#x i=%d\n", g_Jmp.rsp, pv, g_cbFooUsed, cb, i), -15); … … 122 113 void tst(int iFrom, int iTo, int iInc) 123 114 { 124 #ifdef VMM_R0_SWITCH_STACK 125 int const cIterations = iFrom > iTo ? iFrom - iTo : iTo - iFrom; 126 void *pvPrev = alloca(1); 127 #endif 128 129 RTR0PTR R0PtrSaved = g_Jmp.pvSavedStack; 130 RT_ZERO(g_Jmp); 131 g_Jmp.pvSavedStack = R0PtrSaved; 132 memset((void *)g_Jmp.pvSavedStack, '\0', VMM_STACK_SIZE); 115 RT_BZERO(&g_Jmp, RT_UOFFSETOF(VMMR0JMPBUF, cbStackBuf)); 116 g_Jmp.cbStackValid = _1M; 117 memset((void *)g_Jmp.pvStackBuf, '\0', g_Jmp.cbStackBuf); 133 118 g_cbFoo = 0; 134 119 g_cJmps = 0; … … 136 121 g_fInLongJmp = false; 137 122 138 int iOrg = iFrom;139 123 for (int i = iFrom, iItr = 0; i != iTo; i += iInc, iItr++) 140 124 { 141 if (!g_fInLongJmp) 142 iOrg = i; 143 int rc = stackRandom(&g_Jmp, (PFNVMMR0SETJMP)(uintptr_t)tst2, (PVM)(uintptr_t)iOrg, 0); 125 g_fInLongJmp = false; 126 int rc = stackRandom(&g_Jmp, (PFNVMMR0SETJMP)(uintptr_t)tst2, (PVM)(uintptr_t)i, 0); 144 127 RTTESTI_CHECK_MSG_RETV(rc == (g_fInLongJmp ? 42 : 0), 145 ("i=%d iOrg=%drc=%d setjmp; cbFoo=%#x cbFooUsed=%#x fInLongJmp=%d\n",146 i, iOrg,rc, g_cbFoo, g_cbFooUsed, g_fInLongJmp));128 ("i=%d rc=%d setjmp; cbFoo=%#x cbFooUsed=%#x fInLongJmp=%d\n", 129 i, rc, g_cbFoo, g_cbFooUsed, g_fInLongJmp)); 147 130 148 #ifdef VMM_R0_SWITCH_STACK149 /* Make the stack pointer slide for the second half of the calls. */150 if (iItr >= cIterations / 2)151 {152 /* Note! gcc does funny rounding up of alloca(). */153 # if !defined(VBOX_WITH_GCC_SANITIZER) && !defined(__MSVC_RUNTIME_CHECKS)154 void *pv2 = alloca((i % 63) | 1);155 size_t cb2 = (uintptr_t)pvPrev - (uintptr_t)pv2;156 # else157 size_t cb2 = ((i % 3) + 1) * 16; /* We get what we ask for here, and it's not at RSP/ESP due to guards. 
*/158 void *pv2 = alloca(cb2);159 # endif160 RTTESTI_CHECK_MSG(cb2 >= 16 && cb2 <= 128, ("cb2=%zu pv2=%p pvPrev=%p iAlloca=%d\n", cb2, pv2, pvPrev, iItr));161 memset(pv2, 0xff, cb2);162 memset(pvPrev, 0xee, 1);163 pvPrev = pv2;164 }165 #endif166 131 } 167 132 RTTESTI_CHECK_MSG_RETV(g_cJmps, ("No jumps!")); 168 if (g_Jmp.cbUsedAvg || g_Jmp.cUsedTotal)169 RTTestIPrintf(RTTESTLVL_ALWAYS, "cbUsedAvg=%#x cbUsedMax=%#x cUsedTotal=%#llx\n",170 g_Jmp.cbUsedAvg, g_Jmp.cbUsedMax, g_Jmp.cUsedTotal);171 133 } 172 173 174 #if defined(VMM_R0_SWITCH_STACK) && defined(RT_ARCH_AMD64)175 /*176 * Stack switch back tests.177 */178 RT_C_DECLS_BEGIN179 DECLCALLBACK(int) tstWrapped4( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4);180 DECLCALLBACK(int) StkBack_tstWrapped4( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4);181 DECLCALLBACK(int) tstWrapped5( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5);182 DECLCALLBACK(int) StkBack_tstWrapped5( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5);183 DECLCALLBACK(int) tstWrapped6( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6);184 DECLCALLBACK(int) StkBack_tstWrapped6( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6);185 DECLCALLBACK(int) tstWrapped7( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7);186 DECLCALLBACK(int) StkBack_tstWrapped7( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7);187 DECLCALLBACK(int) tstWrapped8( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8);188 DECLCALLBACK(int) StkBack_tstWrapped8( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8);189 DECLCALLBACK(int) tstWrapped9( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9);190 DECLCALLBACK(int) StkBack_tstWrapped9( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9);191 DECLCALLBACK(int) tstWrapped10( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10);192 DECLCALLBACK(int) StkBack_tstWrapped10(PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10);193 DECLCALLBACK(int) tstWrapped16( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10, uintptr_t u11, uintptr_t u12, uintptr_t u13, uintptr_t u14, uintptr_t u15, uintptr_t u16);194 DECLCALLBACK(int) StkBack_tstWrapped16(PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10, uintptr_t u11, uintptr_t u12, uintptr_t u13, uintptr_t u14, uintptr_t u15, uintptr_t u16);195 DECLCALLBACK(int) tstWrapped20( PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10, uintptr_t u11, uintptr_t u12, uintptr_t u13, uintptr_t u14, uintptr_t u15, uintptr_t u16, uintptr_t u17, uintptr_t u18, uintptr_t u19, uintptr_t u20);196 DECLCALLBACK(int) 
StkBack_tstWrapped20(PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10, uintptr_t u11, uintptr_t u12, uintptr_t u13, uintptr_t u14, uintptr_t u15, uintptr_t u16, uintptr_t u17, uintptr_t u18, uintptr_t u19, uintptr_t u20);197 198 DECLCALLBACK(int) tstWrappedThin(PVMMR0JMPBUF pJmp);199 DECLCALLBACK(int) StkBack_tstWrappedThin(PVMMR0JMPBUF pJmp);200 RT_C_DECLS_END201 202 203 204 DECLCALLBACK(int) StkBack_tstWrapped4(PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4)205 {206 RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);207 RTTESTI_CHECK_RET(u2 == (uintptr_t)2U, -2);208 RTTESTI_CHECK_RET(u3 == (uintptr_t)3U, -3);209 RTTESTI_CHECK_RET(u4 == (uintptr_t)4U, -4);210 211 void *pv = alloca(32);212 memset(pv, 'a', 32);213 RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);214 215 return 42;216 }217 218 219 DECLCALLBACK(int) StkBack_tstWrapped5(PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5)220 {221 RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);222 RTTESTI_CHECK_RET(u2 == ~(uintptr_t)2U, -2);223 RTTESTI_CHECK_RET(u3 == ~(uintptr_t)3U, -3);224 RTTESTI_CHECK_RET(u4 == ~(uintptr_t)4U, -4);225 RTTESTI_CHECK_RET(u5 == ~(uintptr_t)5U, -5);226 227 void *pv = alloca(32);228 memset(pv, 'a', 32);229 RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);230 231 return 42;232 }233 234 235 DECLCALLBACK(int) StkBack_tstWrapped6(PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6)236 {237 RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);238 RTTESTI_CHECK_RET(u2 == (uintptr_t)2U, -2);239 RTTESTI_CHECK_RET(u3 == (uintptr_t)3U, -3);240 RTTESTI_CHECK_RET(u4 == (uintptr_t)4U, -4);241 RTTESTI_CHECK_RET(u5 == (uintptr_t)5U, -5);242 RTTESTI_CHECK_RET(u6 == (uintptr_t)6U, -6);243 244 void *pv = alloca(32);245 memset(pv, 'a', 32);246 RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);247 248 return 42;249 }250 251 252 DECLCALLBACK(int) StkBack_tstWrapped7(PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7)253 {254 RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);255 RTTESTI_CHECK_RET(u2 == ~(uintptr_t)2U, -2);256 RTTESTI_CHECK_RET(u3 == ~(uintptr_t)3U, -3);257 RTTESTI_CHECK_RET(u4 == ~(uintptr_t)4U, -4);258 RTTESTI_CHECK_RET(u5 == ~(uintptr_t)5U, -5);259 RTTESTI_CHECK_RET(u6 == ~(uintptr_t)6U, -6);260 RTTESTI_CHECK_RET(u7 == ~(uintptr_t)7U, -7);261 262 void *pv = alloca(32);263 memset(pv, 'a', 32);264 RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);265 266 return 42;267 }268 269 270 DECLCALLBACK(int) StkBack_tstWrapped8(PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8)271 {272 RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);273 RTTESTI_CHECK_RET(u2 == (uintptr_t)2U, -2);274 RTTESTI_CHECK_RET(u3 == (uintptr_t)3U, -3);275 RTTESTI_CHECK_RET(u4 == (uintptr_t)4U, -4);276 RTTESTI_CHECK_RET(u5 == (uintptr_t)5U, -5);277 RTTESTI_CHECK_RET(u6 == (uintptr_t)6U, -6);278 RTTESTI_CHECK_RET(u7 == (uintptr_t)7U, -7);279 RTTESTI_CHECK_RET(u8 == (uintptr_t)8U, -8);280 281 void *pv = alloca(32);282 memset(pv, 'a', 32);283 RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);284 285 return 42;286 }287 288 DECLCALLBACK(int) StkBack_tstWrapped9(PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, 
uintptr_t u7, uintptr_t u8, uintptr_t u9)289 {290 RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);291 RTTESTI_CHECK_RET(u2 == ~(uintptr_t)2U, -2);292 RTTESTI_CHECK_RET(u3 == ~(uintptr_t)3U, -3);293 RTTESTI_CHECK_RET(u4 == ~(uintptr_t)4U, -4);294 RTTESTI_CHECK_RET(u5 == ~(uintptr_t)5U, -5);295 RTTESTI_CHECK_RET(u6 == ~(uintptr_t)6U, -6);296 RTTESTI_CHECK_RET(u7 == ~(uintptr_t)7U, -7);297 RTTESTI_CHECK_RET(u8 == ~(uintptr_t)8U, -8);298 RTTESTI_CHECK_RET(u9 == ~(uintptr_t)9U, -9);299 300 void *pv = alloca(32);301 memset(pv, 'a', 32);302 RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);303 304 return 42;305 }306 307 308 DECLCALLBACK(int) StkBack_tstWrapped10(PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10)309 {310 RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);311 RTTESTI_CHECK_RET(u2 == (uintptr_t)2U, -2);312 RTTESTI_CHECK_RET(u3 == (uintptr_t)3U, -3);313 RTTESTI_CHECK_RET(u4 == (uintptr_t)4U, -4);314 RTTESTI_CHECK_RET(u5 == (uintptr_t)5U, -5);315 RTTESTI_CHECK_RET(u6 == (uintptr_t)6U, -6);316 RTTESTI_CHECK_RET(u7 == (uintptr_t)7U, -7);317 RTTESTI_CHECK_RET(u8 == (uintptr_t)8U, -8);318 RTTESTI_CHECK_RET(u9 == (uintptr_t)9U, -9);319 RTTESTI_CHECK_RET(u10 == (uintptr_t)10U, -10);320 321 void *pv = alloca(32);322 memset(pv, 'a', 32);323 RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);324 325 return 42;326 }327 328 329 DECLCALLBACK(int) StkBack_tstWrapped16(PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10, uintptr_t u11, uintptr_t u12, uintptr_t u13, uintptr_t u14, uintptr_t u15, uintptr_t u16)330 {331 RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);332 RTTESTI_CHECK_RET(u2 == (uintptr_t)2U, -2);333 RTTESTI_CHECK_RET(u3 == (uintptr_t)3U, -3);334 RTTESTI_CHECK_RET(u4 == (uintptr_t)4U, -4);335 RTTESTI_CHECK_RET(u5 == (uintptr_t)5U, -5);336 RTTESTI_CHECK_RET(u6 == (uintptr_t)6U, -6);337 RTTESTI_CHECK_RET(u7 == (uintptr_t)7U, -7);338 RTTESTI_CHECK_RET(u8 == (uintptr_t)8U, -8);339 RTTESTI_CHECK_RET(u9 == (uintptr_t)9U, -9);340 RTTESTI_CHECK_RET(u10 == (uintptr_t)10U, -10);341 RTTESTI_CHECK_RET(u11 == (uintptr_t)11U, -11);342 RTTESTI_CHECK_RET(u12 == (uintptr_t)12U, -12);343 RTTESTI_CHECK_RET(u13 == (uintptr_t)13U, -13);344 RTTESTI_CHECK_RET(u14 == (uintptr_t)14U, -14);345 RTTESTI_CHECK_RET(u15 == (uintptr_t)15U, -15);346 RTTESTI_CHECK_RET(u16 == (uintptr_t)16U, -16);347 348 void *pv = alloca(32);349 memset(pv, 'a', 32);350 RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);351 352 return 42;353 }354 355 356 DECLCALLBACK(int) StkBack_tstWrapped20(PVMMR0JMPBUF pJmp, uintptr_t u2, uintptr_t u3, uintptr_t u4, uintptr_t u5, uintptr_t u6, uintptr_t u7, uintptr_t u8, uintptr_t u9, uintptr_t u10, uintptr_t u11, uintptr_t u12, uintptr_t u13, uintptr_t u14, uintptr_t u15, uintptr_t u16, uintptr_t u17, uintptr_t u18, uintptr_t u19, uintptr_t u20)357 {358 RTTESTI_CHECK_RET(pJmp == &g_Jmp, -1);359 RTTESTI_CHECK_RET(u2 == (uintptr_t)2U, -2);360 RTTESTI_CHECK_RET(u3 == (uintptr_t)3U, -3);361 RTTESTI_CHECK_RET(u4 == (uintptr_t)4U, -4);362 RTTESTI_CHECK_RET(u5 == (uintptr_t)5U, -5);363 RTTESTI_CHECK_RET(u6 == (uintptr_t)6U, -6);364 RTTESTI_CHECK_RET(u7 == (uintptr_t)7U, -7);365 RTTESTI_CHECK_RET(u8 == (uintptr_t)8U, -8);366 RTTESTI_CHECK_RET(u9 == (uintptr_t)9U, -9);367 RTTESTI_CHECK_RET(u10 == (uintptr_t)10U, -10);368 RTTESTI_CHECK_RET(u11 == 
(uintptr_t)11U, -11);369 RTTESTI_CHECK_RET(u12 == (uintptr_t)12U, -12);370 RTTESTI_CHECK_RET(u13 == (uintptr_t)13U, -13);371 RTTESTI_CHECK_RET(u14 == (uintptr_t)14U, -14);372 RTTESTI_CHECK_RET(u15 == (uintptr_t)15U, -15);373 RTTESTI_CHECK_RET(u16 == (uintptr_t)16U, -16);374 RTTESTI_CHECK_RET(u17 == (uintptr_t)17U, -17);375 RTTESTI_CHECK_RET(u18 == (uintptr_t)18U, -18);376 RTTESTI_CHECK_RET(u19 == (uintptr_t)19U, -19);377 RTTESTI_CHECK_RET(u20 == (uintptr_t)20U, -20);378 379 void *pv = alloca(32);380 memset(pv, 'a', 32);381 RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -11);382 383 return 42;384 }385 386 387 DECLCALLBACK(int) tstSwitchBackInner(intptr_t i1, intptr_t i2)388 {389 RTTESTI_CHECK_RET(i1 == -42, -20);390 RTTESTI_CHECK_RET(i2 == (intptr_t)&g_Jmp, -21);391 392 void *pv = alloca(32);393 memset(pv, 'b', 32);394 RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack < VMM_STACK_SIZE, -22);395 396 int rc;397 rc = tstWrapped4(&g_Jmp, (uintptr_t)2U, (uintptr_t)3U, (uintptr_t)4U);398 RTTESTI_CHECK_RET(rc == 42, -23);399 400 rc = tstWrapped5(&g_Jmp, ~(uintptr_t)2U, ~(uintptr_t)3U, ~(uintptr_t)4U, ~(uintptr_t)5U);401 RTTESTI_CHECK_RET(rc == 42, -23);402 403 rc = tstWrapped6(&g_Jmp, (uintptr_t)2U, (uintptr_t)3U, (uintptr_t)4U, (uintptr_t)5U, (uintptr_t)6U);404 RTTESTI_CHECK_RET(rc == 42, -23);405 406 rc = tstWrapped7(&g_Jmp, ~(uintptr_t)2U, ~(uintptr_t)3U, ~(uintptr_t)4U, ~(uintptr_t)5U, ~(uintptr_t)6U, ~(uintptr_t)7U);407 RTTESTI_CHECK_RET(rc == 42, -23);408 409 rc = tstWrapped8(&g_Jmp, (uintptr_t)2U, (uintptr_t)3U, (uintptr_t)4U, (uintptr_t)5U, (uintptr_t)6U, (uintptr_t)7U, (uintptr_t)8U);410 RTTESTI_CHECK_RET(rc == 42, -23);411 412 rc = tstWrapped9(&g_Jmp, ~(uintptr_t)2U, ~(uintptr_t)3U, ~(uintptr_t)4U, ~(uintptr_t)5U, ~(uintptr_t)6U, ~(uintptr_t)7U, ~(uintptr_t)8U, ~(uintptr_t)9U);413 RTTESTI_CHECK_RET(rc == 42, -23);414 415 rc = tstWrapped10(&g_Jmp, (uintptr_t)2U, (uintptr_t)3U, (uintptr_t)4U, (uintptr_t)5U, (uintptr_t)6U, (uintptr_t)7U, (uintptr_t)8U, (uintptr_t)9U, (uintptr_t)10);416 RTTESTI_CHECK_RET(rc == 42, -23);417 418 rc = tstWrapped16(&g_Jmp, (uintptr_t)2U, (uintptr_t)3U, (uintptr_t)4U, (uintptr_t)5U, (uintptr_t)6U, (uintptr_t)7U, (uintptr_t)8U, (uintptr_t)9U, (uintptr_t)10, (uintptr_t)11, (uintptr_t)12, (uintptr_t)13, (uintptr_t)14, (uintptr_t)15, (uintptr_t)16);419 RTTESTI_CHECK_RET(rc == 42, -23);420 421 rc = tstWrapped20(&g_Jmp, (uintptr_t)2U, (uintptr_t)3U, (uintptr_t)4U, (uintptr_t)5U, (uintptr_t)6U, (uintptr_t)7U, (uintptr_t)8U, (uintptr_t)9U, (uintptr_t)10, (uintptr_t)11, (uintptr_t)12, (uintptr_t)13, (uintptr_t)14, (uintptr_t)15, (uintptr_t)16, (uintptr_t)17, (uintptr_t)18, (uintptr_t)19, (uintptr_t)20);422 RTTESTI_CHECK_RET(rc == 42, -23);423 return rc;424 }425 426 427 DECLCALLBACK(int) StkBack_tstWrappedThin(PVMMR0JMPBUF pJmp)428 {429 RTTESTI_CHECK_RET(pJmp == &g_Jmp, -31);430 431 void *pv = alloca(32);432 memset(pv, 'c', 32);433 RTTESTI_CHECK_RET((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack > VMM_STACK_SIZE, -32);434 435 return 42;436 }437 438 DECLCALLBACK(int) tstSwitchBackInnerThin(intptr_t i1, intptr_t i2)439 {440 RT_NOREF(i1);441 return tstWrappedThin((PVMMR0JMPBUF)i2);442 }443 444 445 void tstSwitchBack(void)446 {447 RTR0PTR R0PtrSaved = g_Jmp.pvSavedStack;448 RT_ZERO(g_Jmp);449 g_Jmp.pvSavedStack = R0PtrSaved;450 memset((void *)g_Jmp.pvSavedStack, '\0', VMM_STACK_SIZE);451 g_cbFoo = 0;452 g_cJmps = 0;453 g_cbFooUsed = 0;454 g_fInLongJmp = false;455 456 //for (int i = iFrom, iItr = 0; i != iTo; i += iInc, iItr++)457 
{458 int rc = stackRandom(&g_Jmp, (PFNVMMR0SETJMP)(uintptr_t)tstSwitchBackInner, (PVM)(intptr_t)-42, (PVMCPU)&g_Jmp);459 RTTESTI_CHECK_MSG_RETV(rc == 42,460 ("i=%d iOrg=%d rc=%d setjmp; cbFoo=%#x cbFooUsed=%#x fInLongJmp=%d\n",461 0, 0 /*i, iOrg*/, rc, g_cbFoo, g_cbFooUsed, g_fInLongJmp));462 463 rc = stackRandom(&g_Jmp, (PFNVMMR0SETJMP)(uintptr_t)tstSwitchBackInnerThin, NULL, (PVMCPU)&g_Jmp);464 RTTESTI_CHECK_MSG_RETV(rc == 42,465 ("i=%d iOrg=%d rc=%d setjmp; cbFoo=%#x cbFooUsed=%#x fInLongJmp=%d\n",466 0, 0 /*i, iOrg*/, rc, g_cbFoo, g_cbFooUsed, g_fInLongJmp));467 468 }469 //RTTESTI_CHECK_MSG_RETV(g_cJmps, ("No jumps!"));470 }471 472 #endif473 134 474 135 … … 479 140 */ 480 141 RTTEST hTest; 481 #ifdef VMM_R0_NO_SWITCH_STACK482 142 RTEXITCODE rcExit = RTTestInitAndCreate("tstVMMR0CallHost-1", &hTest); 483 #else484 RTEXITCODE rcExit = RTTestInitAndCreate("tstVMMR0CallHost-2", &hTest);485 #endif486 143 if (rcExit != RTEXITCODE_SUCCESS) 487 144 return rcExit; 488 145 RTTestBanner(hTest); 489 146 490 g_Jmp.pvSavedStack = (RTR0PTR)RTTestGuardedAllocTail(hTest, VMM_STACK_SIZE); 147 g_Jmp.cbStackBuf = PAGE_SIZE; 148 g_Jmp.pvStackBuf = (uintptr_t)RTTestGuardedAllocTail(hTest, g_Jmp.cbStackBuf); 149 g_Jmp.pMirrorBuf = (uintptr_t)&g_JmpMirror; 491 150 492 151 /* … … 497 156 RTTestSub(hTest, "Decreasing stack usage"); 498 157 tst(7599, 0, -1); 499 #if defined(VMM_R0_SWITCH_STACK) && defined(RT_ARCH_AMD64)500 RTTestSub(hTest, "Switch back");501 tstSwitchBack();502 #endif503 158 504 159 return RTTestSummaryAndDestroy(hTest); -
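With the stack-switching variant (tstVMMR0CallHost-2) dropped, the remaining testcase exercises only the plain setjmp/longjmp pair. The following is a rough sketch of the call pattern it drives, assuming the vmmR0CallRing3SetJmp and vmmR0CallRing3LongJmp declarations in VMMInternal.h keep the (pJmpBuf, pfn, pVM, pVCpu) and (pJmpBuf, rc) shapes implied by the test code; the worker body and its trigger condition are illustrative only:

    #include <iprt/errcore.h>   /* VINF_SUCCESS */
    #include <VBox/types.h>     /* PVM, PVMCPU */
    #include "VMMInternal.h"    /* VMMR0JMPBUF, PFNVMMR0SETJMP */

    static VMMR0JMPBUF g_JmpSketch; /* armed as shown in the earlier sketch */

    /* Worker invoked under the jump buffer; shape assumed to match PFNVMMR0SETJMP. */
    static DECLCALLBACK(int) sketchWorker(PVM pVM, PVMCPU pVCpu)
    {
        RT_NOREF(pVCpu);
        if (pVM) /* arbitrary condition, just to show both exit paths */
            return vmmR0CallRing3LongJmp(&g_JmpSketch, 42); /* unwinds straight back to the setjmp caller */
        return VINF_SUCCESS; /* normal return path */
    }

    static int sketchRun(PVM pVM, PVMCPU pVCpu)
    {
        /* Returns VINF_SUCCESS on the normal path and 42 when the worker long-jumped. */
        return vmmR0CallRing3SetJmp(&g_JmpSketch, sketchWorker, pVM, pVCpu);
    }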
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r92392 r92408 265 265 PVM pVM = NULL; NOREF(pVM); 266 266 267 #if defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)268 CHECK_MEMBER_ALIGNMENT(VMCPU, vmm.s.CallRing3JmpBufR0, 16);269 CHECK_MEMBER_ALIGNMENT(VMCPU, vmm.s.CallRing3JmpBufR0.xmm6, 16);270 #endif271 272 267 /* the VMCPUs are page aligned TLB hit reasons. */ 273 268 CHECK_SIZE_ALIGNMENT(VMCPU, 4096);
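The two Windows/AMD64 checks disappear together with vmm.s.CallRing3JmpBufR0 itself. The alignment guarantee for the relocated buffer is instead enforced at compile time next to the structure definition, as seen in the VMMInternal.h hunk above; shown here only for context, it is not part of tstVMStructSize:

    /* From the VMMInternal.h hunk in this changeset: the jump buffer now lives in
       VMMR0PERVCPU and its 64-byte alignment is asserted where it is declared. */
    AssertCompileMemberAlignment(VMMR0PERVCPU, AssertJmpBuf, 64);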