Changeset 20132 in vbox

- Timestamp: May 28, 2009 7:20:26 PM
- Location: trunk/src/VBox/VMM
- Files: 5 edited
  - HWACCM.cpp (modified) (3 diffs)
  - HWACCMInternal.h (modified) (3 diffs)
  - VMMR0/HWACCMR0.cpp (modified) (3 diffs)
  - VMMR0/HWSVMR0.cpp (modified) (7 diffs)
  - VMMR0/HWVMXR0.cpp (modified) (8 diffs)
Legend:
- unprefixed lines: unmodified
- lines prefixed with +: added
- lines prefixed with -: removed
trunk/src/VBox/VMM/HWACCM.cpp (r20057 → r20132)

 #endif
 
+    /* Max number of resume loops. */
+    rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->hwaccm.s.cMaxResumeLoops, 0 /* set by R0 later */);
+    AssertRC(rc);
+
     return VINF_SUCCESS;
 }
…
     HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIrqWindow,      "/HWACCM/CPU%d/Exit/IrqWindow");
     HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMaxResume,      "/HWACCM/CPU%d/Exit/MaxResume");
+    HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPreemptPending, "/HWACCM/CPU%d/Exit/PreemptPending");
 
     HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchGuestIrq,     "/HWACCM/CPU%d/Switch/IrqPending");
…
     HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdown,       "/HWACCM/CPU%d/Flush/Shootdown/Page");
     HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdownFlush,  "/HWACCM/CPU%d/Flush/Shootdown/TLB");
     (whitespace-only change on the blank line here)
 
     HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset,          "/HWACCM/CPU%d/TSC/Offset");
     HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept,       "/HWACCM/CPU%d/TSC/Intercept");
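A note on using the new key (annotation; not part of the changeset itself): CFGMR3QueryU32Def() defaults the value to 0 here, and ring-0 only computes its own default when the value is still 0, so any non-zero value supplied from ring-3 wins. Assuming the usual VBoxInternal extradata-to-CFGM mapping (an assumption on my part; the changeset does not spell the path out), a per-VM override would look something like: VBoxManage setextradata "MyVM" "VBoxInternal/HWVirtExt/MaxResumeLoops" 2048.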
trunk/src/VBox/VMM/HWACCMInternal.h (r20057 → r20132)

-/** Maxium resume loops allowed in ring 0 (safety precaution) */
-#define HWACCM_MAX_RESUME_LOOPS         1024
-
 /** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
 #define HWACCM_MAX_TLB_SHOOTDOWN_PAGES  8
…
     /** Maximum ASID allowed. */
     RTUINT                      uMaxASID;
+
+    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
+     *  This number is set much higher when RTThreadPreemptIsPending is reliable. */
+    uint32_t                    cMaxResumeLoops;
 
 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
…
     STAMCOUNTER             StatExitIrqWindow;
     STAMCOUNTER             StatExitMaxResume;
+    STAMCOUNTER             StatExitPreemptPending;
     STAMCOUNTER             StatIntReinject;
     STAMCOUNTER             StatPendingHostIrq;
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp (r19360 → r20132)

 #include <VBox/selm.h>
 #include <VBox/iom.h>
-#include <iprt/param.h>
 #include <iprt/assert.h>
 #include <iprt/asm.h>
+#include <iprt/cpuset.h>
+#include <iprt/memobj.h>
+#include <iprt/param.h>
+#include <iprt/power.h>
 #include <iprt/string.h>
-#include <iprt/memobj.h>
-#include <iprt/cpuset.h>
-#include <iprt/power.h>
+#include <iprt/thread.h>
 #include "HWVMXR0.h"
 #include "HWSVMR0.h"
…
     pVM->hwaccm.s.uMaxASID = HWACCMR0Globals.uMaxASID;
 
+
+    if (!pVM->hwaccm.s.cMaxResumeLoops) /* allow ring-3 overrides */
+    {
+        pVM->hwaccm.s.cMaxResumeLoops = 1024;
+#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+        if (RTThreadPreemptIsPendingTrusty())
+            pVM->hwaccm.s.cMaxResumeLoops = 8192;
+#endif
+    }
+
     for (unsigned i=0;i<pVM->cCPUs;i++)
     {
…
     PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
 
-    /* @note Not correct as we can be rescheduled to a different cpu, but the fInUse case is mostly for debugging. */
+    /* Note: Not correct as we can be rescheduled to a different cpu, but the fInUse case is mostly for debugging. */
     ASMAtomicWriteBool(&pCpu->fInUse, true);
     ASMSetFlags(fFlags);
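For context on the default above: RTThreadPreemptIsPendingTrusty() reports whether RTThreadPreemptIsPending() gives reliable answers on this host OS. When it does, the pending-preemption check added to the run loops (see HWSVMR0.cpp and HWVMXR0.cpp below) becomes the primary exit trigger and the iteration cap is only a backstop, hence the much higher limit. A minimal sketch of the selection logic, using a hypothetical wrapper name (chooseMaxResumeLoops is not a VirtualBox function):

    #include <iprt/thread.h>

    /* Hypothetical helper restating the logic above: honour a non-zero
     * ring-3 override, otherwise pick a default based on whether the host's
     * pending-preemption reporting can be trusted. */
    static uint32_t chooseMaxResumeLoops(uint32_t cConfiguredByRing3)
    {
        if (cConfiguredByRing3)                 /* ring-3 override wins */
            return cConfiguredByRing3;
    #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
        if (RTThreadPreemptIsPendingTrusty())   /* reliable checks: cap is a backstop */
            return 8192;
    #endif
        return 1024;                            /* conservative default */
    }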
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp (r20056 → r20132)

     uint8_t u8LastVTPR;
     PHWACCM_CPUINFO pCpu = 0;
-    RTCCUINTREG uOldEFlags;
+    RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;
 #ifdef VBOX_STRICT
     RTCPUID idCpuCheck;
…
     /* Safety precaution; looping for too long here can have a very bad effect on the host */
-    if (++cResume > HWACCM_MAX_RESUME_LOOPS)
+    if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops))
     {
         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMaxResume);
…
 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
-    /** @todo This must be repeated (or moved down) after we've disabled interrupts
-     *  below because a rescheduling request (IPI) might arrive before we get
-     *  there and we end up exceeding our timeslice. (Putting it here for
-     *  now because I don't want to mess up anything.) */
+    /*
+     * Exit to ring-3 if preemption/work is pending.
+     *
+     * Interrupts are disabled before the call to make sure we don't miss any interrupt
+     * that would flag preemption (IPI, timer tick, ++). (Would've been nice to do this
+     * further down, but SVMR0CheckPendingInterrupt makes that hard.)
+     *
+     * Note! Interrupts must be disabled *before* we check for TLB flushes; TLB
+     * shootdowns rely on this.
+     */
+    uOldEFlags = ASMIntDisableFlags();
     if (RTThreadPreemptIsPending(NIL_RTTHREAD))
     {
-        rc = VINF_EM_RAW_INTERRUPT_HYPER;
+        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
+        rc = VINF_EM_RAW_INTERRUPT;
         goto end;
     }
+    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
 #endif
…
     }
 
+#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     /* Disable interrupts to make sure a poke will interrupt execution.
      * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
      */
     uOldEFlags = ASMIntDisableFlags();
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
+#endif
 
     pCpu = HWACCMR0GetCurrentCpu();
…
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
     ASMSetFlags(uOldEFlags);
+#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    uOldEFlags = ~(RTCCUINTREG)0;
+#endif
     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatInGC, x);
…
     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC);
 
+#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    /* Restore interrupts if we exited after disabling them. */
+    if (uOldEFlags != ~(RTCCUINTREG)0)
+        ASMSetFlags(uOldEFlags);
+#endif
+
     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
     return rc;
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r20026 → r20132)

     if (pVM->hwaccm.s.fHasIoApic)
         val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;
     (whitespace-only change on the blank line here)
 
     /* Mask away the bits that the CPU doesn't support */
     /** @todo make sure they don't conflict with the above requirements. */
…
     bool fSetupTPRCaching = false;
     PHWACCM_CPUINFO pCpu = 0;
-    RTCCUINTREG uOldEFlags;
+    RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;
     unsigned cResume = 0;
 #ifdef VBOX_STRICT
…
     /* Safety precaution; looping for too long here can have a very bad effect on the host */
-    if (++cResume > HWACCM_MAX_RESUME_LOOPS)
+    if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops))
     {
         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMaxResume);
…
 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
-    /** @todo This must be repeated (or moved down) after we've disabled interrupts
-     *  below because a rescheduling request (IPI) might arrive before we get
-     *  there and we end up exceeding our timeslice. (Putting it here for
-     *  now because I don't want to mess up anything.) */
+    /*
+     * Exit to ring-3 if preemption/work is pending.
+     *
+     * Interrupts are disabled before the call to make sure we don't miss any interrupt
+     * that would flag preemption (IPI, timer tick, ++). (Would've been nice to do this
+     * further down, but VMXR0CheckPendingInterrupt makes that hard.)
+     *
+     * Note! Interrupts must be disabled *before* we check for TLB flushes; TLB
+     * shootdowns rely on this.
+     */
+    uOldEFlags = ASMIntDisableFlags();
     if (RTThreadPreemptIsPending(NIL_RTTHREAD))
     {
-        rc = VINF_EM_RAW_INTERRUPT_HYPER;
+        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
+        rc = VINF_EM_RAW_INTERRUPT;
         goto end;
     }
+    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
 #endif
…
         goto end;
 
+#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     /* Disable interrupts to make sure a poke will interrupt execution.
      * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
      */
     uOldEFlags = ASMIntDisableFlags();
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
+#endif
 
     /* Deal with tagged TLB setup and invalidation. */
…
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
     ASMSetFlags(uOldEFlags);
+#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    uOldEFlags = ~(RTCCUINTREG)0;
+#endif
 
     AssertMsg(!pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries, ("pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries=%d\n", pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries));
…
     /* Just set the correct state here instead of trying to catch every goto above. */
     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC);
 
+#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    /* Restore interrupts if we exited after disabling them. */
+    if (uOldEFlags != ~(RTCCUINTREG)0)
+        ASMSetFlags(uOldEFlags);
+#endif
 
     STAM_STATS({
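Since the same pattern appears in both HWSVMR0.cpp and HWVMXR0.cpp, here is a distilled, stand-alone sketch of what the run-loop changes do. This is not VirtualBox code; every name below is a hypothetical stand-in: int_disable()/int_restore() play the role of ASMIntDisableFlags()/ASMSetFlags(), preempt_pending() of RTThreadPreemptIsPending(), and run_guest_once() of one VMRUN/VMLAUNCH round.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFLAGS_NONE (~(uintptr_t)0)       /* sentinel: nothing saved */

    static uintptr_t int_disable(void)        { return 0x202; /* fake saved flags */ }
    static void      int_restore(uintptr_t f) { (void)f; }
    static bool      preempt_pending(void)    { static int n; return ++n > 5; }
    static int       run_guest_once(void)     { return 0;  /* 0 = keep looping */ }

    static int run_loop(uint32_t cMaxResumeLoops)
    {
        uintptr_t fSaved  = EFLAGS_NONE;
        uint32_t  cResume = 0;
        int       rc      = 0;

        for (;;)
        {
            /* Safety precaution: a runaway loop starves the host. */
            if (++cResume > cMaxResumeLoops)
            {
                rc = 1;             /* stands in for VINF_EM_RAW_INTERRUPT */
                break;
            }

            /* Disable interrupts *before* the preemption check so an IPI
             * arriving between the check and guest entry cannot be missed. */
            fSaved = int_disable();
            if (preempt_pending())
            {
                rc = 2;             /* exit to ring-3; flags restored below */
                break;
            }

            rc = run_guest_once();
            int_restore(fSaved);    /* interrupts back on for exit handling */
            fSaved = EFLAGS_NONE;   /* reset the sentinel after restoring */
            if (rc != 0)
                break;
        }

        /* Single exit path: restore interrupts only if a break above left
         * them disabled (mirrors the uOldEFlags != ~(RTCCUINTREG)0 check). */
        if (fSaved != EFLAGS_NONE)
            int_restore(fSaved);
        return rc;
    }

    int main(void)
    {
        printf("run_loop returned %d\n", run_loop(1024));
        return 0;
    }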