VirtualBox

Changeset 20132 in vbox


Ignore:
Timestamp:
May 28, 2009 7:20:26 PM (15 years ago)
Author:
vboxsync
Message:

HWACCM: Finished (I hope) the VBOX_WITH_VMMR0_DISABLE_PREEMPTION code.

Location:
trunk/src/VBox/VMM
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/HWACCM.cpp

    r20057 r20132  
    376376#endif
    377377
     378    /* Max number of resume loops. */
     379    rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->hwaccm.s.cMaxResumeLoops, 0 /* set by R0 later */);
     380    AssertRC(rc);
     381
    378382    return VINF_SUCCESS;
    379383}
     
    475479        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIrqWindow,          "/HWACCM/CPU%d/Exit/IrqWindow");
    476480        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMaxResume,          "/HWACCM/CPU%d/Exit/MaxResume");
     481        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPreemptPending,     "/HWACCM/CPU%d/Exit/PreemptPending");
    477482
    478483        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchGuestIrq,         "/HWACCM/CPU%d/Switch/IrqPending");
     
    494499        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdown,           "/HWACCM/CPU%d/Flush/Shootdown/Page");
    495500        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdownFlush,      "/HWACCM/CPU%d/Flush/Shootdown/TLB");
    496        
     501
    497502        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset,              "/HWACCM/CPU%d/TSC/Offset");
    498503        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept,           "/HWACCM/CPU%d/TSC/Intercept");
  • trunk/src/VBox/VMM/HWACCMInternal.h

    r20057 r20132  
    120120
    121121
    122 /** Maxium resume loops allowed in ring 0 (safety precaution) */
    123 #define HWACCM_MAX_RESUME_LOOPS             1024
    124 
    125122/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
    126123#define HWACCM_MAX_TLB_SHOOTDOWN_PAGES      8
     
    217214    /** Maximum ASID allowed. */
    218215    RTUINT                      uMaxASID;
     216
     217    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
     218     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
     219    uint32_t                    cMaxResumeLoops;
    219220
    220221#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     
    644645    STAMCOUNTER             StatExitIrqWindow;
    645646    STAMCOUNTER             StatExitMaxResume;
     647    STAMCOUNTER             StatExitPreemptPending;
    646648    STAMCOUNTER             StatIntReinject;
    647649    STAMCOUNTER             StatPendingHostIrq;
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp

    r19360 r20132  
    3737#include <VBox/selm.h>
    3838#include <VBox/iom.h>
    39 #include <iprt/param.h>
    4039#include <iprt/assert.h>
    4140#include <iprt/asm.h>
     41#include <iprt/cpuset.h>
     42#include <iprt/memobj.h>
     43#include <iprt/param.h>
     44#include <iprt/power.h>
    4245#include <iprt/string.h>
    43 #include <iprt/memobj.h>
    44 #include <iprt/cpuset.h>
    45 #include <iprt/power.h>
     46#include <iprt/thread.h>
    4647#include "HWVMXR0.h"
    4748#include "HWSVMR0.h"
     
    836837    pVM->hwaccm.s.uMaxASID                  = HWACCMR0Globals.uMaxASID;
    837838
     839
     840    if (!pVM->hwaccm.s.cMaxResumeLoops) /* allow ring-3 overrides */
     841    {
     842        pVM->hwaccm.s.cMaxResumeLoops       = 1024;
     843#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     844        if (RTThreadPreemptIsPendingTrusty())
     845            pVM->hwaccm.s.cMaxResumeLoops   = 8192;
     846#endif
     847    }
     848
    838849    for (unsigned i=0;i<pVM->cCPUs;i++)
    839850    {
     
    852863    PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
    853864
    854     /* @note Not correct as we can be rescheduled to a different cpu, but the fInUse case is mostly for debugging. */
     865    /* Note: Not correct as we can be rescheduled to a different cpu, but the fInUse case is mostly for debugging. */
    855866    ASMAtomicWriteBool(&pCpu->fInUse, true);
    856867    ASMSetFlags(fFlags);
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r20056 r20132  
    847847    uint8_t     u8LastVTPR;
    848848    PHWACCM_CPUINFO pCpu = 0;
    849     RTCCUINTREG uOldEFlags;
     849    RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;
    850850#ifdef VBOX_STRICT
    851851    RTCPUID  idCpuCheck;
     
    863863
    864864    /* Safety precaution; looping for too long here can have a very bad effect on the host */
    865     if (++cResume > HWACCM_MAX_RESUME_LOOPS)
     865    if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops))
    866866    {
    867867        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMaxResume);
     
    919919
    920920#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    921     /** @todo This must be repeated (or moved down) after we've disabled interrupts
    922      *        below because a rescheduling request (IPI) might arrive before we get
    923      *        there and we end up exceeding our timeslice. (Putting it here for
    924      *        now because I don't want to mess up anything.) */
     921    /*
      922     * Exit to ring-3 if preemption/work is pending.
     923     *
     924     * Interrupts are disabled before the call to make sure we don't miss any interrupt
     925     * that would flag preemption (IPI, timer tick, ++). (Would've been nice to do this
     926     * further down, but SVMR0CheckPendingInterrupt makes that hard.)
     927     *
      928     * Note! Interrupts must be disabled *before* we check for TLB flushes; TLB
     929     *       shootdowns rely on this.
     930     */
     931    uOldEFlags = ASMIntDisableFlags();
    925932    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
    926933    {
    927         rc = VINF_EM_RAW_INTERRUPT_HYPER;
     934        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
     935        rc = VINF_EM_RAW_INTERRUPT;
    928936        goto end;
    929937    }
     938    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
    930939#endif
    931940
     
    10031012    }
    10041013
     1014#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    10051015    /* Disable interrupts to make sure a poke will interrupt execution.
    10061016     * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
     
    10081018    uOldEFlags = ASMIntDisableFlags();
    10091019    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
     1020#endif
    10101021
    10111022    pCpu = HWACCMR0GetCurrentCpu();
     
    10991110    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
    11001111    ASMSetFlags(uOldEFlags);
     1112#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     1113    uOldEFlags = ~(RTCCUINTREG)0;
     1114#endif
    11011115    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatInGC, x);
    11021116
     
    22552269    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC);
    22562270
     2271#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
      2272    /* Restore interrupts if we exited after disabling them. */
     2273    if (uOldEFlags != ~(RTCCUINTREG)0)
     2274        ASMSetFlags(uOldEFlags);
     2275#endif
     2276
    22572277    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
    22582278    return rc;
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r20026 r20132  
    433433            if (pVM->hwaccm.s.fHasIoApic)
    434434                val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;
    435            
     435
    436436            /* Mask away the bits that the CPU doesn't support */
    437437            /** @todo make sure they don't conflict with the above requirements. */
     
    20022002    bool        fSetupTPRCaching = false;
    20032003    PHWACCM_CPUINFO pCpu = 0;
    2004     RTCCUINTREG uOldEFlags;
     2004    RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;
    20052005    unsigned    cResume = 0;
    20062006#ifdef VBOX_STRICT
     
    21042104
    21052105    /* Safety precaution; looping for too long here can have a very bad effect on the host */
    2106     if (++cResume > HWACCM_MAX_RESUME_LOOPS)
     2106    if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops))
    21072107    {
    21082108        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMaxResume);
     
    21532153
    21542154#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    2155     /** @todo This must be repeated (or moved down) after we've disabled interrupts
    2156      *        below because a rescheduling request (IPI) might arrive before we get
    2157      *        there and we end up exceeding our timeslice. (Putting it here for
    2158      *        now because I don't want to mess up anything.) */
     2155    /*
      2156     * Exit to ring-3 if preemption/work is pending.
     2157     *
     2158     * Interrupts are disabled before the call to make sure we don't miss any interrupt
     2159     * that would flag preemption (IPI, timer tick, ++). (Would've been nice to do this
     2160     * further down, but VMXR0CheckPendingInterrupt makes that hard.)
     2161     *
      2162     * Note! Interrupts must be disabled *before* we check for TLB flushes; TLB
     2163     *       shootdowns rely on this.
     2164     */
     2165    uOldEFlags = ASMIntDisableFlags();
    21592166    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
    21602167    {
    2161         rc = VINF_EM_RAW_INTERRUPT_HYPER;
     2168        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
     2169        rc = VINF_EM_RAW_INTERRUPT;
    21622170        goto end;
    21632171    }
     2172    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
    21642173#endif
    21652174
     
    22482257        goto end;
    22492258
     2259#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    22502260    /* Disable interrupts to make sure a poke will interrupt execution.
    22512261     * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
     
    22532263    uOldEFlags = ASMIntDisableFlags();
    22542264    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
     2265#endif
    22552266
    22562267    /* Deal with tagged TLB setup and invalidation. */
     
    22932304    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
    22942305    ASMSetFlags(uOldEFlags);
     2306#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     2307    uOldEFlags = ~(RTCCUINTREG)0;
     2308#endif
    22952309
    22962310    AssertMsg(!pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries, ("pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries=%d\n", pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries));
     
    36833697    /* Just set the correct state here instead of trying to catch every goto above. */
    36843698    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC);
     3699
     3700#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
      3701    /* Restore interrupts if we exited after disabling them. */
     3702    if (uOldEFlags != ~(RTCCUINTREG)0)
     3703        ASMSetFlags(uOldEFlags);
     3704#endif
    36853705
    36863706    STAM_STATS({
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette