VirtualBox

Changeset 54065 in vbox


Timestamp:
Feb 3, 2015 10:45:39 AM
Author:
vboxsync
Message:

VMM: Implemented TM TSC-mode switching with paravirtualized guests.

Location:
trunk
Files:
12 edited
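
At a high level, this changeset lets TM switch the VM's TSC mode at runtime: when a paravirtualized guest enables a paravirt TSC (here, a Hyper-V guest mapping the TSC page), GIM calls the new TMR3CpuTickParavirtEnable() and TM moves to TMTSCMODE_REAL_TSC_OFFSET for cheaper TSC handling; on disable or VM reset it falls back to the originally configured mode. The following is a minimal standalone sketch of that switching policy only, with simplified, invented type and field names rather than the real TM state:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for TMTSCMODE and the relevant TM state fields. */
    typedef enum { TSCMODE_VIRT_TSC_EMULATED = 1, TSCMODE_REAL_TSC_OFFSET, TSCMODE_DYNAMIC } TSCMODE;
    typedef struct
    {
        TSCMODE enmTSCMode;             /* current mode */
        TSCMODE enmOriginalTSCMode;     /* mode configured at VM init */
        bool    fTSCModeSwitchAllowed;  /* /TM/TSCModeSwitchAllowed, ANDed with GIM+HM availability */
        bool    fParavirtTscEnabled;    /* guest currently uses a paravirt TSC */
    } TMSTATE;

    /* Guest enabled a paravirtualized TSC: prefer plain TSC offsetting if switching is allowed. */
    static void paravirtEnable(TMSTATE *pTm)
    {
        if (pTm->fTSCModeSwitchAllowed)
            pTm->enmTSCMode = TSCMODE_REAL_TSC_OFFSET;
        pTm->fParavirtTscEnabled = true;
    }

    /* Guest disabled it (or the VM was reset): restore the original mode. */
    static void paravirtDisable(TMSTATE *pTm)
    {
        if (pTm->fTSCModeSwitchAllowed)
            pTm->enmTSCMode = pTm->enmOriginalTSCMode;
        pTm->fParavirtTscEnabled = false;
    }

    int main(void)
    {
        TMSTATE tm = { TSCMODE_DYNAMIC, TSCMODE_DYNAMIC, true, false };
        paravirtEnable(&tm);
        printf("after enable:  mode=%d paravirt=%d\n", tm.enmTSCMode, tm.fParavirtTscEnabled);
        paravirtDisable(&tm);
        printf("after disable: mode=%d paravirt=%d\n", tm.enmTSCMode, tm.fParavirtTscEnabled);
        return 0;
    }

The real implementation performs the switch in an EMT rendezvous and rebases each VCPU's TSC offset so the guest-visible TSC stays continuous; that arithmetic is in tmR3CpuTickParavirtToggle() in the TM.cpp hunks below.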

  • trunk/include/VBox/vmm/tm.h

    r53789 r54065  
    134134VMM_INT_DECL(uint64_t)  TMCpuTickGetNoCheck(PVMCPU pVCpu);
    135135VMM_INT_DECL(bool)      TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc);
    136 VMM_INT_DECL(uint64_t)  TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, bool *pfParavirtTsc, uint64_t *poffRealTSC);
     136VMM_INT_DECL(uint64_t)  TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfOffsettedTsc, bool *pfParavirtTsc);
    137137VMM_INT_DECL(int)       TMCpuTickSet(PVM pVM, PVMCPU pVCpu, uint64_t u64Tick);
    138138VMM_INT_DECL(int)       TMCpuTickSetLastSeen(PVMCPU pVCpu, uint64_t u64LastSeenTick);
     
    269269VMMR3_INT_DECL(void)    TMR3VirtualSyncFF(PVM pVM, PVMCPU pVCpu);
    270270VMMR3_INT_DECL(PRTTIMESPEC) TMR3UtcNow(PVM pVM, PRTTIMESPEC pTime);
     271
     272VMMR3_INT_DECL(int)     TMR3CpuTickParavirtEnable(PVM pVM);
     273VMMR3_INT_DECL(int)     TMR3CpuTickParavirtDisable(PVM pVM);
    271274/** @} */
    272275#endif /* IN_RING3 */
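
Note the reordered prototype: poffRealTSC now follows pVCpu, with the two boolean out-parameters last, so existing ring-0 callers need updating (the HMVMXR0.cpp hunk below does exactly that). A sketch of the new calling convention, assuming only the prototype above and the usual VMM/IPRT headers; the clamp mirrors what the VMX code does with the result before programming the preemption timer:

    #include <iprt/cdefs.h>
    #include <VBox/vmm/tm.h>

    /* Illustrative ring-0 caller using the r54065 parameter order. */
    static uint32_t exampleDeadlineToPreemptTimer(PVMCPU pVCpu, uint64_t *poffTsc)
    {
        bool     fOffsettedTsc;
        bool     fParavirtTsc;
        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, poffTsc,
                                                                     &fOffsettedTsc, &fParavirtTsc);
        /* Clamp to what the 32-bit VMX preemption timer field can hold. */
        return (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
    }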
  • trunk/src/VBox/VMM/VMMAll/TMAll.cpp

    r50387 r54065  
    55
    66/*
    7  * Copyright (C) 2006-2013 Oracle Corporation
     7 * Copyright (C) 2006-2015 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    25752575    return uHz;
    25762576}
     2577
  • trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp

    r53441 r54065  
    55
    66/*
    7  * Copyright (C) 2006-2014 Oracle Corporation
     7 * Copyright (C) 2006-2015 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    174174 * Record why we refused to use offsetted TSC.
    175175 *
    176  * Used by TMCpuTickCanUseRealTSC and TMCpuTickGetDeadlineAndTscOffset.
     176 * Used by TMCpuTickCanUseRealTSC() and TMCpuTickGetDeadlineAndTscOffset().
    177177 *
    178178 * @param   pVM         Pointer to the VM.
     
    213213 * @param   pVCpu           Pointer to the VMCPU.
    214214 * @param   poffRealTSC     The offset against the TSC of the current CPU.
    215  *                          Can be NULL.
    216  * @param   pfParavirtTsc   Where to store whether paravirt. TSC can be used or
    217  *                          not.
    218  * @thread EMT(pVCpu).
     215 * @param   pfParavirtTsc   Where to store whether paravirt. TSC is enabled.
     216 *
     217 * @thread  EMT(pVCpu).
     218 * @see     TMCpuTickGetDeadlineAndTscOffset().
    219219 */
    220220VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc)
    221221{
    222222    PVM pVM = pVCpu->CTX_SUFF(pVM);
    223     bool fParavirtTsc = false;
     223    bool fOffsettedTsc = false;
    224224
    225225    /*
    226226     * We require:
    227      *     1. Use of a paravirtualized TSC is enabled by the guest.
    228      *     (OR)
    229227     *     1. A fixed TSC, this is checked at init time.
    230228     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     
    234232     *          c) we're not using warp drive (accelerated virtual guest time).
    235233     */
    236     *pfParavirtTsc = GIMIsParavirtTscEnabled(pVM);
    237     if (    pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
    238         &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
    239         &&  (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
    240              || (   !pVM->tm.s.fVirtualSyncCatchUp
    241                  && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
    242                  && !pVM->tm.s.fVirtualWarpDrive)))
    243     {
    244         if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
    245         {
    246             /* The source is the timer synchronous virtual clock. */
    247             if (poffRealTSC)
    248             {
    249                 uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
    250                                 - pVCpu->tm.s.offTSCRawSrc;
    251                 /** @todo When we start collecting statistics on how much time we spend executing
    252                  * guest code before exiting, we should check this against the next virtual sync
    253                  * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
    254                  * the chance that we'll get interrupted right after the timer expired. */
    255                 *poffRealTSC = u64Now - ASMReadTSC();
    256             }
    257         }
    258         else if (poffRealTSC)
    259         {
    260             /* The source is the real TSC. */
    261             *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc;
    262         }
    263         /** @todo count this? */
    264         return true;
     234    Assert(pVCpu->tm.s.fTSCTicking);
     235    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;
     236
     237    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
     238    {
     239        /* The source is the real TSC. */
     240        *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc;
     241        return true;    /** @todo count this? */
     242    }
     243
     244    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
     245        && !pVM->tm.s.fVirtualSyncCatchUp
     246        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
     247        && !pVM->tm.s.fVirtualWarpDrive)
     248    {
     249        /* The source is the timer synchronous virtual clock. */
     250        uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
     251                        - pVCpu->tm.s.offTSCRawSrc;
     252        /** @todo When we start collecting statistics on how much time we spend executing
     253         * guest code before exiting, we should check this against the next virtual sync
     254         * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
     255         * the chance that we'll get interrupted right after the timer expired. */
     256        uint64_t u64TSC = ASMReadTSC();     /** @todo should be replaced with SUPReadTSC() eventually. */
     257        *poffRealTSC = u64Now - u64TSC;
     258        fOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
     259        return true;    /** @todo count this? */
    265260    }
    266261
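
Both branches maintain the invariant the hardware relies on, guestTsc = hostTsc + *poffRealTSC: with TMTSCMODE_REAL_TSC_OFFSET the offset is simply 0 - offTSCRawSrc, while in dynamic mode it is chosen so that the sum lands on the virtual-sync-derived tick count u64Now. A tiny standalone check of that relation, with made-up tick values (the arithmetic intentionally wraps modulo 2^64, exactly as in the VMM code):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical values, only to illustrate the offsetting relation. */
        uint64_t u64HostTsc = 5000000000ULL;   /* what ASMReadTSC() would return now */
        uint64_t u64Now     = 1200000000ULL;   /* virtual-sync clock in guest TSC ticks */

        uint64_t offRealTSC  = u64Now - u64HostTsc;      /* dynamic-mode branch; wraps, which is fine */
        uint64_t u64GuestTsc = u64HostTsc + offRealTSC;  /* what RDTSC returns to the guest */

        printf("guest sees %" PRIu64 ", expected %" PRIu64 "\n", u64GuestTsc, u64Now);
        return 0;
    }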
     
    304299 * @returns The number of host CPU clock ticks to the next timer deadline.
    305300 * @param   pVCpu           The current CPU.
    306  * @param   pfParavirtTsc   Where to store whether paravirt. TSC can be used or
    307  *                          not.
    308301 * @param   poffRealTSC     The offset against the TSC of the current CPU.
     302 * @param   pfOffsettedTsc  Where to store whether TSC offsetting can be used.
     303 * @param   pfParavirtTsc   Where to store whether paravirt. TSC is enabled.
    309304 *
    310305 * @thread  EMT(pVCpu).
    311  * @remarks Superset of TMCpuTickCanUseRealTSC().
    312  */
    313 VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, bool *pfParavirtTsc,
    314                                                         uint64_t *poffRealTSC)
    315 {
    316     PVM         pVM = pVCpu->CTX_SUFF(pVM);
    317     uint64_t    cTicksToDeadline;
     306 * @see    TMCpuTickCanUseRealTSC().
     307 */
     308VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfOffsettedTsc,
     309                                                        bool *pfParavirtTsc)
     310{
     311    PVM      pVM = pVCpu->CTX_SUFF(pVM);
     312    uint64_t cTicksToDeadline;
    318313
    319314    /*
    320315     * We require:
    321      *     1. Use of a paravirtualized TSC is enabled by the guest.
    322      *     (OR)
    323316     *     1. A fixed TSC, this is checked at init time.
    324317     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     
    328321     *          c) we're not using warp drive (accelerated virtual guest time).
    329322     */
    330     *pfParavirtTsc = GIMIsParavirtTscEnabled(pVM);
    331     if (    pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
    332         &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
    333         &&  (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
    334              || (   !pVM->tm.s.fVirtualSyncCatchUp
    335                  && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
    336                  && !pVM->tm.s.fVirtualWarpDrive)))
    337     {
     323    Assert(pVCpu->tm.s.fTSCTicking);
     324    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;
     325
     326    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
     327    {
     328        /* The source is the real TSC. */
     329        *poffRealTSC    = 0 - pVCpu->tm.s.offTSCRawSrc;
    338330        *pfOffsettedTsc = true;
    339         if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
    340         {
    341             /* The source is the timer synchronous virtual clock. */
    342             uint64_t cNsToDeadline;
    343             uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
    344             uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
    345                             ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
    346                             : u64NowVirtSync;
    347             u64Now -= pVCpu->tm.s.offTSCRawSrc;
    348             *poffRealTSC = u64Now - ASMReadTSC();
    349             cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
    350         }
    351         else
    352         {
    353             /* The source is the real TSC. */
    354             *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc;
    355             cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
    356         }
    357     }
    358     else
    359     {
     331        cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
     332        return cTicksToDeadline;
     333    }
     334
     335    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
     336        && !pVM->tm.s.fVirtualSyncCatchUp
     337        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
     338        && !pVM->tm.s.fVirtualWarpDrive)
     339    {
     340        /* The source is the timer synchronous virtual clock. */
     341        uint64_t cNsToDeadline;
     342        uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
     343        uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
     344                        ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
     345                        : u64NowVirtSync;
     346        u64Now -= pVCpu->tm.s.offTSCRawSrc;
     347        *poffRealTSC     = u64Now - ASMReadTSC();        /** @todo replace with SUPReadTSC() eventually. */
     348        *pfOffsettedTsc  = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
     349        cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
     350        return cTicksToDeadline;
     351    }
     352
    360353#ifdef VBOX_WITH_STATISTICS
    361         tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
     354    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
    362355#endif
    363         *pfOffsettedTsc  = false;
    364         *poffRealTSC     = 0;
    365         cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
    366     }
    367 
     356    *pfOffsettedTsc  = false;
     357    *poffRealTSC     = 0;
     358    cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
    368359    return cTicksToDeadline;
    369360}
     
    395386        {
    396387            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
    397             pVCpu->tm.s.u64TSCLastSeen += 64;   /* @todo choose a good increment here */
     388            pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
    398389            u64 = pVCpu->tm.s.u64TSCLastSeen;
    399390        }
     
    503494VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM)
    504495{
    505     /** @todo revisit this, not sure why we need to get the rate from GIP for
    506      *        real-tsc-offset. */
    507     if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
     496    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
     497        && g_pSUPGlobalInfoPage->u32Mode != SUPGIPMODE_INVARIANT_TSC)
    508498    {
    509499        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r53631 r54065  
    55
    66/*
    7  * Copyright (C) 2013-2014 Oracle Corporation
     7 * Copyright (C) 2013-2015 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    22572257static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu)
    22582258{
    2259     bool     fParavirtTsc = false;
     2259    bool fParavirtTsc;
     2260    bool fCanUseRealTsc;
    22602261    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    2261     if (TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc))
    2262     {
    2263         uint64_t u64CurTSC   = ASMReadTSC();
    2264         uint64_t u64LastTick = TMCpuTickGetLastSeen(pVCpu);
    2265 
    2266         if (u64CurTSC + pVmcb->ctrl.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
    2267         {
    2268             pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
    2269             pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
    2270             STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
    2271         }
    2272         else
    2273         {
    2274             pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
    2275             pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
    2276             STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
    2277         }
     2262    fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc);
     2263    if (fCanUseRealTsc)
     2264    {
     2265        pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
     2266        pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
     2267        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
    22782268    }
    22792269    else
    22802270    {
    2281         Assert(!fParavirtTsc);
    22822271        pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
    22832272        pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
    22842273        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
    22852274    }
    2286 
     2275    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     2276
     2277    /** @todo later optimize this to be done elsewhere and not before every
     2278     *        VM-entry. */
    22872279    if (fParavirtTsc)
    22882280    {
     
    22912283        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
    22922284    }
    2293 
    2294     pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    22952285}
    22962286
     
    32003190    }
    32013191
    3202     /** @todo Last-seen-tick shouldn't be necessary when TM supports invariant
    3203      *        mode. */
    32043192    if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
    3205         TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset);
     3193        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset);     /** @todo use SUPReadTSC() eventually. */
    32063194
    32073195    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
     
    43574345    HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
    43584346    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
     4347    if (rc != VINF_SUCCESS)
     4348        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHltToR3);
    43594349    return rc;
    43604350}
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r54058 r54065  
    55
    66/*
    7  * Copyright (C) 2012-2014 Oracle Corporation
     7 * Copyright (C) 2012-2015 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    56065606    if (pVM->hm.s.vmx.fUsePreemptTimer)
    56075607    {
    5608         uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &fParavirtTsc,
    5609                                                                      &pVCpu->hm.s.vmx.u64TSCOffset);
     5608        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fOffsettedTsc,
     5609                                                                     &fParavirtTsc);
    56105610
    56115611        /* Make sure the returned values have sane upper and lower boundaries. */
     
    56165616
    56175617        uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
    5618         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount);            AssertRC(rc);
     5618        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount);        AssertRC(rc);
    56195619    }
    56205620    else
    56215621        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
    56225622
     5623    /** @todo later optimize this to be done elsewhere and not before every
     5624     *        VM-entry. */
    56235625    if (fParavirtTsc)
    56245626    {
     
    56305632    if (fOffsettedTsc)
    56315633    {
    5632         uint64_t u64CurTSC = ASMReadTSC();
    5633         if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
    5634         {
    5635             /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
    5636             rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset);     AssertRC(rc);
    5637 
    5638             pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
    5639             rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);            AssertRC(rc);
    5640             STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
    5641         }
    5642         else
    5643         {
    5644             /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
    5645             pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
    5646             rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);            AssertRC(rc);
    5647             STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
    5648         }
     5634        /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
     5635        rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset);     AssertRC(rc);
     5636
     5637        pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
     5638        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);            AssertRC(rc);
     5639        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
    56495640    }
    56505641    else
     
    56525643        /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
    56535644        pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
    5654         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);                AssertRC(rc);
     5645        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);            AssertRC(rc);
    56555646        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
    56565647    }
     
    87138704    pVmxTransient->fVectoringDoublePF  = false;                 /* Vectoring double page-fault needs to be determined later. */
    87148705
    8715     /** @todo Last-seen-tick shouldn't be necessary when TM supports invariant
    8716      *        mode. */
    87178706    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
    8718         TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset);
     8707        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset);     /** @todo use SUPReadTSC() eventually. */
    87198708
    87208709    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
     
    88398828        }
    88408829
    8841         /* Profiling the VM-exit. */
     8830        /* Profile the VM-exit. */
    88428831        AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    88438832        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
     
    89228911            return VBOXSTRICTRC_TODO(rcStrict);
    89238912        }
    8924         /* Profiling the VM-exit. */
     8913
     8914        /* Profile the VM-exit. */
    89258915        AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    89268916        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
     
    1046110451
    1046210452    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
     10453    if (rc != VINF_SUCCESS)
     10454        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHltToR3);
    1046310455    return rc;
    1046410456}
  • trunk/src/VBox/VMM/VMMR3/GIM.cpp

    r52767 r54065  
    55
    66/*
    7  * Copyright (C) 2014 Oracle Corporation
     7 * Copyright (C) 2014-2015 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    275275    if (uVersion != GIM_SAVED_STATE_VERSION)
    276276        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    277 
    278277
    279278    /** @todo Load per-CPU data. */
  • trunk/src/VBox/VMM/VMMR3/GIMHv.cpp

    r52768 r54065  
    55
    66/*
    7  * Copyright (C) 2014 Oracle Corporation
     7 * Copyright (C) 2014-2015 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    586586        pRefTsc->u32TscSequence  = u32TscSeq;
    587587        pRefTsc->u64TscScale     = ((INT64_C(10000) << 32) / u64TscKHz) << 32;
    588 
    589         LogRel(("GIM: HyperV: Enabled TSC page at %#RGp - u64TscScale=%#RX64 u64TscKHz=%#RX64 (%'RU64)\n", GCPhysTscPage,
    590                 pRefTsc->u64TscScale, u64TscKHz, u64TscKHz));
     588        pRefTsc->i64TscOffset    = 0;
     589
     590        LogRel(("GIM: HyperV: Enabled TSC page at %#RGp - u64TscScale=%#RX64 u64TscKHz=%#RX64 (%'RU64) Seq=%#RU32\n",
     591                GCPhysTscPage, pRefTsc->u64TscScale, u64TscKHz, u64TscKHz, pRefTsc->u32TscSequence));
     592
     593        TMR3CpuTickParavirtEnable(pVM);
    591594        return VINF_SUCCESS;
    592595    }
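
For context on the scale value logged above and the newly initialised i64TscOffset: in the Hyper-V reference TSC page scheme the guest computes its reference time (in 100ns units) roughly as ((tsc * u64TscScale) >> 64) + i64TscOffset, so with u64TscKHz TSC ticks per millisecond the scale ((10000 << 32) / u64TscKHz) << 32 is approximately 2^64 * 10000 / u64TscKHz, i.e. a ticks-to-100ns conversion factor. A standalone sanity check with a hypothetical 2.5 GHz invariant TSC (uses the GCC/Clang __int128 extension for the 64x64->128 multiply):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t u64TscKHz    = 2500000;  /* hypothetical 2.5 GHz TSC = 2,500,000 ticks/ms */
        uint64_t u64TscScale  = ((INT64_C(10000) << 32) / u64TscKHz) << 32;  /* as in the hunk above */
        int64_t  i64TscOffset = 0;

        uint64_t u64Tsc  = 2500000000ULL;  /* one second worth of TSC ticks */
        uint64_t u64Time = (uint64_t)(((unsigned __int128)u64Tsc * u64TscScale) >> 64) + i64TscOffset;
        printf("reference time = %" PRIu64 " x 100ns (~%" PRIu64 " ms)\n", u64Time, u64Time / 10000);
        return 0;
    }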
     
    613616        Assert(!pRegion->fMapped);
    614617        LogRel(("GIM: HyperV: Disabled TSC-page\n"));
     618
     619        TMR3CpuTickParavirtDisable(pVM);
    615620        return VINF_SUCCESS;
    616621    }
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r53414 r54065  
    55
    66/*
    7  * Copyright (C) 2006-2014 Oracle Corporation
     7 * Copyright (C) 2006-2015 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    687687        HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt,                "/HM/CPU%d/Exit/Instr/Int", "Guest attempted to execute INT.");
    688688        HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt,                "/HM/CPU%d/Exit/Instr/Hlt", "Guest attempted to execute HLT.");
     689        HM_REG_COUNTER(&pVCpu->hm.s.StatExitHltToR3,            "/HM/CPU%d/Exit/HltToR3", "HLT causing us to go to ring-3.");
    689690        HM_REG_COUNTER(&pVCpu->hm.s.StatExitXdtrAccess,         "/HM/CPU%d/Exit/Instr/XdtrAccess", "Guest attempted to access descriptor table register (GDTR, IDTR, LDTR).");
    690691        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite,            "/HM/CPU%d/Exit/IO/Write", "I/O write.");
     
    733734        HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdownFlush,      "/HM/CPU%d/Flush/Shootdown/TLB", "Inter-VCPU request to flush entire guest-TLB.");
    734735
    735         HM_REG_COUNTER(&pVCpu->hm.s.StatTscOffsetAdjusted,      "/HM/CPU%d/TSC/OffsetAdjusted", "TSC offset overflowed for paravirt. TSC. Fudged.");
    736736        HM_REG_COUNTER(&pVCpu->hm.s.StatTscParavirt,            "/HM/CPU%d/TSC/Paravirt", "Paravirtualized TSC in effect.");
    737737        HM_REG_COUNTER(&pVCpu->hm.s.StatTscOffset,              "/HM/CPU%d/TSC/Offset", "TSC offsetting is in effect.");
    738738        HM_REG_COUNTER(&pVCpu->hm.s.StatTscIntercept,           "/HM/CPU%d/TSC/Intercept", "Intercept TSC accesses.");
    739         HM_REG_COUNTER(&pVCpu->hm.s.StatTscInterceptOverFlow,   "/HM/CPU%d/TSC/InterceptOverflow", "TSC offset overflow, fallback to intercept TSC accesses.");
    740739
    741740        HM_REG_COUNTER(&pVCpu->hm.s.StatDRxArmed,               "/HM/CPU%d/Debug/Armed", "Loaded guest-debug state while loading guest-state.");
  • trunk/src/VBox/VMM/VMMR3/TM.cpp

    r53721 r54065  
    55
    66/*
    7  * Copyright (C) 2006-2014 Oracle Corporation
     7 * Copyright (C) 2006-2015 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    126126#include <VBox/vmm/mm.h>
    127127#include <VBox/vmm/hm.h>
     128#include <VBox/vmm/gim.h>
    128129#include <VBox/vmm/ssm.h>
    129130#include <VBox/vmm/dbgf.h>
     
    181182static DECLCALLBACK(void)   tmR3TimerInfoActive(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    182183static DECLCALLBACK(void)   tmR3InfoClocks(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
     184static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtToggle(PVM pVM, PVMCPU pVCpu, void *pvData);
    183185
    184186
     
    331333    rc = CFGMR3ValidateConfig(pCfgHandle, "/TM/",
    332334                              "TSCMode|"
     335                              "TSCModeSwitchAllowed|"
    333336                              "TSCTicksPerSecond|"
    334337                              "TSCTiedToExecution|"
     
    378381    }
    379382    else if (RT_FAILURE(rc))
    380         return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying string value \"Mode\""));
     383        return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying string value \"TSCMode\""));
    381384    else
    382385    {
     
    388391            pVM->tm.s.enmTSCMode = TMTSCMODE_DYNAMIC;
    389392        else
    390             return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Unrecognized TM mode value \"%s\""), szTSCMode);
    391     }
     393            return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Unrecognized TM TSC mode value \"%s\""), szTSCMode);
     394    }
     395
     396    /**
     397     * @cfgm{/TM/TSCModeSwitchAllowed, bool, Whether TM TSC mode switch is allowed
     398     *      at runtime}
     399     * When using paravirtualized guests, we dynamically switch TSC modes to a more
     400     * optimal one for performance. This setting allows overriding this behaviour.
     401     */
     402    rc = CFGMR3QueryBool(pCfgHandle, "TSCModeSwitchAllowed", &pVM->tm.s.fTSCModeSwitchAllowed);
     403    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
     404    {
     405        /* This is finally determined in TMR3InitFinalize() as GIM isn't initialized yet. */
     406        pVM->tm.s.fTSCModeSwitchAllowed = true;
     407    }
     408    else if (RT_FAILURE(rc))
     409        return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying bool value \"TSCModeSwitchAllowed\""));
    392410
    393411    /** @cfgm{/TM/TSCTicksPerSecond, uint32_t, Current TSC frequency from GIP}
     
    548566    }
    549567
    550     /* Setup and report */
     568    /*
     569     * Gather the Host Hz configuration values.
     570     */
     571    rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzMax", &pVM->tm.s.cHostHzMax, 20000);
     572    if (RT_FAILURE(rc))
     573        return VMSetError(pVM, rc, RT_SRC_POS,
     574                          N_("Configuration error: Failed to querying uint32_t value \"HostHzMax\""));
     575
     576    rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorTimerCpu", &pVM->tm.s.cPctHostHzFudgeFactorTimerCpu, 111);
     577    if (RT_FAILURE(rc))
     578        return VMSetError(pVM, rc, RT_SRC_POS,
     579                          N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorTimerCpu\""));
     580
     581    rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorOtherCpu", &pVM->tm.s.cPctHostHzFudgeFactorOtherCpu, 110);
     582    if (RT_FAILURE(rc))
     583        return VMSetError(pVM, rc, RT_SRC_POS,
     584                          N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorOtherCpu\""));
     585
     586    rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp100", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp100, 300);
     587    if (RT_FAILURE(rc))
     588        return VMSetError(pVM, rc, RT_SRC_POS,
     589                          N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp100\""));
     590
     591    rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp200", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp200, 250);
     592    if (RT_FAILURE(rc))
     593        return VMSetError(pVM, rc, RT_SRC_POS,
     594                          N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp200\""));
     595
     596    rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp400", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp400, 200);
     597    if (RT_FAILURE(rc))
     598        return VMSetError(pVM, rc, RT_SRC_POS,
     599                          N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp400\""));
     600
     601    /*
     602     * Finally, setup and report.
     603     */
     604    pVM->tm.s.enmOriginalTSCMode = pVM->tm.s.enmTSCMode;
    551605    CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~X86_CR4_TSD);
    552606    LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%'RU64) enmTSCMode=%d (%s)\n"
     
    556610
    557611    /*
    558      * Gather the Host Hz configuration values.
    559      */
    560     rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzMax", &pVM->tm.s.cHostHzMax, 20000);
    561     if (RT_FAILURE(rc))
    562         return VMSetError(pVM, rc, RT_SRC_POS,
    563                           N_("Configuration error: Failed to querying uint32_t value \"HostHzMax\""));
    564 
    565     rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorTimerCpu", &pVM->tm.s.cPctHostHzFudgeFactorTimerCpu, 111);
    566     if (RT_FAILURE(rc))
    567         return VMSetError(pVM, rc, RT_SRC_POS,
    568                           N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorTimerCpu\""));
    569 
    570     rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorOtherCpu", &pVM->tm.s.cPctHostHzFudgeFactorOtherCpu, 110);
    571     if (RT_FAILURE(rc))
    572         return VMSetError(pVM, rc, RT_SRC_POS,
    573                           N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorOtherCpu\""));
    574 
    575     rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp100", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp100, 300);
    576     if (RT_FAILURE(rc))
    577         return VMSetError(pVM, rc, RT_SRC_POS,
    578                           N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp100\""));
    579 
    580     rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp200", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp200, 250);
    581     if (RT_FAILURE(rc))
    582         return VMSetError(pVM, rc, RT_SRC_POS,
    583                           N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp200\""));
    584 
    585     rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp400", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp400, 200);
    586     if (RT_FAILURE(rc))
    587         return VMSetError(pVM, rc, RT_SRC_POS,
    588                           N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp400\""));
     612     * Dump the GIPCPU TSC-deltas, iterate using the Apic Id to get master at the beginning in most cases.
     613     */
     614    unsigned cGipCpus = RT_ELEMENTS(g_pSUPGlobalInfoPage->aiCpuFromApicId);
     615    for (unsigned i = 0; i < cGipCpus; i++)
     616    {
     617        uint16_t iCpu  = g_pSUPGlobalInfoPage->aiCpuFromApicId[i];
     618#if 1
     619        if (iCpu != UINT16_MAX)
     620            LogRel(("TM: GIP - CPU[%d]: idApic=%d i64TSCDelta=%RI64\n", g_pSUPGlobalInfoPage->aCPUs[iCpu].idCpu,
     621                    g_pSUPGlobalInfoPage->aCPUs[iCpu].idApic, g_pSUPGlobalInfoPage->aCPUs[iCpu].i64TSCDelta));
     622#else
     623        /* Dump 2 entries per line, saves vertical space in release log but more dumps bytes due to formatting. */
     624        uint16_t iCpu2 = UINT16_MAX;
     625        for (unsigned k = i + 1; k < cGipCpus; k++)
     626        {
     627            iCpu2 = g_pSUPGlobalInfoPage->aiCpuFromApicId[k];
     628            if (iCpu2 != UINT16_MAX)
     629            {
     630                i = k + 1;
     631                break;
     632            }
     633        }
     634        if (   iCpu  != UINT16_MAX
     635            && iCpu2 != UINT16_MAX)
     636        {
     637            LogRel(("TM: GIP - CPU[%d]: idApic=%d i64TSCDelta=%-4lld CPU[%d]: idApic=%d i64TSCDelta=%lld\n",
     638                    g_pSUPGlobalInfoPage->aCPUs[iCpu].idCpu, g_pSUPGlobalInfoPage->aCPUs[iCpu].idApic,
     639                    g_pSUPGlobalInfoPage->aCPUs[iCpu].i64TSCDelta, g_pSUPGlobalInfoPage->aCPUs[iCpu2].idCpu,
     640                    g_pSUPGlobalInfoPage->aCPUs[iCpu2].idApic, g_pSUPGlobalInfoPage->aCPUs[iCpu2].i64TSCDelta));
     641        }
     642        else if (iCpu != UINT16_MAX)
     643            LogRel(("TM: GIP - CPU[%d]: idApic=%d i64TSCDelta=%lld\n", g_pSUPGlobalInfoPage->aCPUs[iCpu].idCpu,
     644                    g_pSUPGlobalInfoPage->aCPUs[iCpu].idApic));
     645#endif
     646    }
    589647
    590648    /*
     
    10271085#endif
    10281086
     1087    /*
     1088     * GIM is now initialized. Determine if TSC mode switching is allowed (respecting CFGM override).
     1089     */
     1090    pVM->tm.s.fTSCModeSwitchAllowed &= GIMIsEnabled(pVM) && HMIsEnabled(pVM);
    10291091    return rc;
    10301092}
     
    11531215    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    11541216    VMCPU_FF_CLEAR(pVCpuDst, VMCPU_FF_TIMER); /** @todo FIXME: this isn't right. */
     1217
     1218    /*
     1219     * Switch TM TSC mode back to the original mode after a reset for
     1220     * paravirtualized guests that alter the TM TSC mode during operation.
     1221     */
     1222    if (   pVM->tm.s.fTSCModeSwitchAllowed
     1223        && pVM->tm.s.enmTSCMode != pVM->tm.s.enmOriginalTSCMode)
     1224    {
     1225        bool fParavirtTSC = false;
     1226        tmR3CpuTickParavirtToggle(pVM, NULL /* pVCpuEmt */, &fParavirtTSC);
     1227    }
     1228    Assert(!GIMIsParavirtTscEnabled(pVM));
     1229    pVM->tm.s.fParavirtTscEnabled = false;
     1230
    11551231    TM_UNLOCK_TIMERS(pVM);
    11561232}
     
    30553131#endif /* !VBOX_WITHOUT_NS_ACCOUNTING */
    30563132
     3133
     3134/**
     3135 * Switch TM TSC mode to the most appropriate/efficient one.
     3136 *
     3137 * @returns strict VBox status code.
     3138 * @param   pVM         Pointer to the VM.
     3139 * @param   pVCpuEmt    Pointer to the VMCPU it's called on, can be NULL.
     3140 * @param   pvData      Opaque pointer to whether usage of paravirt. TSC is
     3141 *                      enabled or disabled by the guest OS.
     3142 *
     3143 * @thread  EMT.
     3144 * @remarks Must only be called during an EMTs rendezvous.
     3145 */
     3146static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtToggle(PVM pVM, PVMCPU pVCpuEmt, void *pvData)
     3147{
     3148    Assert(pVM);
     3149    Assert(pvData);
     3150    Assert(pVM->tm.s.fTSCModeSwitchAllowed);
     3151    NOREF(pVCpuEmt);
     3152
     3153    bool *pfEnable = (bool *)pvData;
     3154    if (*pfEnable)
     3155    {
     3156        if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
     3157        {
     3158            uint64_t u64NowVirtSync = TMVirtualSyncGetNoCheck(pVM);
     3159            uint64_t u64Now = ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
     3160            uint32_t cCpus  = pVM->cCpus;
     3161            uint64_t u64RealTSC = ASMReadTSC();
     3162            for (uint32_t i = 0; i < cCpus; i++)
     3163            {
     3164                PVMCPU   pVCpu = &pVM->aCpus[i];
     3165                uint64_t u64TickOld = u64Now - pVCpu->tm.s.offTSCRawSrc;
     3166
     3167                /*
     3168                 * The return value of TMCpuTickGet() and the guest's TSC value (u64Tick) must
     3169                 * remain constant across the TM TSC mode-switch.
     3170                 * OldTick = VrSync - CurOff
     3171                 * NewTick = RealTsc - NewOff
     3172                 * NewTick = OldTick
     3173                 *  => RealTsc - NewOff = VrSync - CurOff
     3174                 *  => NewOff = CurOff + RealTsc - VrSync
     3175                 */
     3176                pVCpu->tm.s.offTSCRawSrc = pVCpu->tm.s.offTSCRawSrc + u64RealTSC  - u64Now;
     3177
     3178                /* If the new offset results in the TSC going backwards, re-adjust the offset. */
     3179                if (u64RealTSC - pVCpu->tm.s.offTSCRawSrc < u64TickOld)
     3180                    pVCpu->tm.s.offTSCRawSrc += u64TickOld - u64RealTSC;
     3181                Assert(u64RealTSC - pVCpu->tm.s.offTSCRawSrc >= u64TickOld);
     3182            }
     3183            pVM->tm.s.enmTSCMode = TMTSCMODE_REAL_TSC_OFFSET;
     3184            LogRel(("TM: Switched TSC mode. New enmTSCMode=%d (%s)\n", pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM)));
     3185        }
     3186    }
     3187    else
     3188    {
     3189        if (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
     3190            && pVM->tm.s.enmTSCMode != pVM->tm.s.enmOriginalTSCMode)
     3191        {
     3192            uint64_t u64NowVirtSync = TMVirtualSyncGetNoCheck(pVM);
     3193            uint64_t u64Now     = ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
     3194            uint64_t u64RealTSC = ASMReadTSC();          /** @todo replace with SUPReadTSC() eventually. */
     3195            uint32_t cCpus      = pVM->cCpus;
     3196            for (uint32_t i = 0; i < cCpus; i++)
     3197            {
     3198                PVMCPU   pVCpu      = &pVM->aCpus[i];
     3199                uint64_t u64TickOld = u64RealTSC - pVCpu->tm.s.offTSCRawSrc;
     3200
      3201                /* Update the last-seen tick here as we haven't been updating it (as we don't
     3202                   need it) while in pure TSC-offsetting mode. */
     3203                pVCpu->tm.s.u64TSCLastSeen = pVCpu->tm.s.u64TSC;
     3204
     3205                /*
     3206                 * The return value of TMCpuTickGet() and the guest's TSC value (u64Tick) must
     3207                 * remain constant across the TM TSC mode-switch.
     3208                 * OldTick = RealTsc - CurOff
     3209                 * NewTick = VrSync - NewOff
     3210                 * NewTick = OldTick
     3211                 *  => VrSync - NewOff = RealTsc - CurOff
     3212                 *  => NewOff = CurOff + VrSync - RealTsc
     3213                 */
     3214                pVCpu->tm.s.offTSCRawSrc = pVCpu->tm.s.offTSCRawSrc + u64Now - u64RealTSC;
     3215
     3216                /* If the new offset results in the TSC going backwards, re-adjust the offset. */
     3217                if (u64Now - pVCpu->tm.s.offTSCRawSrc < u64TickOld)
     3218                    pVCpu->tm.s.offTSCRawSrc += u64TickOld - u64Now;
     3219                Assert(u64Now - pVCpu->tm.s.offTSCRawSrc >= u64TickOld);
     3220            }
     3221            pVM->tm.s.enmTSCMode = pVM->tm.s.enmOriginalTSCMode;
     3222            LogRel(("TM: Switched TSC mode. New enmTSCMode=%d (%s)\n", pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM)));
     3223        }
     3224    }
     3225    return VINF_SUCCESS;
     3226}
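
A standalone numeric check of the enable-path derivation in the comments above (NewOff = CurOff + RealTsc - VrSync), with made-up tick values, showing that the guest-visible TSC value is preserved across the mode switch:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical values; arithmetic wraps modulo 2^64 just like in TM. */
        uint64_t u64VrSync  = 900000000ULL;   /* virtual-sync clock scaled to TSC ticks */
        uint64_t u64RealTsc = 7000000000ULL;  /* host TSC at the moment of the switch */
        uint64_t offCur     = 123456789ULL;   /* current offTSCRawSrc */

        uint64_t u64TickOld = u64VrSync - offCur;              /* guest TSC before the switch */
        uint64_t offNew     = offCur + u64RealTsc - u64VrSync; /* NewOff from the derivation  */
        uint64_t u64TickNew = u64RealTsc - offNew;             /* guest TSC after the switch  */

        printf("old=%" PRIu64 " new=%" PRIu64 " -> %s\n", u64TickOld, u64TickNew,
               u64TickOld == u64TickNew ? "unchanged across the switch" : "mismatch");
        return 0;
    }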
     3227
     3228
     3229/**
     3230 * Notify TM that the guest has enabled usage of a paravirtualized TSC.
     3231 *
     3232 * @returns VBox status code.
     3233 * @param   pVM     Pointer to the VM.
     3234 */
     3235VMMR3_INT_DECL(int) TMR3CpuTickParavirtEnable(PVM pVM)
     3236{
     3237    int rc = VINF_SUCCESS;
     3238    if (pVM->tm.s.fTSCModeSwitchAllowed)
     3239    {
     3240        bool fEnable = true;
     3241        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtToggle, (void *)&fEnable);
     3242    }
     3243    pVM->tm.s.fParavirtTscEnabled = true;
     3244    return rc;
     3245}
     3246
     3247
     3248/**
     3249 * Notify TM that the guest has disabled usage of a paravirtualized TSC.
     3250 *
     3251 * @returns VBox status code.
     3252 * @param   pVM     Pointer to the VM.
     3253 */
     3254VMMR3_INT_DECL(int) TMR3CpuTickParavirtDisable(PVM pVM)
     3255{
     3256    int rc = VINF_SUCCESS;
     3257    if (pVM->tm.s.fTSCModeSwitchAllowed)
     3258    {
     3259        bool fEnable = false;
     3260        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtToggle, (void *)&fEnable);
     3261    }
     3262    pVM->tm.s.fParavirtTscEnabled = false;
     3263    return rc;
     3264}
     3265
     3266
    30573267/**
    30583268 * Gets the 5 char clock name for the info tables.
     
    32543464        case TMTSCMODE_VIRT_TSC_EMULATED:  return "VirtTscEmulated";
    32553465        case TMTSCMODE_DYNAMIC:            return "Dynamic";
    3256         default:                           return "?????";
    3257     }
    3258 }
    3259 
     3466        default:                           return "???";
     3467    }
     3468}
     3469
  • trunk/src/VBox/VMM/VMMR3/VM.cpp

    r52699 r54065  
    55
    66/*
    7  * Copyright (C) 2006-2014 Oracle Corporation
     7 * Copyright (C) 2006-2015 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    27972797        CSAMR3Reset(pVM);
    27982798#endif
    2799         GIMR3Reset(pVM);                /* This must come *before* PDM. */
     2799        GIMR3Reset(pVM);                /* This must come *before* PDM and TM. */
    28002800        PDMR3Reset(pVM);
    28012801        PGMR3Reset(pVM);
  • trunk/src/VBox/VMM/include/HMInternal.h

    r52766 r54065  
    55
    66/*
    7  * Copyright (C) 2006-2014 Oracle Corporation
     7 * Copyright (C) 2006-2015 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    896896    STAMCOUNTER             StatExitXdtrAccess;
    897897    STAMCOUNTER             StatExitHlt;
     898    STAMCOUNTER             StatExitHltToR3;
    898899    STAMCOUNTER             StatExitMwait;
    899900    STAMCOUNTER             StatExitMonitor;
     
    938939    STAMCOUNTER             StatSwitchLongJmpToR3;
    939940
    940     STAMCOUNTER             StatTscOffsetAdjusted;
    941941    STAMCOUNTER             StatTscParavirt;
    942942    STAMCOUNTER             StatTscOffset;
    943943    STAMCOUNTER             StatTscIntercept;
    944     STAMCOUNTER             StatTscInterceptOverFlow;
    945944
    946945    STAMCOUNTER             StatExitReasonNpf;
  • trunk/src/VBox/VMM/include/TMInternal.h

    r53441 r54065  
    55
    66/*
    7  * Copyright (C) 2006-2014 Oracle Corporation
     7 * Copyright (C) 2006-2015 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    324324typedef enum TMTSCMODE
    325325{
    326     /** The guest TSC is an emulated virtual TSC. */
     326    /** The guest TSC is an emulated, virtual TSC. */
    327327    TMTSCMODE_VIRT_TSC_EMULATED = 1,
    328328    /** The guest TSC is an offset of the real TSC. */
    329329    TMTSCMODE_REAL_TSC_OFFSET,
    330     /** The guest TSC is dynamically derived through emulation or offsetting. */
     330    /** The guest TSC is dynamically derived through emulating or offsetting. */
    331331    TMTSCMODE_DYNAMIC
    332332} TMTSCMODE;
     
    355355     *  Config variable: Mode (string). */
    356356    TMTSCMODE                   enmTSCMode;
     357    /** The original TSC mode of the VM. */
     358    TMTSCMODE                   enmOriginalTSCMode;
     359    /** Alignment padding. */
     360    uint32_t                    u32Alignment0;
    357361    /** Whether the TSC is tied to the execution of code.
    358362     * Config variable: TSCTiedToExecution (bool) */
     
    361365     * Config variable: TSCNotTiedToHalt (bool) */
    362366    bool                        fTSCNotTiedToHalt;
    363     /** Alignment padding. */
    364     bool                        afAlignment0[2];
     367    /** Whether TM TSC mode switching is allowed at runtime. */
     368    bool                        fTSCModeSwitchAllowed;
     369    /** Whether the guest has enabled use of paravirtualized TSC. */
     370    bool                        fParavirtTscEnabled;
    365371    /** The ID of the virtual CPU that normally runs the timers. */
    366372    VMCPUID                     idTimerCpu;
     
    685691    bool                        afAlignment0[3]; /**< alignment padding */
    686692
    687     /** The offset between the raw TSC source and the Guest TSC.
    688      * Only valid if fTicking is set and and fTSCUseRealTSC is clear. */
     693    /** The offset between the host tick (TSC/virtual depending on the TSC mode) and
     694     *  the guest tick. */
    689695    uint64_t                    offTSCRawSrc;
    690696