Changeset 54065 in vbox
- Timestamp:
- Feb 3, 2015 10:45:39 AM (10 years ago)
- Location:
- trunk
- Files:
-
- 12 edited
-
include/VBox/vmm/tm.h (modified) (2 diffs)
-
src/VBox/VMM/VMMAll/TMAll.cpp (modified) (2 diffs)
-
src/VBox/VMM/VMMAll/TMAllCpu.cpp (modified) (8 diffs)
-
src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (5 diffs)
-
src/VBox/VMM/VMMR0/HMVMXR0.cpp (modified) (9 diffs)
-
src/VBox/VMM/VMMR3/GIM.cpp (modified) (2 diffs)
-
src/VBox/VMM/VMMR3/GIMHv.cpp (modified) (3 diffs)
-
src/VBox/VMM/VMMR3/HM.cpp (modified) (3 diffs)
-
src/VBox/VMM/VMMR3/TM.cpp (modified) (12 diffs)
-
src/VBox/VMM/VMMR3/VM.cpp (modified) (2 diffs)
-
src/VBox/VMM/include/HMInternal.h (modified) (3 diffs)
-
src/VBox/VMM/include/TMInternal.h (modified) (5 diffs)
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/vmm/tm.h
r53789 r54065 134 134 VMM_INT_DECL(uint64_t) TMCpuTickGetNoCheck(PVMCPU pVCpu); 135 135 VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc); 136 VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, bool *pfParavirtTsc, uint64_t *poffRealTSC);136 VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfOffsettedTsc, bool *pfParavirtTsc); 137 137 VMM_INT_DECL(int) TMCpuTickSet(PVM pVM, PVMCPU pVCpu, uint64_t u64Tick); 138 138 VMM_INT_DECL(int) TMCpuTickSetLastSeen(PVMCPU pVCpu, uint64_t u64LastSeenTick); … … 269 269 VMMR3_INT_DECL(void) TMR3VirtualSyncFF(PVM pVM, PVMCPU pVCpu); 270 270 VMMR3_INT_DECL(PRTTIMESPEC) TMR3UtcNow(PVM pVM, PRTTIMESPEC pTime); 271 272 VMMR3_INT_DECL(int) TMR3CpuTickParavirtEnable(PVM pVM); 273 VMMR3_INT_DECL(int) TMR3CpuTickParavirtDisable(PVM pVM); 271 274 /** @} */ 272 275 #endif /* IN_RING3 */ -
trunk/src/VBox/VMM/VMMAll/TMAll.cpp
r50387 r54065 5 5 6 6 /* 7 * Copyright (C) 2006-201 3Oracle Corporation7 * Copyright (C) 2006-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 2575 2575 return uHz; 2576 2576 } 2577 -
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
r53441 r54065 5 5 6 6 /* 7 * Copyright (C) 2006-201 4Oracle Corporation7 * Copyright (C) 2006-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 174 174 * Record why we refused to use offsetted TSC. 175 175 * 176 * Used by TMCpuTickCanUseRealTSC and TMCpuTickGetDeadlineAndTscOffset.176 * Used by TMCpuTickCanUseRealTSC() and TMCpuTickGetDeadlineAndTscOffset(). 177 177 * 178 178 * @param pVM Pointer to the VM. … … 213 213 * @param pVCpu Pointer to the VMCPU. 214 214 * @param poffRealTSC The offset against the TSC of the current CPU. 215 * Can be NULL.216 * @param pfParavirtTsc Where to store whether paravirt. TSC can be used or217 * not.218 * @ thread EMT(pVCpu).215 * @param pfParavirtTsc Where to store whether paravirt. TSC is enabled. 216 * 217 * @thread EMT(pVCpu). 218 * @see TMCpuTickGetDeadlineAndTscOffset(). 219 219 */ 220 220 VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc) 221 221 { 222 222 PVM pVM = pVCpu->CTX_SUFF(pVM); 223 bool f ParavirtTsc = false;223 bool fOffsettedTsc = false; 224 224 225 225 /* 226 226 * We require: 227 * 1. Use of a paravirtualized TSC is enabled by the guest.228 * (OR)229 227 * 1. A fixed TSC, this is checked at init time. 230 228 * 2. That the TSC is ticking (we shouldn't be here if it isn't) … … 234 232 * c) we're not using warp drive (accelerated virtual guest time). 235 233 */ 236 *pfParavirtTsc = GIMIsParavirtTscEnabled(pVM); 237 if ( pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC 238 && RT_LIKELY(pVCpu->tm.s.fTSCTicking) 239 && ( pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET 240 || ( !pVM->tm.s.fVirtualSyncCatchUp 241 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking) 242 && !pVM->tm.s.fVirtualWarpDrive))) 243 { 244 if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET) 245 { 246 /* The source is the timer synchronous virtual clock. 
*/ 247 if (poffRealTSC) 248 { 249 uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */) 250 - pVCpu->tm.s.offTSCRawSrc; 251 /** @todo When we start collecting statistics on how much time we spend executing 252 * guest code before exiting, we should check this against the next virtual sync 253 * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase 254 * the chance that we'll get interrupted right after the timer expired. */ 255 *poffRealTSC = u64Now - ASMReadTSC(); 256 } 257 } 258 else if (poffRealTSC) 259 { 260 /* The source is the real TSC. */ 261 *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc; 262 } 263 /** @todo count this? */ 264 return true; 234 Assert(pVCpu->tm.s.fTSCTicking); 235 *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled; 236 237 if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET) 238 { 239 /* The source is the real TSC. */ 240 *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc; 241 return true; /** @todo count this? */ 242 } 243 244 if ( pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC 245 && !pVM->tm.s.fVirtualSyncCatchUp 246 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking) 247 && !pVM->tm.s.fVirtualWarpDrive) 248 { 249 /* The source is the timer synchronous virtual clock. */ 250 uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */) 251 - pVCpu->tm.s.offTSCRawSrc; 252 /** @todo When we start collecting statistics on how much time we spend executing 253 * guest code before exiting, we should check this against the next virtual sync 254 * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase 255 * the chance that we'll get interrupted right after the timer expired. */ 256 uint64_t u64TSC = ASMReadTSC(); /** @todo should be replaced with SUPReadTSC() eventually. */ 257 *poffRealTSC = u64Now - u64TSC; 258 fOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen; 259 return true; /** @todo count this? 
*/ 265 260 } 266 261 … … 304 299 * @returns The number of host CPU clock ticks to the next timer deadline. 305 300 * @param pVCpu The current CPU. 306 * @param pfParavirtTsc Where to store whether paravirt. TSC can be used or307 * not.308 301 * @param poffRealTSC The offset against the TSC of the current CPU. 302 * @param pfOffsettedTsc Where to store whether TSC offsetting can be used. 303 * @param pfParavirtTsc Where to store whether paravirt. TSC is enabled. 309 304 * 310 305 * @thread EMT(pVCpu). 311 * @ remarks Superset ofTMCpuTickCanUseRealTSC().312 */ 313 VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, bool *pfParavirtTsc,314 uint64_t *poffRealTSC)315 { 316 PVM pVM = pVCpu->CTX_SUFF(pVM);317 uint64_t cTicksToDeadline;306 * @see TMCpuTickCanUseRealTSC(). 307 */ 308 VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfOffsettedTsc, 309 bool *pfParavirtTsc) 310 { 311 PVM pVM = pVCpu->CTX_SUFF(pVM); 312 uint64_t cTicksToDeadline; 318 313 319 314 /* 320 315 * We require: 321 * 1. Use of a paravirtualized TSC is enabled by the guest.322 * (OR)323 316 * 1. A fixed TSC, this is checked at init time. 324 317 * 2. That the TSC is ticking (we shouldn't be here if it isn't) … … 328 321 * c) we're not using warp drive (accelerated virtual guest time). 329 322 */ 330 *pfParavirtTsc = GIMIsParavirtTscEnabled(pVM); 331 if ( pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC 332 && RT_LIKELY(pVCpu->tm.s.fTSCTicking) 333 && ( pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET 334 || ( !pVM->tm.s.fVirtualSyncCatchUp 335 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking) 336 && !pVM->tm.s.fVirtualWarpDrive))) 337 { 323 Assert(pVCpu->tm.s.fTSCTicking); 324 *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled; 325 326 if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET) 327 { 328 /* The source is the real TSC. 
*/ 329 *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc; 338 330 *pfOffsettedTsc = true; 339 if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET) 340 { 341 /* The source is the timer synchronous virtual clock. */ 342 uint64_t cNsToDeadline; 343 uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline); 344 uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */ 345 ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL) 346 : u64NowVirtSync; 347 u64Now -= pVCpu->tm.s.offTSCRawSrc; 348 *poffRealTSC = u64Now - ASMReadTSC(); 349 cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline); 350 } 351 else 352 { 353 /* The source is the real TSC. */ 354 *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc; 355 cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM)); 356 } 357 } 358 else 359 { 331 cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM)); 332 return cTicksToDeadline; 333 } 334 335 if ( pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC 336 && !pVM->tm.s.fVirtualSyncCatchUp 337 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking) 338 && !pVM->tm.s.fVirtualWarpDrive) 339 { 340 /* The source is the timer synchronous virtual clock. */ 341 uint64_t cNsToDeadline; 342 uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline); 343 uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */ 344 ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL) 345 : u64NowVirtSync; 346 u64Now -= pVCpu->tm.s.offTSCRawSrc; 347 *poffRealTSC = u64Now - ASMReadTSC(); /** @todo replace with SUPReadTSC() eventually. 
*/ 348 *pfOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen; 349 cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline); 350 return cTicksToDeadline; 351 } 352 360 353 #ifdef VBOX_WITH_STATISTICS 361 tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);354 tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu); 362 355 #endif 363 *pfOffsettedTsc = false; 364 *poffRealTSC = 0; 365 cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM)); 366 } 367 356 *pfOffsettedTsc = false; 357 *poffRealTSC = 0; 358 cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM)); 368 359 return cTicksToDeadline; 369 360 } … … 395 386 { 396 387 STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow); 397 pVCpu->tm.s.u64TSCLastSeen += 64; /* @todo choose a good increment here */388 pVCpu->tm.s.u64TSCLastSeen += 64; /** @todo choose a good increment here */ 398 389 u64 = pVCpu->tm.s.u64TSCLastSeen; 399 390 } … … 503 494 VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM) 504 495 { 505 /** @todo revisit this, not sure why we need to get the rate from GIP for 506 * real-tsc-offset. */ 507 if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET) 496 if ( pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET 497 && g_pSUPGlobalInfoPage->u32Mode != SUPGIPMODE_INVARIANT_TSC) 508 498 { 509 499 uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage); -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r53631 r54065 5 5 6 6 /* 7 * Copyright (C) 2013-201 4Oracle Corporation7 * Copyright (C) 2013-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 2257 2257 static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu) 2258 2258 { 2259 bool fParavirtTsc = false; 2259 bool fParavirtTsc; 2260 bool fCanUseRealTsc; 2260 2261 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb; 2261 if (TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc)) 2262 { 2263 uint64_t u64CurTSC = ASMReadTSC(); 2264 uint64_t u64LastTick = TMCpuTickGetLastSeen(pVCpu); 2265 2266 if (u64CurTSC + pVmcb->ctrl.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu)) 2267 { 2268 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC; 2269 pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP; 2270 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset); 2271 } 2272 else 2273 { 2274 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC; 2275 pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP; 2276 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow); 2277 } 2262 fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc); 2263 if (fCanUseRealTsc) 2264 { 2265 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC; 2266 pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP; 2267 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset); 2278 2268 } 2279 2269 else 2280 2270 { 2281 Assert(!fParavirtTsc);2282 2271 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC; 2283 2272 pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP; 2284 2273 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept); 2285 2274 } 2286 2275 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; 2276 2277 /** @todo later optimize this to be done elsewhere and not before every 2278 * VM-entry. 
*/ 2287 2279 if (fParavirtTsc) 2288 2280 { … … 2291 2283 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt); 2292 2284 } 2293 2294 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;2295 2285 } 2296 2286 … … 3200 3190 } 3201 3191 3202 /** @todo Last-seen-tick shouldn't be necessary when TM supports invariant3203 * mode. */3204 3192 if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC)) 3205 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset); 3193 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset); /** @todo use SUPReadTSC() eventually. */ 3206 3194 3207 3195 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x); … … 4357 4345 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 4358 4346 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 4347 if (rc != VINF_SUCCESS) 4348 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHltToR3); 4359 4349 return rc; 4360 4350 } -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r54058 r54065 5 5 6 6 /* 7 * Copyright (C) 2012-201 4Oracle Corporation7 * Copyright (C) 2012-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 5606 5606 if (pVM->hm.s.vmx.fUsePreemptTimer) 5607 5607 { 5608 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, & fOffsettedTsc, &fParavirtTsc,5609 & pVCpu->hm.s.vmx.u64TSCOffset);5608 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fOffsettedTsc, 5609 &fParavirtTsc); 5610 5610 5611 5611 /* Make sure the returned values have sane upper and lower boundaries. */ … … 5616 5616 5617 5617 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16); 5618 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);5618 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc); 5619 5619 } 5620 5620 else 5621 5621 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc); 5622 5622 5623 /** @todo later optimize this to be done elsewhere and not before every 5624 * VM-entry. */ 5623 5625 if (fParavirtTsc) 5624 5626 { … … 5630 5632 if (fOffsettedTsc) 5631 5633 { 5632 uint64_t u64CurTSC = ASMReadTSC(); 5633 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu)) 5634 { 5635 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */ 5636 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc); 5637 5638 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT; 5639 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc); 5640 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset); 5641 } 5642 else 5643 { 5644 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. 
*/ 5645 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT; 5646 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc); 5647 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow); 5648 } 5634 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */ 5635 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc); 5636 5637 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT; 5638 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc); 5639 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset); 5649 5640 } 5650 5641 else … … 5652 5643 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */ 5653 5644 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT; 5654 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);5645 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc); 5655 5646 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept); 5656 5647 } … … 8713 8704 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */ 8714 8705 8715 /** @todo Last-seen-tick shouldn't be necessary when TM supports invariant8716 * mode. */8717 8706 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT)) 8718 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset); 8707 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset); /** @todo use SUPReadTSC() eventually. */ 8719 8708 8720 8709 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x); … … 8839 8828 } 8840 8829 8841 /* Profil ingthe VM-exit. */8830 /* Profile the VM-exit. 
*/ 8842 8831 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason)); 8843 8832 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); … … 8922 8911 return VBOXSTRICTRC_TODO(rcStrict); 8923 8912 } 8924 /* Profiling the VM-exit. */ 8913 8914 /* Profile the VM-exit. */ 8925 8915 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason)); 8926 8916 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); … … 10461 10451 10462 10452 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 10453 if (rc != VINF_SUCCESS) 10454 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHltToR3); 10463 10455 return rc; 10464 10456 } -
trunk/src/VBox/VMM/VMMR3/GIM.cpp
r52767 r54065 5 5 6 6 /* 7 * Copyright (C) 2014 Oracle Corporation7 * Copyright (C) 2014-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 275 275 if (uVersion != GIM_SAVED_STATE_VERSION) 276 276 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION; 277 278 277 279 278 /** @todo Load per-CPU data. */ -
trunk/src/VBox/VMM/VMMR3/GIMHv.cpp
r52768 r54065 5 5 6 6 /* 7 * Copyright (C) 2014 Oracle Corporation7 * Copyright (C) 2014-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 586 586 pRefTsc->u32TscSequence = u32TscSeq; 587 587 pRefTsc->u64TscScale = ((INT64_C(10000) << 32) / u64TscKHz) << 32; 588 589 LogRel(("GIM: HyperV: Enabled TSC page at %#RGp - u64TscScale=%#RX64 u64TscKHz=%#RX64 (%'RU64)\n", GCPhysTscPage, 590 pRefTsc->u64TscScale, u64TscKHz, u64TscKHz)); 588 pRefTsc->i64TscOffset = 0; 589 590 LogRel(("GIM: HyperV: Enabled TSC page at %#RGp - u64TscScale=%#RX64 u64TscKHz=%#RX64 (%'RU64) Seq=%#RU32\n", 591 GCPhysTscPage, pRefTsc->u64TscScale, u64TscKHz, u64TscKHz, pRefTsc->u32TscSequence)); 592 593 TMR3CpuTickParavirtEnable(pVM); 591 594 return VINF_SUCCESS; 592 595 } … … 613 616 Assert(!pRegion->fMapped); 614 617 LogRel(("GIM: HyperV: Disabled TSC-page\n")); 618 619 TMR3CpuTickParavirtDisable(pVM); 615 620 return VINF_SUCCESS; 616 621 } -
trunk/src/VBox/VMM/VMMR3/HM.cpp
r53414 r54065 5 5 6 6 /* 7 * Copyright (C) 2006-201 4Oracle Corporation7 * Copyright (C) 2006-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 687 687 HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt, "/HM/CPU%d/Exit/Instr/Int", "Guest attempted to execute INT."); 688 688 HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt, "/HM/CPU%d/Exit/Instr/Hlt", "Guest attempted to execute HLT."); 689 HM_REG_COUNTER(&pVCpu->hm.s.StatExitHltToR3, "/HM/CPU%d/Exit/HltToR3", "HLT causing us to go to ring-3."); 689 690 HM_REG_COUNTER(&pVCpu->hm.s.StatExitXdtrAccess, "/HM/CPU%d/Exit/Instr/XdtrAccess", "Guest attempted to access descriptor table register (GDTR, IDTR, LDTR)."); 690 691 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite, "/HM/CPU%d/Exit/IO/Write", "I/O write."); … … 733 734 HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdownFlush, "/HM/CPU%d/Flush/Shootdown/TLB", "Inter-VCPU request to flush entire guest-TLB."); 734 735 735 HM_REG_COUNTER(&pVCpu->hm.s.StatTscOffsetAdjusted, "/HM/CPU%d/TSC/OffsetAdjusted", "TSC offset overflowed for paravirt. TSC. Fudged.");736 736 HM_REG_COUNTER(&pVCpu->hm.s.StatTscParavirt, "/HM/CPU%d/TSC/Paravirt", "Paravirtualized TSC in effect."); 737 737 HM_REG_COUNTER(&pVCpu->hm.s.StatTscOffset, "/HM/CPU%d/TSC/Offset", "TSC offsetting is in effect."); 738 738 HM_REG_COUNTER(&pVCpu->hm.s.StatTscIntercept, "/HM/CPU%d/TSC/Intercept", "Intercept TSC accesses."); 739 HM_REG_COUNTER(&pVCpu->hm.s.StatTscInterceptOverFlow, "/HM/CPU%d/TSC/InterceptOverflow", "TSC offset overflow, fallback to intercept TSC accesses.");740 739 741 740 HM_REG_COUNTER(&pVCpu->hm.s.StatDRxArmed, "/HM/CPU%d/Debug/Armed", "Loaded guest-debug state while loading guest-state."); -
trunk/src/VBox/VMM/VMMR3/TM.cpp
r53721 r54065 5 5 6 6 /* 7 * Copyright (C) 2006-201 4Oracle Corporation7 * Copyright (C) 2006-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 126 126 #include <VBox/vmm/mm.h> 127 127 #include <VBox/vmm/hm.h> 128 #include <VBox/vmm/gim.h> 128 129 #include <VBox/vmm/ssm.h> 129 130 #include <VBox/vmm/dbgf.h> … … 181 182 static DECLCALLBACK(void) tmR3TimerInfoActive(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 182 183 static DECLCALLBACK(void) tmR3InfoClocks(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 184 static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtToggle(PVM pVM, PVMCPU pVCpu, void *pvData); 183 185 184 186 … … 331 333 rc = CFGMR3ValidateConfig(pCfgHandle, "/TM/", 332 334 "TSCMode|" 335 "TSCModeSwitchAllowed|" 333 336 "TSCTicksPerSecond|" 334 337 "TSCTiedToExecution|" … … 378 381 } 379 382 else if (RT_FAILURE(rc)) 380 return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying string value \" Mode\""));383 return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying string value \"TSCMode\"")); 381 384 else 382 385 { … … 388 391 pVM->tm.s.enmTSCMode = TMTSCMODE_DYNAMIC; 389 392 else 390 return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Unrecognized TM mode value \"%s\""), szTSCMode); 391 } 393 return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Unrecognized TM TSC mode value \"%s\""), szTSCMode); 394 } 395 396 /** 397 * @cfgm{/TM/TSCModeSwitchAllowed, bool, Whether TM TSC mode switch is allowed 398 * at runtime} 399 * When using paravirtualized guests, we dynamically switch TSC modes to a more 400 * optimal one for performance. This setting allows overriding this behaviour. 401 */ 402 rc = CFGMR3QueryBool(pCfgHandle, "TSCModeSwitchAllowed", &pVM->tm.s.fTSCModeSwitchAllowed); 403 if (rc == VERR_CFGM_VALUE_NOT_FOUND) 404 { 405 /* This is finally determined in TMR3InitFinalize() as GIM isn't initialized yet. 
*/ 406 pVM->tm.s.fTSCModeSwitchAllowed = true; 407 } 408 else if (RT_FAILURE(rc)) 409 return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying bool value \"TSCModeSwitchAllowed\"")); 392 410 393 411 /** @cfgm{/TM/TSCTicksPerSecond, uint32_t, Current TSC frequency from GIP} … … 548 566 } 549 567 550 /* Setup and report */ 568 /* 569 * Gather the Host Hz configuration values. 570 */ 571 rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzMax", &pVM->tm.s.cHostHzMax, 20000); 572 if (RT_FAILURE(rc)) 573 return VMSetError(pVM, rc, RT_SRC_POS, 574 N_("Configuration error: Failed to querying uint32_t value \"HostHzMax\"")); 575 576 rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorTimerCpu", &pVM->tm.s.cPctHostHzFudgeFactorTimerCpu, 111); 577 if (RT_FAILURE(rc)) 578 return VMSetError(pVM, rc, RT_SRC_POS, 579 N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorTimerCpu\"")); 580 581 rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorOtherCpu", &pVM->tm.s.cPctHostHzFudgeFactorOtherCpu, 110); 582 if (RT_FAILURE(rc)) 583 return VMSetError(pVM, rc, RT_SRC_POS, 584 N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorOtherCpu\"")); 585 586 rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp100", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp100, 300); 587 if (RT_FAILURE(rc)) 588 return VMSetError(pVM, rc, RT_SRC_POS, 589 N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp100\"")); 590 591 rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp200", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp200, 250); 592 if (RT_FAILURE(rc)) 593 return VMSetError(pVM, rc, RT_SRC_POS, 594 N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp200\"")); 595 596 rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp400", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp400, 200); 597 if (RT_FAILURE(rc)) 598 return VMSetError(pVM, rc, RT_SRC_POS, 599 
N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp400\"")); 600 601 /* 602 * Finally, setup and report. 603 */ 604 pVM->tm.s.enmOriginalTSCMode = pVM->tm.s.enmTSCMode; 551 605 CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~X86_CR4_TSD); 552 606 LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%'RU64) enmTSCMode=%d (%s)\n" … … 556 610 557 611 /* 558 * Gather the Host Hz configuration values. 559 */ 560 rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzMax", &pVM->tm.s.cHostHzMax, 20000); 561 if (RT_FAILURE(rc)) 562 return VMSetError(pVM, rc, RT_SRC_POS, 563 N_("Configuration error: Failed to querying uint32_t value \"HostHzMax\"")); 564 565 rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorTimerCpu", &pVM->tm.s.cPctHostHzFudgeFactorTimerCpu, 111); 566 if (RT_FAILURE(rc)) 567 return VMSetError(pVM, rc, RT_SRC_POS, 568 N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorTimerCpu\"")); 569 570 rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorOtherCpu", &pVM->tm.s.cPctHostHzFudgeFactorOtherCpu, 110); 571 if (RT_FAILURE(rc)) 572 return VMSetError(pVM, rc, RT_SRC_POS, 573 N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorOtherCpu\"")); 574 575 rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp100", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp100, 300); 576 if (RT_FAILURE(rc)) 577 return VMSetError(pVM, rc, RT_SRC_POS, 578 N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp100\"")); 579 580 rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp200", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp200, 250); 581 if (RT_FAILURE(rc)) 582 return VMSetError(pVM, rc, RT_SRC_POS, 583 N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp200\"")); 584 585 rc = CFGMR3QueryU32Def(pCfgHandle, "HostHzFudgeFactorCatchUp400", &pVM->tm.s.cPctHostHzFudgeFactorCatchUp400, 200); 586 if (RT_FAILURE(rc)) 587 return VMSetError(pVM, rc, RT_SRC_POS, 588 
N_("Configuration error: Failed to querying uint32_t value \"HostHzFudgeFactorCatchUp400\"")); 612 * Dump the GIPCPU TSC-deltas, iterate using the Apic Id to get master at the beginning in most cases. 613 */ 614 unsigned cGipCpus = RT_ELEMENTS(g_pSUPGlobalInfoPage->aiCpuFromApicId); 615 for (unsigned i = 0; i < cGipCpus; i++) 616 { 617 uint16_t iCpu = g_pSUPGlobalInfoPage->aiCpuFromApicId[i]; 618 #if 1 619 if (iCpu != UINT16_MAX) 620 LogRel(("TM: GIP - CPU[%d]: idApic=%d i64TSCDelta=%RI64\n", g_pSUPGlobalInfoPage->aCPUs[iCpu].idCpu, 621 g_pSUPGlobalInfoPage->aCPUs[iCpu].idApic, g_pSUPGlobalInfoPage->aCPUs[iCpu].i64TSCDelta)); 622 #else 623 /* Dump 2 entries per line, saves vertical space in release log but more dumps bytes due to formatting. */ 624 uint16_t iCpu2 = UINT16_MAX; 625 for (unsigned k = i + 1; k < cGipCpus; k++) 626 { 627 iCpu2 = g_pSUPGlobalInfoPage->aiCpuFromApicId[k]; 628 if (iCpu2 != UINT16_MAX) 629 { 630 i = k + 1; 631 break; 632 } 633 } 634 if ( iCpu != UINT16_MAX 635 && iCpu2 != UINT16_MAX) 636 { 637 LogRel(("TM: GIP - CPU[%d]: idApic=%d i64TSCDelta=%-4lld CPU[%d]: idApic=%d i64TSCDelta=%lld\n", 638 g_pSUPGlobalInfoPage->aCPUs[iCpu].idCpu, g_pSUPGlobalInfoPage->aCPUs[iCpu].idApic, 639 g_pSUPGlobalInfoPage->aCPUs[iCpu].i64TSCDelta, g_pSUPGlobalInfoPage->aCPUs[iCpu2].idCpu, 640 g_pSUPGlobalInfoPage->aCPUs[iCpu2].idApic, g_pSUPGlobalInfoPage->aCPUs[iCpu2].i64TSCDelta)); 641 } 642 else if (iCpu != UINT16_MAX) 643 LogRel(("TM: GIP - CPU[%d]: idApic=%d i64TSCDelta=%lld\n", g_pSUPGlobalInfoPage->aCPUs[iCpu].idCpu, 644 g_pSUPGlobalInfoPage->aCPUs[iCpu].idApic)); 645 #endif 646 } 589 647 590 648 /* … … 1027 1085 #endif 1028 1086 1087 /* 1088 * GIM is now initialized. Determine if TSC mode switching is allowed (respecting CFGM override). 
1089 */ 1090 pVM->tm.s.fTSCModeSwitchAllowed &= GIMIsEnabled(pVM) && HMIsEnabled(pVM); 1029 1091 return rc; 1030 1092 } … … 1153 1215 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu]; 1154 1216 VMCPU_FF_CLEAR(pVCpuDst, VMCPU_FF_TIMER); /** @todo FIXME: this isn't right. */ 1217 1218 /* 1219 * Switch TM TSC mode back to the original mode after a reset for 1220 * paravirtualized guests that alter the TM TSC mode during operation. 1221 */ 1222 if ( pVM->tm.s.fTSCModeSwitchAllowed 1223 && pVM->tm.s.enmTSCMode != pVM->tm.s.enmOriginalTSCMode) 1224 { 1225 bool fParavirtTSC = false; 1226 tmR3CpuTickParavirtToggle(pVM, NULL /* pVCpuEmt */, &fParavirtTSC); 1227 } 1228 Assert(!GIMIsParavirtTscEnabled(pVM)); 1229 pVM->tm.s.fParavirtTscEnabled = false; 1230 1155 1231 TM_UNLOCK_TIMERS(pVM); 1156 1232 } … … 3055 3131 #endif /* !VBOX_WITHOUT_NS_ACCOUNTING */ 3056 3132 3133 3134 /** 3135 * Switch TM TSC mode to the most appropriate/efficient one. 3136 * 3137 * @returns strict VBox status code. 3138 * @param pVM Pointer to the VM. 3139 * @param pVCpuEmt Pointer to the VMCPU it's called on, can be NULL. 3140 * @param pvData Opaque pointer to whether usage of paravirt. TSC is 3141 * enabled or disabled by the guest OS. 3142 * 3143 * @thread EMT. 3144 * @remarks Must only be called during an EMTs rendezvous. 
3145 */ 3146 static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtToggle(PVM pVM, PVMCPU pVCpuEmt, void *pvData) 3147 { 3148 Assert(pVM); 3149 Assert(pvData); 3150 Assert(pVM->tm.s.fTSCModeSwitchAllowed); 3151 NOREF(pVCpuEmt); 3152 3153 bool *pfEnable = (bool *)pvData; 3154 if (*pfEnable) 3155 { 3156 if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET) 3157 { 3158 uint64_t u64NowVirtSync = TMVirtualSyncGetNoCheck(pVM); 3159 uint64_t u64Now = ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL); 3160 uint32_t cCpus = pVM->cCpus; 3161 uint64_t u64RealTSC = ASMReadTSC(); 3162 for (uint32_t i = 0; i < cCpus; i++) 3163 { 3164 PVMCPU pVCpu = &pVM->aCpus[i]; 3165 uint64_t u64TickOld = u64Now - pVCpu->tm.s.offTSCRawSrc; 3166 3167 /* 3168 * The return value of TMCpuTickGet() and the guest's TSC value (u64Tick) must 3169 * remain constant across the TM TSC mode-switch. 3170 * OldTick = VrSync - CurOff 3171 * NewTick = RealTsc - NewOff 3172 * NewTick = OldTick 3173 * => RealTsc - NewOff = VrSync - CurOff 3174 * => NewOff = CurOff + RealTsc - VrSync 3175 */ 3176 pVCpu->tm.s.offTSCRawSrc = pVCpu->tm.s.offTSCRawSrc + u64RealTSC - u64Now; 3177 3178 /* If the new offset results in the TSC going backwards, re-adjust the offset. */ 3179 if (u64RealTSC - pVCpu->tm.s.offTSCRawSrc < u64TickOld) 3180 pVCpu->tm.s.offTSCRawSrc += u64TickOld - u64RealTSC; 3181 Assert(u64RealTSC - pVCpu->tm.s.offTSCRawSrc >= u64TickOld); 3182 } 3183 pVM->tm.s.enmTSCMode = TMTSCMODE_REAL_TSC_OFFSET; 3184 LogRel(("TM: Switched TSC mode. 
New enmTSCMode=%d (%s)\n", pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM))); 3185 } 3186 } 3187 else 3188 { 3189 if ( pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET 3190 && pVM->tm.s.enmTSCMode != pVM->tm.s.enmOriginalTSCMode) 3191 { 3192 uint64_t u64NowVirtSync = TMVirtualSyncGetNoCheck(pVM); 3193 uint64_t u64Now = ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL); 3194 uint64_t u64RealTSC = ASMReadTSC(); /** @todo replace with SUPReadTSC() eventually. */ 3195 uint32_t cCpus = pVM->cCpus; 3196 for (uint32_t i = 0; i < cCpus; i++) 3197 { 3198 PVMCPU pVCpu = &pVM->aCpus[i]; 3199 uint64_t u64TickOld = u64RealTSC - pVCpu->tm.s.offTSCRawSrc; 3200 3201 /* Update the last-seen tick here as we haven't been updating it (as we don't 3202 need it) while in pure TSC-offsetting mode. */ 3203 pVCpu->tm.s.u64TSCLastSeen = pVCpu->tm.s.u64TSC; 3204 3205 /* 3206 * The return value of TMCpuTickGet() and the guest's TSC value (u64Tick) must 3207 * remain constant across the TM TSC mode-switch. 3208 * OldTick = RealTsc - CurOff 3209 * NewTick = VrSync - NewOff 3210 * NewTick = OldTick 3211 * => VrSync - NewOff = RealTsc - CurOff 3212 * => NewOff = CurOff + VrSync - RealTsc 3213 */ 3214 pVCpu->tm.s.offTSCRawSrc = pVCpu->tm.s.offTSCRawSrc + u64Now - u64RealTSC; 3215 3216 /* If the new offset results in the TSC going backwards, re-adjust the offset. */ 3217 if (u64Now - pVCpu->tm.s.offTSCRawSrc < u64TickOld) 3218 pVCpu->tm.s.offTSCRawSrc += u64TickOld - u64Now; 3219 Assert(u64Now - pVCpu->tm.s.offTSCRawSrc >= u64TickOld); 3220 } 3221 pVM->tm.s.enmTSCMode = pVM->tm.s.enmOriginalTSCMode; 3222 LogRel(("TM: Switched TSC mode. New enmTSCMode=%d (%s)\n", pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM))); 3223 } 3224 } 3225 return VINF_SUCCESS; 3226 } 3227 3228 3229 /** 3230 * Notify TM that the guest has enabled usage of a paravirtualized TSC. 3231 * 3232 * @returns VBox status code. 3233 * @param pVM Pointer to the VM. 
3234 */ 3235 VMMR3_INT_DECL(int) TMR3CpuTickParavirtEnable(PVM pVM) 3236 { 3237 int rc = VINF_SUCCESS; 3238 if (pVM->tm.s.fTSCModeSwitchAllowed) 3239 { 3240 bool fEnable = true; 3241 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtToggle, (void *)&fEnable); 3242 } 3243 pVM->tm.s.fParavirtTscEnabled = true; 3244 return rc; 3245 } 3246 3247 3248 /** 3249 * Notify TM that the guest has disabled usage of a paravirtualized TSC. 3250 * 3251 * @returns VBox status code. 3252 * @param pVM Pointer to the VM. 3253 */ 3254 VMMR3_INT_DECL(int) TMR3CpuTickParavirtDisable(PVM pVM) 3255 { 3256 int rc = VINF_SUCCESS; 3257 if (pVM->tm.s.fTSCModeSwitchAllowed) 3258 { 3259 bool fEnable = false; 3260 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtToggle, (void *)&fEnable); 3261 } 3262 pVM->tm.s.fParavirtTscEnabled = false; 3263 return rc; 3264 } 3265 3266 3057 3267 /** 3058 3268 * Gets the 5 char clock name for the info tables. … … 3254 3464 case TMTSCMODE_VIRT_TSC_EMULATED: return "VirtTscEmulated"; 3255 3465 case TMTSCMODE_DYNAMIC: return "Dynamic"; 3256 default: return "??? ??";3257 } 3258 } 3259 3466 default: return "???"; 3467 } 3468 } 3469 -
trunk/src/VBox/VMM/VMMR3/VM.cpp
r52699 r54065 5 5 6 6 /* 7 * Copyright (C) 2006-201 4Oracle Corporation7 * Copyright (C) 2006-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 2797 2797 CSAMR3Reset(pVM); 2798 2798 #endif 2799 GIMR3Reset(pVM); /* This must come *before* PDM . */2799 GIMR3Reset(pVM); /* This must come *before* PDM and TM. */ 2800 2800 PDMR3Reset(pVM); 2801 2801 PGMR3Reset(pVM); -
trunk/src/VBox/VMM/include/HMInternal.h
r52766 r54065 5 5 6 6 /* 7 * Copyright (C) 2006-201 4Oracle Corporation7 * Copyright (C) 2006-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 896 896 STAMCOUNTER StatExitXdtrAccess; 897 897 STAMCOUNTER StatExitHlt; 898 STAMCOUNTER StatExitHltToR3; 898 899 STAMCOUNTER StatExitMwait; 899 900 STAMCOUNTER StatExitMonitor; … … 938 939 STAMCOUNTER StatSwitchLongJmpToR3; 939 940 940 STAMCOUNTER StatTscOffsetAdjusted;941 941 STAMCOUNTER StatTscParavirt; 942 942 STAMCOUNTER StatTscOffset; 943 943 STAMCOUNTER StatTscIntercept; 944 STAMCOUNTER StatTscInterceptOverFlow;945 944 946 945 STAMCOUNTER StatExitReasonNpf; -
trunk/src/VBox/VMM/include/TMInternal.h
r53441 r54065 5 5 6 6 /* 7 * Copyright (C) 2006-201 4Oracle Corporation7 * Copyright (C) 2006-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 324 324 typedef enum TMTSCMODE 325 325 { 326 /** The guest TSC is an emulated virtual TSC. */326 /** The guest TSC is an emulated, virtual TSC. */ 327 327 TMTSCMODE_VIRT_TSC_EMULATED = 1, 328 328 /** The guest TSC is an offset of the real TSC. */ 329 329 TMTSCMODE_REAL_TSC_OFFSET, 330 /** The guest TSC is dynamically derived through emulati onor offsetting. */330 /** The guest TSC is dynamically derived through emulating or offsetting. */ 331 331 TMTSCMODE_DYNAMIC 332 332 } TMTSCMODE; … … 355 355 * Config variable: Mode (string). */ 356 356 TMTSCMODE enmTSCMode; 357 /** The original TSC mode of the VM. */ 358 TMTSCMODE enmOriginalTSCMode; 359 /** Alignment padding. */ 360 uint32_t u32Alignment0; 357 361 /** Whether the TSC is tied to the execution of code. 358 362 * Config variable: TSCTiedToExecution (bool) */ … … 361 365 * Config variable: TSCNotTiedToHalt (bool) */ 362 366 bool fTSCNotTiedToHalt; 363 /** Alignment padding. */ 364 bool afAlignment0[2]; 367 /** Whether TM TSC mode switching is allowed at runtime. */ 368 bool fTSCModeSwitchAllowed; 369 /** Whether the guest has enabled use of paravirtualized TSC. */ 370 bool fParavirtTscEnabled; 365 371 /** The ID of the virtual CPU that normally runs the timers. */ 366 372 VMCPUID idTimerCpu; … … 685 691 bool afAlignment0[3]; /**< alignment padding */ 686 692 687 /** The offset between the raw TSC source and the Guest TSC.688 * Only valid if fTicking is set and and fTSCUseRealTSC is clear. */693 /** The offset between the host tick (TSC/virtual depending on the TSC mode) and 694 * the guest tick. */ 689 695 uint64_t offTSCRawSrc; 690 696
Note:
See TracChangeset
for help on using the changeset viewer.

