Changeset 19803 in vbox
- Timestamp: May 19, 2009 8:33:18 AM
- Location: trunk/src/VBox/VMM
- Files: 3 edited
  - TM.cpp (modified) (16 diffs)
  - TMInternal.h (modified) (1 diff)
  - VMMAll/TMAllVirtual.cpp (modified) (6 diffs)
trunk/src/VBox/VMM/TM.cpp
--- r19753
+++ r19803

@@ -589 +589 @@
     STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSetFF, STAMTYPE_COUNTER, "/TM/VirtualGetSetFF", STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerGet.");
     STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGet, STAMTYPE_COUNTER, "/TM/VirtualSyncGet", STAMUNIT_OCCURENCES, "The number of times tmVirtualSyncGetEx was called.");
-    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetSetFF, STAMTYPE_COUNTER, "/TM/VirtualSyncGetSetFF", STAMUNIT_OCCURENCES, "Times we set the FF when calling tmVirtualSyncGetEx.");
-    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetELoop, STAMTYPE_COUNTER, "/TM/VirtualSyncGetELoop", STAMUNIT_OCCURENCES, "Times we give up because too many loops in tmVirtualSyncGetEx.");
-    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetLocked, STAMTYPE_COUNTER, "/TM/VirtualSyncGetLocked", STAMUNIT_OCCURENCES, "Times we successfully acquired the lock in tmVirtualSyncGetEx.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetELoop, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/ELoop", STAMUNIT_OCCURENCES, "Times we give up because too many loops in tmVirtualSyncGetEx.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetExpired, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/Expired", STAMUNIT_OCCURENCES, "Times tmVirtualSyncGetEx encountered an expired timer stopping the clock.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetLocked, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/Locked", STAMUNIT_OCCURENCES, "Times we successfully acquired the lock in tmVirtualSyncGetEx.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetLockless, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/Lockless", STAMUNIT_OCCURENCES, "Times tmVirtualSyncGetEx returned without needing to take the lock.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetSetFF, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/SetFF", STAMUNIT_OCCURENCES, "Times we set the FF when calling tmVirtualSyncGetEx.");
     STAM_REG(pVM, &pVM->tm.s.StatVirtualPause, STAMTYPE_COUNTER, "/TM/VirtualPause", STAMUNIT_OCCURENCES, "The number of times TMR3TimerPause was called.");
     STAM_REG(pVM, &pVM->tm.s.StatVirtualResume, STAMTYPE_COUNTER, "/TM/VirtualResume", STAMUNIT_OCCURENCES, "The number of times TMR3TimerResume was called.");

@@ -968 +970 @@
         const uint64_t offNew = offVirtualNow - offVirtualSyncNow;
         Assert(offOld <= offNew);
-        ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
-        ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSync, offNew);
-        ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+        ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
+        ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSync, offNew);
+        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
         LogRel(("TM: Aborting catch-up attempt on reset with a %RU64 ns lag on reset; new total: %RU64 ns\n", offNew - offOld, offNew));
     }

@@ -1862 +1864 @@
  *
  * @param pVM The VM to run the timers for.
+ *
+ * @remarks The caller must own both the TM/EMT and the Virtual Sync locks.
  */
 static void tmR3TimerQueueRunVirtualSync(PVM pVM)

@@ -1891 +1895 @@
      * and 2) lag behind at a steady rate.
      */
-    const uint64_t u64VirtualNow = TMVirtualGetNoCheck(pVM);
-    uint64_t u64Now;
+    const uint64_t u64VirtualNow  = TMVirtualGetNoCheck(pVM);
+    uint64_t const offSyncGivenUp = pVM->tm.s.offVirtualSyncGivenUp;
+    uint64_t       u64Now;
     if (!pVM->tm.s.fVirtualSyncTicking)
     {

@@ -1901 +1906 @@
     else
     {
-        /* Calc 'now'. (update order doesn't really matter here) */
-        uint64_t off = pVM->tm.s.offVirtualSync;
+        /* Calc 'now'. */
+        bool fStopCatchup = false;
+        bool fUpdateStuff = false;
+        uint64_t off = pVM->tm.s.offVirtualSync;
         if (pVM->tm.s.fVirtualSyncCatchUp)
         {

@@ -1909 +1916 @@
             {
                 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
-                if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
+                if (off > u64Sub + offSyncGivenUp)
                 {
                     off -= u64Sub;
-                    Log4(("TM: %RU64/%RU64: sub %RU64 (run)\n", u64VirtualNow - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
+                    Log4(("TM: %RU64/%RU64: sub %RU64 (run)\n", u64VirtualNow - off, off - offSyncGivenUp, u64Sub));
                 }
                 else
                 {
                     STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
-                    ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
-                    off = pVM->tm.s.offVirtualSyncGivenUp;
+                    fStopCatchup = true;
+                    off = offSyncGivenUp;
                     Log4(("TM: %RU64/0: caught up (run)\n", u64VirtualNow));
                 }
             }
-            ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
-            pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow;
         }
         u64Now = u64VirtualNow - off;

         /* Check if stopped by expired timer. */
+        uint64_t u64Expire = pNext->u64Expire;
         if (u64Now >= pNext->u64Expire)
         {
             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStop);
             u64Now = pNext->u64Expire;
-            ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64Now);
-            ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
-            Log4(("TM: %RU64/%RU64: exp tmr (run)\n", u64Now, u64VirtualNow - u64Now - pVM->tm.s.offVirtualSyncGivenUp));
-
+            ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64Now);
+            ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
+            Log4(("TM: %RU64/%RU64: exp tmr (run)\n", u64Now, u64VirtualNow - u64Now - offSyncGivenUp));
+        }
+        else if (fUpdateStuff)
+        {
+            ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
+            ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64VirtualNow);
+            if (fStopCatchup)
+                ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
         }
     }

@@ -1941 +1953 @@
     /* calc end of frame. */
     uint64_t u64Max = u64Now + pVM->tm.s.u32VirtualSyncScheduleSlack;
-    if (u64Max > u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp)
-        u64Max = u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp;
+    if (u64Max > u64VirtualNow - offSyncGivenUp)
+        u64Max = u64VirtualNow - offSyncGivenUp;

     /* assert sanity */
-    Assert(u64Now <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp);
-    Assert(u64Max <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp);
+    Assert(u64Now <= u64VirtualNow - offSyncGivenUp);
+    Assert(u64Max <= u64VirtualNow - offSyncGivenUp);
     Assert(u64Now <= u64Max);
+    Assert(offSyncGivenUp == pVM->tm.s.offVirtualSyncGivenUp);

     /*

@@ -1984 +1997 @@
         u64Prev = pTimer->u64Expire;
 #endif
-        ASMAtomicXchgSize(&pVM->tm.s.fVirtualSyncTicking, false);
-        ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, pTimer->u64Expire);
+        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, pTimer->u64Expire);
+        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);

         /* fire */

@@ -2032 +2045 @@
     /* Let the time run a little bit while we were busy running timers(?). */
     uint64_t u64Elapsed;
-#define MAX_ELAPSED 30000 /* ns */
+#define MAX_ELAPSED 30000U /* ns */
     if (offSlack > MAX_ELAPSED)
         u64Elapsed = 0;

@@ -2059 +2072 @@
             /* stop */
             STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
-            ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+            ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
             Log4(("TM: %RU64/%RU64: caught up\n", u64VirtualNow2 - offNew, offLag));
         }

@@ -2072 +2085 @@
             {
                 STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupAdjust[i]);
-                ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
+                ASMAtomicWriteU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
                 Log4(("TM: %RU64/%RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
             }

@@ -2082 +2095 @@
                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUp);
                 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
-                ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
-                ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+                ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
+                ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
                 Log4(("TM: %RU64/%RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
                 LogRel(("TM: Giving up catch-up attempt at a %RU64 ns lag; new total: %RU64 ns\n", offLag, offNew));

@@ -2099 +2112 @@
                 i++;
                 STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupInitial[i]);
-                ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
-                ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, true);
+                ASMAtomicWriteU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
+                ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, true);
                 Log4(("TM: %RU64/%RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
             }

@@ -2107 +2120 @@
                 /* don't bother */
                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting);
-                ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
+                ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
                 Log4(("TM: %RU64/%RU64: give up\n", u64VirtualNow2 - offNew, offLag));
                 LogRel(("TM: Not bothering to attempt catching up a %RU64 ns lag; new total: %RU64\n", offLag, offNew));

@@ -2117 +2130 @@
          */
         Assert(!(offNew & RT_BIT_64(63)));
-        ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, offNew);
-        ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, true);
+        ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, offNew);
+        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
     }
 }

@@ -2307 +2320 @@
 {
     RTTimeNow(pTime);
-    RTTimeSpecSubNano(pTime, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp);
+    RTTimeSpecSubNano(pTime, ASMAtomicReadU64(&pVM->tm.s.offVirtualSync) - ASMAtomicReadU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp));
     RTTimeSpecAddNano(pTime, pVM->tm.s.offUTC);
     return pTime;
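For readers following the catch-up arithmetic that these TM.cpp hunks keep touching: the virtual-sync clock eats into its accumulated lag by a percentage of the elapsed virtual time, and stops catching up once the lag is back down to the given-up level. Below is a minimal standalone sketch of only that calculation, assuming nothing beyond what the diff shows; the struct and function names are illustrative, not the VMM API, and the real code uses ASMMultU64ByU32DivByU32 plus the atomic writes seen above.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the TM fields touched above (not the real VMM structs). */
typedef struct TMCATCHUPSKETCH
{
    uint64_t offVirtualSync;        /* current virtual-sync lag in ns */
    uint64_t offVirtualSyncGivenUp; /* portion of the lag we have given up on */
    uint32_t uCatchUpPercentage;    /* extra percentage the sync clock runs at */
} TMCATCHUPSKETCH;

/*
 * Mirrors the pattern in the hunks above: take uCatchUpPercentage percent of the
 * elapsed virtual time (u64Delta, in ns) out of the lag, and report when the lag
 * has shrunk to the given-up level so the caller can clear the catch-up flag.
 * Returns true when catch-up is complete.
 */
static bool tmSketchRunCatchUp(TMCATCHUPSKETCH *pState, uint64_t u64Delta)
{
    if (u64Delta >> 32)             /* like the RT_LIKELY(!(u64Delta >> 32)) guard: */
        return false;               /* ignore deltas over ~4 seconds (or negative). */

    /* ASMMultU64ByU32DivByU32(u64Delta, pct, 100) amounts to this for small deltas. */
    uint64_t u64Sub = u64Delta * pState->uCatchUpPercentage / 100;

    if (pState->offVirtualSync > u64Sub + pState->offVirtualSyncGivenUp)
    {
        pState->offVirtualSync -= u64Sub;   /* still behind, keep catching up */
        return false;
    }
    pState->offVirtualSync = pState->offVirtualSyncGivenUp; /* completely caught up */
    return true;
}

The deferred-update flags introduced in tmR3TimerQueueRunVirtualSync (fUpdateStuff, fStopCatchup) exist so that, in the real code, these results are only written back atomically when the clock was not stopped by an expired timer.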
trunk/src/VBox/VMM/TMInternal.h
--- r19753
+++ r19803

@@ -458 +458 @@
     STAMCOUNTER StatVirtualGetSetFF;
     STAMCOUNTER StatVirtualSyncGet;
+    STAMCOUNTER StatVirtualSyncGetELoop;
+    STAMCOUNTER StatVirtualSyncGetExpired;
+    STAMCOUNTER StatVirtualSyncGetLockless;
+    STAMCOUNTER StatVirtualSyncGetLocked;
     STAMCOUNTER StatVirtualSyncGetSetFF;
-    STAMCOUNTER StatVirtualSyncGetELoop;
-    STAMCOUNTER StatVirtualSyncGetLocked;
     STAMCOUNTER StatVirtualPause;
     STAMCOUNTER StatVirtualResume;
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
--- r19753
+++ r19803

@@ -394 +394 @@

 /**
+ * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
+ *
+ * @returns The timestamp.
+ * @param pVM VM handle.
+ * @param u64 raw virtual time.
+ * @param off offVirtualSync.
+ */
+DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVM pVM, uint64_t u64, uint64_t off)
+{
+    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+
+    /*
+     * Don't make updates untill
+     */
+    bool fUpdatePrev = true;
+    bool fUpdateOff = true;
+    bool fStop = false;
+    const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
+    uint64_t u64Delta = u64 - u64Prev;
+    if (RT_LIKELY(!(u64Delta >> 32)))
+    {
+        uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
+        if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
+        {
+            off -= u64Sub;
+            Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
+        }
+        else
+        {
+            /* we've completely caught up. */
+            STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
+            off = pVM->tm.s.offVirtualSyncGivenUp;
+            fStop = true;
+            Log4(("TM: %RU64/0: caught up\n", u64));
+        }
+    }
+    else
+    {
+        /* More than 4 seconds since last time (or negative), ignore it. */
+        fUpdateOff = false;
+        fUpdatePrev = !(u64Delta & RT_BIT_64(63));
+        Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
+    }
+
+    /*
+     * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
+     * approach is to never pass the head timer. So, when we do stop the clock and
+     * set the timer pending flag.
+     */
+    u64 -= off;
+    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+    if (u64 < u64Expire)
+    {
+        if (fUpdateOff)
+            ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
+        if (fStop)
+            ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+        if (fUpdatePrev)
+            ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
+        tmVirtualSyncUnlock(pVM);
+    }
+    else
+    {
+        u64 = u64Expire;
+        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
+        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
+
+        VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
+        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+        Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+        Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
+        tmVirtualSyncUnlock(pVM);
+
+#ifdef IN_RING3
+        REMR3NotifyTimerPending(pVM, pVCpuDst);
+        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
+#endif
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
+    }
+    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+
+    return u64;
+}
+
+
+/**
+ * tmVirtualSyncGetEx worker for when we get the lock.
+ *
+ * @returns timesamp.
+ * @param pVM The VM handle.
+ * @param u64 The virtual clock timestamp.
+ */
+DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVM pVM, uint64_t u64)
+{
+    /*
+     * Not ticking?
+     */
+    if (!pVM->tm.s.fVirtualSyncTicking)
+    {
+        u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
+        tmVirtualSyncUnlock(pVM);
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+        return u64;
+    }
+
+    /*
+     * Handle catch up in a separate function.
+     */
+    uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
+    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+        return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off);
+
+    /*
+     * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
+     * approach is to never pass the head timer. So, when we do stop the clock and
+     * set the timer pending flag.
+     */
+    u64 -= off;
+    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+    if (u64 < u64Expire)
+        tmVirtualSyncUnlock(pVM);
+    else
+    {
+        u64 = u64Expire;
+        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
+        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
+
+        VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
+        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+        Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+        Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
+        tmVirtualSyncUnlock(pVM);
+
+#ifdef IN_RING3
+        REMR3NotifyTimerPending(pVM, pVCpuDst);
+        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
+#endif
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
+    }
+    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+    return u64;
+}
+
+
+/**
  * Gets the current TMCLOCK_VIRTUAL_SYNC time.

@@ -426 +575 @@
 #endif
             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
+        }
+    }
+
+    /*
+     * When the clock is ticking, not doing catch ups and not running into an
+     * expired time, we can get away without locking. Try this first.
+     */
+    uint64_t off;
+    if (   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
+        && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+    {
+        off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
+        if (   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
+            && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
+            && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync))
+        {
+            if (u64 - off < ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire))
+            {
+                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
+                return u64 - off;
+            }
         }
     }

@@ -448 +618 @@
      */
     AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
-    int cOuterTries = 42;
-    int rcLock = tmVirtualSyncTryLock(pVM);
-    uint64_t off;
+    int cOuterTries = 42;
     for (;; cOuterTries--)
     {
+        /* Try grab the lock, things get simpler when owning the lock. */
+        int rcLock = tmVirtualSyncTryLock(pVM);
+        if (RT_SUCCESS_NP(rcLock))
+            return tmVirtualSyncGetLocked(pVM, u64);
+
         /* Re-check the ticking flag. */
         if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
         {
-            if (RT_SUCCESS(rcLock))
-                tmVirtualSyncUnlock(pVM);
-            return ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
+            off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
+            if (   ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
+                && cOuterTries > 0)
+                continue;
+            return off;
         }

-        off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
-        if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+        off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
+        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
         {
-            /* adjust the offset. */
-            if (RT_FAILURE(rcLock))
-                rcLock = tmVirtualSyncTryLock(pVM);
-            if (RT_SUCCESS(rcLock))
-            {
-                /* We own the lock and may make updates. */
-                const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
-                uint64_t u64Delta = u64 - u64Prev;
-                if (RT_LIKELY(!(u64Delta >> 32)))
-                {
-                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
-                    if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
-                    {
-                        off -= u64Sub;
-                        ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
-                        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
-                        Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
-                    }
-                    else
-                    {
-                        /* we've completely caught up. */
-                        STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
-                        off = pVM->tm.s.offVirtualSyncGivenUp;
-                        ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
-                        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
-                        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
-                        Log4(("TM: %RU64/0: caught up\n", u64));
-                    }
-                }
-                else
-                {
-                    /* More than 4 seconds since last time (or negative), ignore it. */
-                    if (!(u64Delta & RT_BIT_64(63)))
-                        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
-                    Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
-                }
-                break;
-            }
-
             /* No changes allowed, try get a consistent set of parameters. */
-            uint64_t const u64Prev = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
-            uint64_t const offGivenUp = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
+            uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
+            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
             uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
-            if (    (   u64Prev == ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
-                     && offGivenUp == ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
-                     && u32Pct == ASMAtomicUoReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
+            if (    (   u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
+                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
+                     && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                 || cOuterTries <= 0)

@@ -535 +671 @@

             /* Check that we're still running and in catch up. */
-            if (pVM->tm.s.fVirtualSyncCatchUp)
+            if (   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
+                && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                 break;
             if (cOuterTries <= 0)
-                break;
+                break; /* enough */
            }
        }
-        else if (   off == ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync)
-                 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
+                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
             break; /* Got an consistent offset */
+        else if (cOuterTries <= 0)
+            break; /* enough */
     }
     if (cOuterTries <= 0)

@@ -554 +693 @@
      */
     u64 -= off;
-    const uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
     if (u64 >= u64Expire)
     {
-        u64 = u64Expire;
         PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
-        if (RT_FAILURE(rcLock))
-            rcLock = tmVirtualSyncTryLock(pVM);
-        if (RT_SUCCESS(rcLock))
-        {
-            ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
-            ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
-            VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
-            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
-            Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
-            tmVirtualSyncUnlock(pVM);
-#ifdef IN_RING3
-            REMR3NotifyTimerPending(pVM, pVCpuDst);
-            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
-#endif
-            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
-            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
-            Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
-        }
-        else if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
         {
             Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+            VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
             VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #ifdef IN_RING3

@@ -590 +711 @@
         else
             Log4(("TM: %RU64/%RU64: exp tmr (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
-        }
-        else if (RT_SUCCESS(rcLock))
-        {
-            tmVirtualSyncUnlock(pVM);
-            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);

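The main point of the TMAllVirtual.cpp change is the lockless fast path at the top of tmVirtualSyncGetEx: if the clock is ticking, no catch-up is active, the offset reads back unchanged and the result is still short of the head timer, the call returns without taking the virtual-sync lock; everything else drops into the locked workers added above. Below is a minimal standalone sketch of that read pattern under the assumption that C11 atomics stand in for the ASMAtomic* wrappers; SYNCCLOCKSKETCH, sketchVirtualSyncGet and sketchGetLocked are illustrative names, not VMM APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative shared state; the real fields live in pVM->tm.s. */
typedef struct SYNCCLOCKSKETCH
{
    atomic_bool          fTicking;       /* fVirtualSyncTicking */
    atomic_bool          fCatchUp;       /* fVirtualSyncCatchUp */
    atomic_uint_fast64_t offVirtualSync; /* lag subtracted from the raw virtual clock */
    atomic_uint_fast64_t u64Expire;      /* expire time of the head virtual-sync timer */
} SYNCCLOCKSKETCH;

/* Minimal stand-in for the locked slow path: the real tmVirtualSyncGetLocked takes the
   virtual-sync lock, handles catch-up and raises VMCPU_FF_TIMER; here we only clamp. */
static uint64_t sketchGetLocked(SYNCCLOCKSKETCH *pClock, uint64_t u64Raw)
{
    uint64_t u64 = u64Raw - atomic_load(&pClock->offVirtualSync);
    uint64_t u64Expire = atomic_load(&pClock->u64Expire);
    return u64 < u64Expire ? u64 : u64Expire;           /* never pass the head timer */
}

/* Same shape as the new fast path in tmVirtualSyncGetEx. */
static uint64_t sketchVirtualSyncGet(SYNCCLOCKSKETCH *pClock, uint64_t u64Raw)
{
    if (   atomic_load(&pClock->fTicking)
        && !atomic_load(&pClock->fCatchUp))
    {
        uint64_t off = atomic_load(&pClock->offVirtualSync);
        if (   atomic_load(&pClock->fTicking)               /* re-check: clock not stopped, */
            && !atomic_load(&pClock->fCatchUp)              /* no catch-up started,        */
            && off == atomic_load(&pClock->offVirtualSync)) /* offset unchanged meanwhile  */
        {
            if (u64Raw - off < atomic_load(&pClock->u64Expire))
                return u64Raw - off;                        /* lockless hit */
        }
    }
    return sketchGetLocked(pClock, u64Raw);                 /* contended or expired: slow path */
}

The double read of offVirtualSync is what makes the unlocked return safe: if a writer changes any of the clock state in between, one of the three re-checks fails and the call falls back to the locked path.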