Index: /trunk/src/VBox/VMM/TM.cpp
===================================================================
--- /trunk/src/VBox/VMM/TM.cpp	(revision 19802)
+++ /trunk/src/VBox/VMM/TM.cpp	(revision 19803)
@@ -589,7 +589,9 @@
     STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSetFF,                     STAMTYPE_COUNTER, "/TM/VirtualGetSetFF",                 STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerGet.");
     STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGet,                      STAMTYPE_COUNTER, "/TM/VirtualSyncGet",                  STAMUNIT_OCCURENCES, "The number of times tmVirtualSyncGetEx was called.");
-    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetSetFF,                 STAMTYPE_COUNTER, "/TM/VirtualSyncGetSetFF",             STAMUNIT_OCCURENCES, "Times we set the FF when calling tmVirtualSyncGetEx.");
-    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetELoop,                 STAMTYPE_COUNTER, "/TM/VirtualSyncGetELoop",             STAMUNIT_OCCURENCES, "Times we give up because too many loops in tmVirtualSyncGetEx.");
-    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetLocked,                STAMTYPE_COUNTER, "/TM/VirtualSyncGetLocked",            STAMUNIT_OCCURENCES, "Times we successfully acquired the lock in tmVirtualSyncGetEx.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetELoop,                 STAMTYPE_COUNTER, "/TM/VirtualSyncGet/ELoop",            STAMUNIT_OCCURENCES, "Times we give up because too many loops in tmVirtualSyncGetEx.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetExpired,               STAMTYPE_COUNTER, "/TM/VirtualSyncGet/Expired",          STAMUNIT_OCCURENCES, "Times tmVirtualSyncGetEx encountered an expired timer stopping the clock.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetLocked,                STAMTYPE_COUNTER, "/TM/VirtualSyncGet/Locked",           STAMUNIT_OCCURENCES, "Times we successfully acquired the lock in tmVirtualSyncGetEx.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetLockless,              STAMTYPE_COUNTER, "/TM/VirtualSyncGet/Lockless",         STAMUNIT_OCCURENCES, "Times tmVirtualSyncGetEx returned without needing to take the lock.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetSetFF,                 STAMTYPE_COUNTER, "/TM/VirtualSyncGet/SetFF",            STAMUNIT_OCCURENCES, "Times we set the FF when calling tmVirtualSyncGetEx.");
     STAM_REG(pVM, &pVM->tm.s.StatVirtualPause,                        STAMTYPE_COUNTER, "/TM/VirtualPause",                    STAMUNIT_OCCURENCES, "The number of times TMR3TimerPause was called.");
     STAM_REG(pVM, &pVM->tm.s.StatVirtualResume,                       STAMTYPE_COUNTER, "/TM/VirtualResume",                   STAMUNIT_OCCURENCES, "The number of times TMR3TimerResume was called.");
@@ -968,7 +970,7 @@
             const uint64_t offNew = offVirtualNow - offVirtualSyncNow;
             Assert(offOld <= offNew);
-            ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
-            ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSync, offNew);
-            ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+            ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
+            ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSync, offNew);
+            ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
             LogRel(("TM: Aborting catch-up attempt on reset with a %RU64 ns lag on reset; new total: %RU64 ns\n", offNew - offOld, offNew));
         }
@@ -1862,4 +1864,6 @@
  *
  * @param   pVM             The VM to run the timers for.
+ *
+ * @remarks The caller must own both the TM/EMT and the Virtual Sync locks.
  */
 static void tmR3TimerQueueRunVirtualSync(PVM pVM)
@@ -1891,6 +1895,7 @@
      * and 2) lag behind at a steady rate.
      */
-    const uint64_t u64VirtualNow = TMVirtualGetNoCheck(pVM);
-    uint64_t u64Now;
+    const uint64_t  u64VirtualNow  = TMVirtualGetNoCheck(pVM);
+    uint64_t const  offSyncGivenUp = pVM->tm.s.offVirtualSyncGivenUp;
+    uint64_t        u64Now;
     if (!pVM->tm.s.fVirtualSyncTicking)
     {
@@ -1901,6 +1906,8 @@
     else
     {
-        /* Calc 'now'. (update order doesn't really matter here) */
-        uint64_t off = pVM->tm.s.offVirtualSync;
+        /* Calc 'now'. */
+        bool     fStopCatchup  = false;
+        bool     fUpdateStuff  = false;
+        uint64_t off           = pVM->tm.s.offVirtualSync;
         if (pVM->tm.s.fVirtualSyncCatchUp)
         {
@@ -1909,31 +1916,36 @@
             {
                 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
-                if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
+                if (off > u64Sub + offSyncGivenUp)
                 {
                     off -= u64Sub;
-                    Log4(("TM: %RU64/%RU64: sub %RU64 (run)\n", u64VirtualNow - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
+                    Log4(("TM: %RU64/%RU64: sub %RU64 (run)\n", u64VirtualNow - off, off - offSyncGivenUp, u64Sub));
                 }
                 else
                 {
                     STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
-                    ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
-                    off = pVM->tm.s.offVirtualSyncGivenUp;
+                    fStopCatchup = true;
+                    off = offSyncGivenUp;
                     Log4(("TM: %RU64/0: caught up (run)\n", u64VirtualNow));
                 }
             }
-            ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
-            pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow;
         }
         u64Now = u64VirtualNow - off;
 
         /* Check if stopped by expired timer. */
+        uint64_t u64Expire = pNext->u64Expire;
         if (u64Now >= pNext->u64Expire)
         {
             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStop);
             u64Now = pNext->u64Expire;
-            ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64Now);
-            ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
-            Log4(("TM: %RU64/%RU64: exp tmr (run)\n", u64Now, u64VirtualNow - u64Now - pVM->tm.s.offVirtualSyncGivenUp));
-
+            ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64Now);
+            ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
+            Log4(("TM: %RU64/%RU64: exp tmr (run)\n", u64Now, u64VirtualNow - u64Now - offSyncGivenUp));
+        }
+        else if (fUpdateStuff)
+        {
+            ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
+            ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64VirtualNow);
+            if (fStopCatchup)
+                ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
         }
     }
@@ -1941,11 +1953,12 @@
     /* calc end of frame. */
     uint64_t u64Max = u64Now + pVM->tm.s.u32VirtualSyncScheduleSlack;
-    if (u64Max > u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp)
-        u64Max = u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp;
+    if (u64Max > u64VirtualNow - offSyncGivenUp)
+        u64Max = u64VirtualNow - offSyncGivenUp;
 
     /* assert sanity */
-    Assert(u64Now <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp);
-    Assert(u64Max <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp);
+    Assert(u64Now <= u64VirtualNow - offSyncGivenUp);
+    Assert(u64Max <= u64VirtualNow - offSyncGivenUp);
     Assert(u64Now <= u64Max);
+    Assert(offSyncGivenUp == pVM->tm.s.offVirtualSyncGivenUp);
 
     /*
@@ -1984,6 +1997,6 @@
             u64Prev = pTimer->u64Expire;
 #endif
-            ASMAtomicXchgSize(&pVM->tm.s.fVirtualSyncTicking, false);
-            ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, pTimer->u64Expire);
+            ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, pTimer->u64Expire);
+            ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
 
             /* fire */
@@ -2032,5 +2045,5 @@
         /* Let the time run a little bit while we were busy running timers(?). */
         uint64_t u64Elapsed;
-#define MAX_ELAPSED 30000 /* ns */
+#define MAX_ELAPSED 30000U /* ns */
         if (offSlack > MAX_ELAPSED)
             u64Elapsed = 0;
@@ -2059,5 +2072,5 @@
                 /* stop */
                 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
-                ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+                ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
                 Log4(("TM: %RU64/%RU64: caught up\n", u64VirtualNow2 - offNew, offLag));
             }
@@ -2072,5 +2085,5 @@
                 {
                     STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupAdjust[i]);
-                    ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
+                    ASMAtomicWriteU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
                     Log4(("TM: %RU64/%RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
                 }
@@ -2082,6 +2095,6 @@
                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUp);
                 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
-                ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
-                ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+                ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
+                ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
                 Log4(("TM: %RU64/%RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
                 LogRel(("TM: Giving up catch-up attempt at a %RU64 ns lag; new total: %RU64 ns\n", offLag, offNew));
@@ -2099,6 +2112,6 @@
                     i++;
                 STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupInitial[i]);
-                ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
-                ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, true);
+                ASMAtomicWriteU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
+                ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, true);
                 Log4(("TM: %RU64/%RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
             }
@@ -2107,5 +2120,5 @@
                 /* don't bother */
                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting);
-                ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
+                ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
                 Log4(("TM: %RU64/%RU64: give up\n", u64VirtualNow2 - offNew, offLag));
                 LogRel(("TM: Not bothering to attempt catching up a %RU64 ns lag; new total: %RU64\n", offLag, offNew));
@@ -2117,6 +2130,6 @@
          */
         Assert(!(offNew & RT_BIT_64(63)));
-        ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, offNew);
-        ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, true);
+        ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, offNew);
+        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
     }
 }
@@ -2307,5 +2320,5 @@
 {
     RTTimeNow(pTime);
-    RTTimeSpecSubNano(pTime, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp);
+    RTTimeSpecSubNano(pTime, ASMAtomicReadU64(&pVM->tm.s.offVirtualSync) - ASMAtomicReadU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp));
     RTTimeSpecAddNano(pTime, pVM->tm.s.offUTC);
     return pTime;
Index: /trunk/src/VBox/VMM/TMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/TMInternal.h	(revision 19802)
+++ /trunk/src/VBox/VMM/TMInternal.h	(revision 19803)
@@ -458,7 +458,9 @@
     STAMCOUNTER                 StatVirtualGetSetFF;
     STAMCOUNTER                 StatVirtualSyncGet;
+    STAMCOUNTER                 StatVirtualSyncGetELoop;
+    STAMCOUNTER                 StatVirtualSyncGetExpired;
+    STAMCOUNTER                 StatVirtualSyncGetLockless;
+    STAMCOUNTER                 StatVirtualSyncGetLocked;
     STAMCOUNTER                 StatVirtualSyncGetSetFF;
-    STAMCOUNTER                 StatVirtualSyncGetELoop;
-    STAMCOUNTER                 StatVirtualSyncGetLocked;
     STAMCOUNTER                 StatVirtualPause;
     STAMCOUNTER                 StatVirtualResume;
Index: /trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp	(revision 19802)
+++ /trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp	(revision 19803)
@@ -394,4 +394,153 @@
 
 /**
+ * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
+ *
+ * @returns The timestamp.
+ * @param   pVM                 VM handle.
+ * @param   u64                 raw virtual time.
+ * @param   off                 offVirtualSync.
+ */
+DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVM pVM, uint64_t u64, uint64_t off)
+{
+    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+
+    /*
+     * Don't make any state updates until we know the resulting timestamp is
+     * valid (i.e. does not pass the head timer); record intended updates in
+     * the local flags below and apply them at the end.
+     */
+    bool            fUpdatePrev = true;
+    bool            fUpdateOff  = true;
+    bool            fStop       = false;
+    const uint64_t  u64Prev     = pVM->tm.s.u64VirtualSyncCatchUpPrev;
+    uint64_t        u64Delta    = u64 - u64Prev;
+    if (RT_LIKELY(!(u64Delta >> 32)))
+    {
+        uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
+        if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
+        {
+            off -= u64Sub;
+            Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
+        }
+        else
+        {
+            /* we've completely caught up. */
+            STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
+            off = pVM->tm.s.offVirtualSyncGivenUp;
+            fStop = true;
+            Log4(("TM: %RU64/0: caught up\n", u64));
+        }
+    }
+    else
+    {
+        /* More than 4 seconds since last time (or negative), ignore it. */
+        fUpdateOff = false;
+        fUpdatePrev = !(u64Delta & RT_BIT_64(63));
+        Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
+    }
+
+    /*
+     * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
+     * approach is to never pass the head timer. So, when we do stop the clock and
+     * set the timer pending flag.
+     */
+    u64 -= off;
+    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+    if (u64 < u64Expire)
+    {
+        if (fUpdateOff)
+            ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
+        if (fStop)
+            ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+        if (fUpdatePrev)
+            ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
+        tmVirtualSyncUnlock(pVM);
+    }
+    else
+    {
+        u64 = u64Expire;
+        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
+        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
+
+        VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
+        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+        Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+        Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
+        tmVirtualSyncUnlock(pVM);
+
+#ifdef IN_RING3
+        REMR3NotifyTimerPending(pVM, pVCpuDst);
+        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
+#endif
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
+    }
+    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+
+    return u64;
+}
+
+
+/**
+ * tmVirtualSyncGetEx worker for when we get the lock.
+ *
+ * @returns The timestamp.
+ * @param   pVM                 The VM handle.
+ * @param   u64                 The virtual clock timestamp.
+ */
+DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVM pVM, uint64_t u64)
+{
+    /*
+     * Not ticking?
+     */
+    if (!pVM->tm.s.fVirtualSyncTicking)
+    {
+        u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
+        tmVirtualSyncUnlock(pVM);
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+        return u64;
+    }
+
+    /*
+     * Handle catch up in a separate function.
+     */
+    uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
+    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+        return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off);
+
+    /*
+     * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
+     * approach is to never pass the head timer. So, when we do stop the clock and
+     * set the timer pending flag.
+     */
+    u64 -= off;
+    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+    if (u64 < u64Expire)
+        tmVirtualSyncUnlock(pVM);
+    else
+    {
+        u64 = u64Expire;
+        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
+        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
+
+        VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
+        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+        Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+        Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
+        tmVirtualSyncUnlock(pVM);
+
+#ifdef IN_RING3
+        REMR3NotifyTimerPending(pVM, pVCpuDst);
+        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
+#endif
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
+    }
+    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+    return u64;
+}
+
+
+/**
  * Gets the current TMCLOCK_VIRTUAL_SYNC time.
  *
@@ -426,4 +575,25 @@
 #endif
             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
+        }
+    }
+
+    /*
+     * When the clock is ticking, not doing catch ups and not running into an
+     * expired time, we can get away without locking. Try this first.
+     */
+    uint64_t off;
+    if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
+        &&  !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+    {
+        off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
+        if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
+            &&  !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
+            &&  off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync))
+        {
+            if (u64 - off < ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire))
+            {
+                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
+                return u64 - off;
+            }
         }
     }
@@ -448,66 +618,32 @@
      */
     AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
-    int         cOuterTries = 42;
-    int         rcLock = tmVirtualSyncTryLock(pVM);
-    uint64_t    off;
+    int cOuterTries = 42;
     for (;; cOuterTries--)
     {
+        /* Try grab the lock, things get simpler when owning the lock. */
+        int rcLock = tmVirtualSyncTryLock(pVM);
+        if (RT_SUCCESS_NP(rcLock))
+            return tmVirtualSyncGetLocked(pVM, u64);
+
         /* Re-check the ticking flag. */
         if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
         {
-            if (RT_SUCCESS(rcLock))
-                tmVirtualSyncUnlock(pVM);
-            return ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
+            off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
+            if (   ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
+                && cOuterTries > 0)
+                continue;
+            return off;
         }
 
-        off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
-        if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+        off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
+        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
         {
-            /* adjust the offset. */
-            if (RT_FAILURE(rcLock))
-                rcLock = tmVirtualSyncTryLock(pVM);
-            if (RT_SUCCESS(rcLock))
-            {
-                /* We own the lock and may make updates. */
-                const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
-                uint64_t u64Delta = u64 - u64Prev;
-                if (RT_LIKELY(!(u64Delta >> 32)))
-                {
-                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
-                    if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
-                    {
-                        off -= u64Sub;
-                        ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
-                        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
-                        Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
-                    }
-                    else
-                    {
-                        /* we've completely caught up. */
-                        STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
-                        off = pVM->tm.s.offVirtualSyncGivenUp;
-                        ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
-                        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
-                        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
-                        Log4(("TM: %RU64/0: caught up\n", u64));
-                    }
-                }
-                else
-                {
-                    /* More than 4 seconds since last time (or negative), ignore it. */
-                    if (!(u64Delta & RT_BIT_64(63)))
-                        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
-                    Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
-                }
-                break;
-            }
-
             /* No changes allowed, try get a consistent set of parameters. */
-            uint64_t const u64Prev    = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
-            uint64_t const offGivenUp = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
+            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
+            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
             uint32_t const u32Pct     = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
-            if (    (   u64Prev    == ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
-                     && offGivenUp == ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
-                     && u32Pct     == ASMAtomicUoReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
+            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
+                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
+                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                      && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                 ||  cOuterTries <= 0)
@@ -535,13 +671,16 @@
 
                 /* Check that we're still running and in catch up. */
-                if (pVM->tm.s.fVirtualSyncCatchUp)
+                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
+                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                     break;
                 if (cOuterTries <= 0)
-                    break;
+                    break; /* enough */
             }
         }
-        else if (   off == ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync)
-                 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
+                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
             break; /* Got an consistent offset */
+        else if (cOuterTries <= 0)
+            break; /* enough */
     }
     if (cOuterTries <= 0)
@@ -554,30 +693,12 @@
      */
     u64 -= off;
-    const uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
     if (u64 >= u64Expire)
     {
-        u64 = u64Expire;
         PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
-        if (RT_FAILURE(rcLock))
-            rcLock = tmVirtualSyncTryLock(pVM);
-        if (RT_SUCCESS(rcLock))
-        {
-            ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
-            ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
-            VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
-            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
-            Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
-            tmVirtualSyncUnlock(pVM);
-#ifdef IN_RING3
-            REMR3NotifyTimerPending(pVM, pVCpuDst);
-            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
-#endif
-            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
-            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
-            Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
-        }
-        else if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
         {
             Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+            VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
             VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #ifdef IN_RING3
@@ -590,9 +711,5 @@
         else
             Log4(("TM: %RU64/%RU64: exp tmr (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
-    }
-    else if (RT_SUCCESS(rcLock))
-    {
-        tmVirtualSyncUnlock(pVM);
-        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
     }
 
