Index: /trunk/include/VBox/vm.h
===================================================================
--- /trunk/include/VBox/vm.h	(revision 19809)
+++ /trunk/include/VBox/vm.h	(revision 19810)
@@ -893,5 +893,5 @@
         struct TM   s;
 #endif
-        char        padding[1856];      /* multiple of 32 */
+        char        padding[1920];      /* multiple of 32 */
     } tm;
 
Index: /trunk/src/VBox/VMM/TM.cpp
===================================================================
--- /trunk/src/VBox/VMM/TM.cpp	(revision 19809)
+++ /trunk/src/VBox/VMM/TM.cpp	(revision 19810)
@@ -561,8 +561,10 @@
     STAM_REG(pVM, &pVM->tm.s.StatPoll,                                STAMTYPE_COUNTER, "/TM/Poll",                            STAMUNIT_OCCURENCES, "TMTimerPoll calls.");
     STAM_REG(pVM, &pVM->tm.s.StatPollAlreadySet,                      STAMTYPE_COUNTER, "/TM/Poll/AlreadySet",                 STAMUNIT_OCCURENCES, "TMTimerPoll calls where the FF was already set.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollELoop,                           STAMTYPE_COUNTER, "/TM/Poll/ELoop",                      STAMUNIT_OCCURENCES, "Times TMTimerPoll has given up getting a consistent virtual sync data set.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollMiss,                            STAMTYPE_COUNTER, "/TM/Poll/Miss",                       STAMUNIT_OCCURENCES, "TMTimerPoll calls where nothing had expired.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollRunning,                         STAMTYPE_COUNTER, "/TM/Poll/Running",                    STAMUNIT_OCCURENCES, "TMTimerPoll calls where the queues were being run.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollSimple,                          STAMTYPE_COUNTER, "/TM/Poll/Simple",                     STAMUNIT_OCCURENCES, "TMTimerPoll calls where we could take the simple path.");
     STAM_REG(pVM, &pVM->tm.s.StatPollVirtual,                         STAMTYPE_COUNTER, "/TM/Poll/HitsVirtual",                STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL queue.");
     STAM_REG(pVM, &pVM->tm.s.StatPollVirtualSync,                     STAMTYPE_COUNTER, "/TM/Poll/HitsVirtualSync",            STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL_SYNC queue.");
-    STAM_REG(pVM, &pVM->tm.s.StatPollMiss,                            STAMTYPE_COUNTER, "/TM/Poll/Miss",                       STAMUNIT_OCCURENCES, "TMTimerPoll calls where nothing had expired.");
-    STAM_REG(pVM, &pVM->tm.s.StatPollRunning,                         STAMTYPE_COUNTER, "/TM/Poll/Running",                    STAMUNIT_OCCURENCES, "TMTimerPoll calls where the queues were being run.");
 
     STAM_REG(pVM, &pVM->tm.s.StatPollGIP,                             STAMTYPE_COUNTER, "/TM/PollGIP",                         STAMUNIT_OCCURENCES, "TMTimerPollGIP calls.");
@@ -589,5 +591,5 @@
     STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSetFF,                     STAMTYPE_COUNTER, "/TM/VirtualGetSetFF",                 STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerGet.");
     STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGet,                      STAMTYPE_COUNTER, "/TM/VirtualSyncGet",                  STAMUNIT_OCCURENCES, "The number of times tmVirtualSyncGetEx was called.");
-    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetELoop,                 STAMTYPE_COUNTER, "/TM/VirtualSyncGet/ELoop",            STAMUNIT_OCCURENCES, "Times we give up because too many loops in tmVirtualSyncGetEx.");
+    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetELoop,                 STAMTYPE_COUNTER, "/TM/VirtualSyncGet/ELoop",            STAMUNIT_OCCURENCES, "Times tmVirtualSyncGetEx has given up getting a consistent virtual sync data set.");
     STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetExpired,               STAMTYPE_COUNTER, "/TM/VirtualSyncGet/Expired",          STAMUNIT_OCCURENCES, "Times tmVirtualSyncGetEx encountered an expired timer stopping the clock.");
     STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetLocked,                STAMTYPE_COUNTER, "/TM/VirtualSyncGet/Locked",           STAMUNIT_OCCURENCES, "Times we successfully acquired the lock in tmVirtualSyncGetEx.");
Index: /trunk/src/VBox/VMM/TMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/TMInternal.h	(revision 19809)
+++ /trunk/src/VBox/VMM/TMInternal.h	(revision 19810)
@@ -470,8 +470,10 @@
     STAMCOUNTER                 StatPoll;
     STAMCOUNTER                 StatPollAlreadySet;
+    STAMCOUNTER                 StatPollELoop;
+    STAMCOUNTER                 StatPollMiss;
+    STAMCOUNTER                 StatPollRunning;
+    STAMCOUNTER                 StatPollSimple;
     STAMCOUNTER                 StatPollVirtual;
     STAMCOUNTER                 StatPollVirtualSync;
-    STAMCOUNTER                 StatPollMiss;
-    STAMCOUNTER                 StatPollRunning;
     /** @} */
     /** TMTimerPollGIP
Index: /trunk/src/VBox/VMM/VMMAll/TMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/TMAll.cpp	(revision 19809)
+++ /trunk/src/VBox/VMM/VMMAll/TMAll.cpp	(revision 19810)
@@ -369,15 +369,19 @@
      * TMCLOCK_VIRTUAL
      */
-    const uint64_t  u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
+    const uint64_t  u64Expire1 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
     const int64_t   i64Delta1  = u64Expire1 - u64Now;
     if (i64Delta1 <= 0)
     {
+        LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
+        if (    !pVM->tm.s.fRunningQueues
+            &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+        {
+            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+#ifdef IN_RING3
+            REMR3NotifyTimerPending(pVM, pVCpuDst);
+#endif
+        }
         STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
-        LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
-        Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
-        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
-#ifdef IN_RING3
-        REMR3NotifyTimerPending(pVM, pVCpuDst);
-#endif
         return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     }
@@ -388,28 +392,131 @@
      * we have to adjust the 'now' but when have to adjust the delta as well.
      */
-    int            rc         = tmVirtualSyncLock(pVM); /** @todo FIXME: Stop playing safe here... */
-    const uint64_t u64Expire2 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
+
+    /*
+     * Optimistic lockless approach.
+     */
     uint64_t u64VirtualSyncNow;
-    if (!pVM->tm.s.fVirtualSyncTicking)
-        u64VirtualSyncNow = pVM->tm.s.u64VirtualSync;
+    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
+    {
+        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+        {
+            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
+            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
+                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
+                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
+                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
+            {
+                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
+                if (u64VirtualSyncNow < u64Expire2)
+                {
+                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
+                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
+                    return pVCpu == pVCpuDst
+                         ? RT_MIN(i64Delta1, (int64_t)(u64Expire2 - u64VirtualSyncNow))
+                         : s_u64OtherRet;
+                }
+
+                if (    !pVM->tm.s.fRunningQueues
+                    &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+                {
+                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+#ifdef IN_RING3
+                    REMR3NotifyTimerPending(pVM, pVCpuDst);
+#endif
+                }
+
+                STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
+                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
+                LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
+                return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
+            }
+        }
+    }
     else
     {
-        if (!pVM->tm.s.fVirtualSyncCatchUp)
-            u64VirtualSyncNow = u64Now - pVM->tm.s.offVirtualSync;
-        else
-        {
-            uint64_t off = pVM->tm.s.offVirtualSync;
-            uint64_t u64Delta = u64Now - pVM->tm.s.u64VirtualSyncCatchUpPrev;
-            if (RT_LIKELY(!(u64Delta >> 32)))
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
+        LogFlow(("TMTimerPoll: stopped\n"));
+        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
+    }
+
+    /*
+     * Complicated lockless approach.
+     */
+    uint64_t    off;
+    uint32_t    u32Pct = 0;
+    bool        fCatchUp;
+    int         cOuterTries = 42;
+    for (;; cOuterTries--)
+    {
+        fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
+        off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
+        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+        if (fCatchUp)
+        {
+            /* No changes allowed; try to get a consistent set of parameters. */
+            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
+            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
+            u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
+            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
+                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
+                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
+                     && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
+                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
+                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
+                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
+                ||  cOuterTries <= 0)
             {
-                uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
-                if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
-                    off -= u64Sub;
+                uint64_t u64Delta = u64Now - u64Prev;
+                if (RT_LIKELY(!(u64Delta >> 32)))
+                {
+                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
+                    if (off > u64Sub + offGivenUp)
+                        off -= u64Sub;
+                    else /* we've completely caught up. */
+                        off = offGivenUp;
+                }
                 else
-                    off = pVM->tm.s.offVirtualSyncGivenUp;
+                    /* More than 4 seconds since last time (or negative), ignore it. */
+                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
+
+                /* Check that we're still running and in catch up. */
+                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
+                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+                    break;
             }
-            u64VirtualSyncNow = u64Now - off;
-        }
-    }
+        }
+        else if (   off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
+                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
+                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
+                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
+            break; /* Got a consistent offset. */
+
+        /* Repeat the initial checks before iterating. */
+        if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+        {
+            STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
+            return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
+        }
+        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
+        {
+            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
+            return s_u64OtherRet;
+        }
+        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
+        {
+            STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
+            LogFlow(("TMTimerPoll: stopped\n"));
+            return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
+        }
+        if (cOuterTries <= 0)
+            break; /* that's enough */
+    }
+    if (cOuterTries <= 0)
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
+    u64VirtualSyncNow = u64Now - off;
+
     int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
     if (i64Delta2 <= 0)
@@ -425,13 +532,7 @@
         }
         STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
-#ifndef IN_RING3
-        if (RT_SUCCESS(rc))
-#endif
-            tmVirtualSyncUnlock(pVM);
         LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
         return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     }
-    if (pVM->tm.s.fVirtualSyncCatchUp)
-        i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
 
     /*
@@ -439,9 +540,11 @@
      */
     STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
-#ifndef IN_RING3
-    if (RT_SUCCESS(rc))
-#endif
-        tmVirtualSyncUnlock(pVM);
-    return RT_MIN(i64Delta1, i64Delta2);
+    if (pVCpu == pVCpuDst)
+    {
+        if (fCatchUp)
+            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
+        return RT_MIN(i64Delta1, i64Delta2);
+    }
+    return s_u64OtherRet;
 }
 
Index: /trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp	(revision 19809)
+++ /trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp	(revision 19810)
@@ -583,17 +583,29 @@
      */
     uint64_t off;
-    if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
-        &&  !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
-    {
-        off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
-        if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
-            &&  !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
-            &&  off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync))
-        {
-            if (u64 - off < ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire))
+    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
+    {
+        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+        {
+            off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
+            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
+                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
+                          && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
             {
-                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
-                return u64 - off;
+                off = u64 - off;
+                if (off < ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire))
+                {
+                    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
+                    return off;
+                }
             }
+        }
+    }
+    else
+    {
+        off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
+        if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
+        {
+            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
+            return off;
         }
     }
