Index: /trunk/src/VBox/VMM/TM.cpp
===================================================================
--- /trunk/src/VBox/VMM/TM.cpp	(revision 20049)
+++ /trunk/src/VBox/VMM/TM.cpp	(revision 20050)
@@ -396,5 +396,5 @@
     else
         CPUMR3SetCR4Feature(pVM, 0, ~X86_CR4_TSD);
-    LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool\n"
+    LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%'RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool\n"
             "TM: fMaybeUseOffsettedHostTSC=%RTbool TSCTiedToExecution=%RTbool TSCNotTiedToHalt=%RTbool\n",
             pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized, pVM->tm.s.fTSCUseRealTSC,
@@ -447,5 +447,5 @@
         if (    (iPeriod > 0 && u64 <= pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod - 1].u64Start) \
             ||  u64 >= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold) \
-            return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Configuration error: Invalid start of period #" #iPeriod ": %RU64"), u64); \
+            return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Configuration error: Invalid start of period #" #iPeriod ": %'RU64"), u64); \
         pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u64Start = u64; \
         rc = CFGMR3QueryU32(pCfgHandle, "CatchUpPrecentage" #iPeriod, &pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u32Percentage); \
@@ -542,6 +542,6 @@
     STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataR0.c1nsSteps,STAMTYPE_U32, "/TM/R0/1nsSteps",                     STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
     STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataR0.cBadPrev, STAMTYPE_U32, "/TM/R0/cBadPrev",                     STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
-    STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataRC.c1nsSteps,STAMTYPE_U32, "/TM/GC/1nsSteps",                     STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
-    STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataRC.cBadPrev, STAMTYPE_U32, "/TM/GC/cBadPrev",                     STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
+    STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataRC.c1nsSteps,STAMTYPE_U32, "/TM/RC/1nsSteps",                     STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
+    STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataRC.cBadPrev, STAMTYPE_U32, "/TM/RC/cBadPrev",                     STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
     STAM_REL_REG(     pVM,(void*)&pVM->tm.s.offVirtualSync,               STAMTYPE_U64, "/TM/VirtualSync/CurrentOffset",               STAMUNIT_NS, "The current offset. (subtract GivenUp to get the lag)");
     STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.offVirtualSyncGivenUp,        STAMTYPE_U64, "/TM/VirtualSync/GivenUp",                     STAMUNIT_NS, "Nanoseconds of the 'CurrentOffset' that's been given up and won't ever be attemted caught up with.");
@@ -552,6 +552,6 @@
     STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataR0.cExpired,    STAMTYPE_U32, "/TM/R0/cExpired",                     STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
     STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataR0.cUpdateRaces,STAMTYPE_U32, "/TM/R0/cUpdateRaces",                 STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
-    STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cExpired,    STAMTYPE_U32, "/TM/GC/cExpired",                     STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
-    STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cUpdateRaces,STAMTYPE_U32, "/TM/GC/cUpdateRaces",                 STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
+    STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cExpired,    STAMTYPE_U32, "/TM/RC/cExpired",                     STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
+    STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cUpdateRaces,STAMTYPE_U32, "/TM/RC/cUpdateRaces",                 STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
     STAM_REG(pVM, &pVM->tm.s.StatDoQueues,                            STAMTYPE_PROFILE, "/TM/DoQueues",                    STAMUNIT_TICKS_PER_CALL, "Profiling timer TMR3TimerQueuesDo.");
     STAM_REG(pVM, &pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL],      STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Virtual",            STAMUNIT_TICKS_PER_CALL, "Time spent on the virtual clock queue.");
@@ -968,5 +968,5 @@
             ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSync, offNew);
             ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
-            LogRel(("TM: Aborting catch-up attempt on reset with a %RU64 ns lag on reset; new total: %RU64 ns\n", offNew - offOld, offNew));
+            LogRel(("TM: Aborting catch-up attempt on reset with a %'RU64 ns lag on reset; new total: %'RU64 ns\n", offNew - offOld, offNew));
         }
     }
@@ -1098,5 +1098,5 @@
     if (u64Hz != TMCLOCK_FREQ_VIRTUAL)
     {
-        AssertMsgFailed(("The virtual clock frequency differs! Saved: %RU64 Binary: %RU64\n",
+        AssertMsgFailed(("The virtual clock frequency differs! Saved: %'RU64 Binary: %'RU64\n",
                          u64Hz, TMCLOCK_FREQ_VIRTUAL));
         return VERR_SSM_VIRTUAL_CLOCK_HZ;
@@ -1126,5 +1126,5 @@
     if (u64Hz != TMCLOCK_FREQ_REAL)
     {
-        AssertMsgFailed(("The real clock frequency differs! Saved: %RU64 Binary: %RU64\n",
+        AssertMsgFailed(("The real clock frequency differs! Saved: %'RU64 Binary: %'RU64\n",
                          u64Hz, TMCLOCK_FREQ_REAL));
         return VERR_SSM_VIRTUAL_CLOCK_HZ; /* missleading... */
@@ -1149,5 +1149,5 @@
         pVM->tm.s.cTSCTicksPerSecond = u64Hz;
 
-    LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool (state load)\n",
+    LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%'RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool (state load)\n",
             pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized, pVM->tm.s.fTSCUseRealTSC));
 
@@ -1386,5 +1386,5 @@
         {
             case TMTIMERSTATE_STOPPED:
-            case TMTIMERSTATE_EXPIRED:
+            case TMTIMERSTATE_EXPIRED_DELIVER:
                 break;
 
@@ -1407,4 +1407,5 @@
              * This shouldn't happen as the caller should make sure there are no races.
              */
+            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
             case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
             case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
@@ -1803,5 +1804,5 @@
               pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, pTimer->u64Expire, u64Now, pTimer->pszDesc));
         bool fRc;
-        TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED, TMTIMERSTATE_ACTIVE, fRc);
+        TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_GET_UNLINK, TMTIMERSTATE_ACTIVE, fRc);
         if (fRc)
         {
@@ -1822,6 +1823,6 @@
             pTimer->offPrev = 0;
 
-
             /* fire */
+            TM_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_DELIVER);
 //            tmUnlock(pVM);
             switch (pTimer->enmType)
@@ -1843,5 +1844,5 @@
 
             /* change the state if it wasn't changed already in the handler. */
-            TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED, fRc);
+            TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED_DELIVER, fRc);
             Log2(("tmR3TimerQueueRun: new state %s\n", tmTimerState(pTimer->enmState)));
         }
@@ -1902,7 +1903,7 @@
     {
         /* Calc 'now'. */
-        bool     fStopCatchup  = false;
-        bool     fUpdateStuff  = false;
-        uint64_t off           = pVM->tm.s.offVirtualSync;
+        bool        fStopCatchup   = false;
+        bool        fUpdateStuff   = false;
+        uint64_t    off            = pVM->tm.s.offVirtualSync;
         if (pVM->tm.s.fVirtualSyncCatchUp)
         {
@@ -1914,5 +1915,5 @@
                 {
                     off -= u64Sub;
-                    Log4(("TM: %RU64/%RU64: sub %RU64 (run)\n", u64VirtualNow - off, off - offSyncGivenUp, u64Sub));
+                    Log4(("TM: %'RU64/-%'8RU64: sub %'RU64 [tmR3TimerQueueRunVirtualSync]\n", u64VirtualNow - off, off - offSyncGivenUp, u64Sub));
                 }
                 else
@@ -1921,5 +1922,4 @@
                     fStopCatchup = true;
                     off = offSyncGivenUp;
-                    Log4(("TM: %RU64/0: caught up (run)\n", u64VirtualNow));
                 }
             }
@@ -1935,5 +1935,5 @@
             ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64Now);
             ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
-            Log4(("TM: %RU64/%RU64: exp tmr (run)\n", u64Now, u64VirtualNow - u64Now - offSyncGivenUp));
+            Log4(("TM: %'RU64/-%'8RU64: exp tmr [tmR3TimerQueueRunVirtualSync]\n", u64Now, u64VirtualNow - u64Now - offSyncGivenUp));
         }
         else if (fUpdateStuff)
@@ -1942,5 +1942,8 @@
             ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64VirtualNow);
             if (fStopCatchup)
+            {
                 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+                Log4(("TM: %'RU64/0: caught up [tmR3TimerQueueRunVirtualSync]\n", u64VirtualNow));
+            }
         }
     }
@@ -1970,5 +1973,5 @@
               pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, pTimer->u64Expire, u64Now, pTimer->pszDesc));
         bool fRc;
-        TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED, TMTIMERSTATE_ACTIVE, fRc);
+        TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_GET_UNLINK, TMTIMERSTATE_ACTIVE, fRc);
         if (fRc)
         {
@@ -1989,5 +1992,5 @@
             /* advance the clock - don't permit timers to be out of order or armed in the 'past'. */
 #ifdef VBOX_STRICT
-            AssertMsg(pTimer->u64Expire >= u64Prev, ("%RU64 < %RU64 %s\n", pTimer->u64Expire, u64Prev, pTimer->pszDesc));
+            AssertMsg(pTimer->u64Expire >= u64Prev, ("%'RU64 < %'RU64 %s\n", pTimer->u64Expire, u64Prev, pTimer->pszDesc));
             u64Prev = pTimer->u64Expire;
 #endif
@@ -1996,4 +1999,5 @@
 
             /* fire */
+            TM_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_DELIVER);
             switch (pTimer->enmType)
             {
@@ -2008,5 +2012,5 @@
 
             /* change the state if it wasn't changed already in the handler. */
-            TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED, fRc);
+            TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED_DELIVER, fRc);
             Log2(("tmR3TimerQueueRun: new state %s\n", tmTimerState(pTimer->enmState)));
         }
@@ -2025,5 +2029,5 @@
         const uint64_t u64VirtualNow2 = TMVirtualGetNoCheck(pVM);
         Assert(u64VirtualNow2 >= u64VirtualNow);
-        AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%RU64 < %RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
+        AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%'RU64 < %'RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
         const uint64_t offSlack = pVM->tm.s.u64VirtualSync - u64Now;
         STAM_STATS({
@@ -2068,5 +2072,5 @@
                 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
                 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
-                Log4(("TM: %RU64/%RU64: caught up\n", u64VirtualNow2 - offNew, offLag));
+                Log4(("TM: %'RU64/-%'8RU64: caught up [pt]\n", u64VirtualNow2 - offNew, offLag));
             }
             else if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
@@ -2081,5 +2085,5 @@
                     STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupAdjust[i]);
                     ASMAtomicWriteU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
-                    Log4(("TM: %RU64/%RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
+                    Log4(("TM: %'RU64/%'8RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
                 }
                 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow2;
@@ -2092,6 +2096,6 @@
                 ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
                 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
-                Log4(("TM: %RU64/%RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
-                LogRel(("TM: Giving up catch-up attempt at a %RU64 ns lag; new total: %RU64 ns\n", offLag, offNew));
+                Log4(("TM: %'RU64/%'8RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
+                LogRel(("TM: Giving up catch-up attempt at a %'RU64 ns lag; new total: %'RU64 ns\n", offLag, offNew));
             }
         }
@@ -2109,5 +2113,5 @@
                 ASMAtomicWriteU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
                 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, true);
-                Log4(("TM: %RU64/%RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
+                Log4(("TM: %'RU64/%'8RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
             }
             else
@@ -2116,6 +2120,6 @@
                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting);
                 ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
-                Log4(("TM: %RU64/%RU64: give up\n", u64VirtualNow2 - offNew, offLag));
-                LogRel(("TM: Not bothering to attempt catching up a %RU64 ns lag; new total: %RU64\n", offLag, offNew));
+                Log4(("TM: %'RU64/%'8RU64: give up\n", u64VirtualNow2 - offNew, offLag));
+                LogRel(("TM: Not bothering to attempt catching up a %'RU64 ns lag; new total: %'RU64\n", offLag, offNew));
             }
         }
@@ -2231,5 +2235,6 @@
             return SSMR3PutU64(pSSM, pTimer->u64Expire);
 
-        case TMTIMERSTATE_EXPIRED:
+        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
+        case TMTIMERSTATE_EXPIRED_DELIVER:
         case TMTIMERSTATE_DESTROY:
         case TMTIMERSTATE_FREE:
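
Note on the format-string changes in the TM.cpp hunks above: %RU64 becomes %'RU64 in the LogRel/Log4/AssertMsg strings, and the ring-context statistics nodes move from /TM/GC/* to /TM/RC/*. The apostrophe is IPRT's digit-grouping flag, which makes the large tick and nanosecond counts readable in the release log. Below is a minimal, self-contained sketch of that flag in isolation, assuming only iprt/string.h; the helper name, buffer sizes and the tick value are illustrative, and the exact grouping separator is whatever IPRT emits on the host.

    #include <iprt/string.h>    /* RTStrPrintf */
    #include <iprt/types.h>

    static void tmLogFormatDemo(void)
    {
        uint64_t cTSCTicksPerSecond = UINT64_C(2792000000);   /* illustrative value */
        char     szPlain[64];
        char     szGrouped[64];

        /* Same conversion, without and with the ' (grouping) flag adopted in this patch. */
        RTStrPrintf(szPlain,   sizeof(szPlain),   "%RU64",  cTSCTicksPerSecond);
        RTStrPrintf(szGrouped, sizeof(szGrouped), "%'RU64", cTSCTicksPerSecond);
        /* szPlain   -> "2792000000"
           szGrouped -> the same digits with IPRT's grouping separator inserted. */
    }
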
Index: /trunk/src/VBox/VMM/TMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/TMInternal.h	(revision 20049)
+++ /trunk/src/VBox/VMM/TMInternal.h	(revision 20050)
@@ -70,6 +70,8 @@
     /** Timer is active. */
     TMTIMERSTATE_ACTIVE,
-    /** Timer is expired, is being delivered. */
-    TMTIMERSTATE_EXPIRED,
+    /** Timer is expired, getting expire and unlinking. */
+    TMTIMERSTATE_EXPIRED_GET_UNLINK,
+    /** Timer is expired and is being delivered. */
+    TMTIMERSTATE_EXPIRED_DELIVER,
 
     /** Timer is stopped but still in the active list.
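
The TMInternal.h hunk above is the core of the change: the single TMTIMERSTATE_EXPIRED state is split into TMTIMERSTATE_EXPIRED_GET_UNLINK (the queue runner has claimed the timer, is reading its expire time and unlinking it) and TMTIMERSTATE_EXPIRED_DELIVER (the callback is being fired), which is what the TM_TRY_SET_STATE/TM_SET_STATE hunks in tmR3TimerQueueRun act on. The standalone sketch below mirrors that two-phase transition with a plain C11 atomic; the MYSTATE names and the TrySetState helper are illustrative stand-ins, not the VBox macros.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the relevant TMTIMERSTATE values. */
    typedef enum { ST_ACTIVE, ST_EXPIRED_GET_UNLINK, ST_EXPIRED_DELIVER, ST_STOPPED } MYSTATE;

    /* Compare-and-swap helper playing the role of TM_TRY_SET_STATE. */
    static bool TrySetState(_Atomic MYSTATE *pState, MYSTATE enmNew, MYSTATE enmOld)
    {
        MYSTATE enmExpected = enmOld;
        return atomic_compare_exchange_strong(pState, &enmExpected, enmNew);
    }

    int main(void)
    {
        _Atomic MYSTATE enmState = ST_ACTIVE;

        /* Phase 1: claim the expired timer and unlink it from the active list. */
        if (TrySetState(&enmState, ST_EXPIRED_GET_UNLINK, ST_ACTIVE))
        {
            /* ... reading u64Expire and unlinking would happen here ... */

            /* Phase 2: switch to the delivery state before firing the callback,
               so concurrent set/stop paths can tell the two windows apart. */
            atomic_store(&enmState, ST_EXPIRED_DELIVER);
            printf("firing timer callback\n");

            /* Back to stopped, unless the callback already rearmed or stopped the timer. */
            TrySetState(&enmState, ST_STOPPED, ST_EXPIRED_DELIVER);
        }
        return 0;
    }
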
Index: /trunk/src/VBox/VMM/VMMAll/TMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/TMAll.cpp	(revision 20049)
+++ /trunk/src/VBox/VMM/VMMAll/TMAll.cpp	(revision 20050)
@@ -466,5 +466,5 @@
 #endif
         }
-        LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
+        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
         return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
     }
@@ -514,5 +514,5 @@
 
                 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
-                LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
+                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
             }
@@ -612,5 +612,5 @@
         }
         STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
-        LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
+        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
         return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
     }
@@ -740,9 +740,9 @@
          */
         TMTIMERSTATE    enmState = pTimer->enmState;
-        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%llu\n",
+        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
               pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
         switch (enmState)
         {
-            case TMTIMERSTATE_EXPIRED:
+            case TMTIMERSTATE_EXPIRED_DELIVER:
             case TMTIMERSTATE_STOPPED:
                 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
@@ -753,5 +753,5 @@
                               ||    pTimer->CTX_SUFF(pVM)->tm.s.fVirtualSyncTicking
                               ||    u64Expire >= pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync,
-                              ("%RU64 < %RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
+                              ("%'RU64 < %'RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
                     pTimer->u64Expire = u64Expire;
                     TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
@@ -799,4 +799,5 @@
 
 
+            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
             case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
             case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
@@ -953,5 +954,5 @@
         switch (enmState)
         {
-            case TMTIMERSTATE_EXPIRED:
+            case TMTIMERSTATE_EXPIRED_DELIVER:
                 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
                 return VERR_INVALID_PARAMETER;
@@ -989,4 +990,5 @@
                 break;
 
+            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
             case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
             case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
@@ -1051,5 +1053,5 @@
             return ~(uint64_t)0;
     }
-    //Log2(("TMTimerGet: returns %llu (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
+    //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
     //      u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
     return u64;
@@ -1330,5 +1332,6 @@
         switch (enmState)
         {
-            case TMTIMERSTATE_EXPIRED:
+            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
+            case TMTIMERSTATE_EXPIRED_DELIVER:
             case TMTIMERSTATE_STOPPED:
             case TMTIMERSTATE_PENDING_STOP:
@@ -1341,5 +1344,5 @@
             case TMTIMERSTATE_PENDING_RESCHEDULE:
             case TMTIMERSTATE_PENDING_SCHEDULE:
-                Log2(("TMTimerGetExpire: returns %llu (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
+                Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                       pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                 return pTimer->u64Expire;
@@ -1388,5 +1391,6 @@
     {
         case TMTIMERSTATE_STOPPED:
-        case TMTIMERSTATE_EXPIRED:
+        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
+        case TMTIMERSTATE_EXPIRED_DELIVER:
         case TMTIMERSTATE_PENDING_STOP:
         case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
@@ -1436,13 +1440,14 @@
         CASE( 1,STOPPED);
         CASE( 2,ACTIVE);
-        CASE( 3,EXPIRED);
-        CASE( 4,PENDING_STOP);
-        CASE( 5,PENDING_STOP_SCHEDULE);
-        CASE( 6,PENDING_SCHEDULE_SET_EXPIRE);
-        CASE( 7,PENDING_SCHEDULE);
-        CASE( 8,PENDING_RESCHEDULE_SET_EXPIRE);
-        CASE( 9,PENDING_RESCHEDULE);
-        CASE(10,DESTROY);
-        CASE(11,FREE);
+        CASE( 3,EXPIRED_GET_UNLINK);
+        CASE( 4,EXPIRED_DELIVER);
+        CASE( 5,PENDING_STOP);
+        CASE( 6,PENDING_STOP_SCHEDULE);
+        CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
+        CASE( 8,PENDING_SCHEDULE);
+        CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
+        CASE(10,PENDING_RESCHEDULE);
+        CASE(11,DESTROY);
+        CASE(12,FREE);
         default:
             AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
@@ -1597,5 +1602,6 @@
             case TMTIMERSTATE_STOPPED:
             case TMTIMERSTATE_ACTIVE:
-            case TMTIMERSTATE_EXPIRED:
+            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
+            case TMTIMERSTATE_EXPIRED_DELIVER:
             default:
                 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
@@ -1623,5 +1629,5 @@
      */
     int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
-    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32}\n", pQueue, pQueue->enmClock, offNext));
+    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
     if (!offNext)
         return;
@@ -1644,4 +1650,5 @@
         Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
     } /* foreach timer in current schedule batch. */
+    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
 }
 
@@ -1720,5 +1727,5 @@
             case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
             case TMTIMERSTATE_STOPPED:
-            case TMTIMERSTATE_EXPIRED:
+            case TMTIMERSTATE_EXPIRED_DELIVER:
             {
                 Assert(!pCur->offNext);
@@ -1740,4 +1747,5 @@
 
             /* shouldn't get here! */
+            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
             case TMTIMERSTATE_DESTROY:
             default:
Index: /trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp	(revision 20049)
+++ /trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp	(revision 20050)
@@ -419,5 +419,5 @@
         {
             off -= u64Sub;
-            Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
+            Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
         }
         else
@@ -427,5 +427,5 @@
             off = pVM->tm.s.offVirtualSyncGivenUp;
             fStop = true;
-            Log4(("TM: %RU64/0: caught up\n", u64));
+            Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
         }
     }
@@ -465,5 +465,5 @@
         VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
         Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
-        Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
+        Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
         tmVirtualSyncUnlock(pVM);
 
@@ -477,4 +477,5 @@
     STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
 
+    Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
     return u64;
 }
@@ -498,4 +499,5 @@
         tmVirtualSyncUnlock(pVM);
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+        Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
         return u64;
     }
@@ -526,6 +528,6 @@
         PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
         VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
-        Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
-        Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
+        Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, !!VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+        Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
         tmVirtualSyncUnlock(pVM);
 
@@ -538,4 +540,5 @@
     }
     STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+    Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
     return u64;
 }
@@ -596,4 +599,5 @@
                 {
                     STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
+                    Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
                     return off;
                 }
@@ -607,4 +611,5 @@
         {
             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
+            Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
             return off;
         }
@@ -645,4 +650,5 @@
                 && cOuterTries > 0)
                 continue;
+            Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
             return off;
         }
@@ -668,5 +674,5 @@
                     {
                         off -= u64Sub;
-                        Log4(("TM: %RU64/%RU64: sub %RU32 (NoLock)\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
+                        Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
                     }
                     else
@@ -675,5 +681,5 @@
                         STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
                         off = offGivenUp;
-                        Log4(("TM: %RU64/0: caught up\n", u64));
+                        Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
                     }
                 }
@@ -719,11 +725,12 @@
 #endif
             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
-            Log4(("TM: %RU64/%RU64: exp tmr=>ff (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
+            Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
         }
         else
-            Log4(("TM: %RU64/%RU64: exp tmr (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
+            Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
     }
 
+    Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
     return u64;
 }
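
The reworked Log4 lines in TMAllVirtual.cpp (and TM.cpp) settle on one layout: the first field is the current virtual-sync time, the second is the remaining catch-up lag printed as a literal minus followed by a width-8, digit-grouped value, and a short bracketed tag identifies the call site or code path; the new Log6 statements simply report each getter's return value. A minimal, self-contained sketch of composing one such line is below, assuming only iprt/string.h; the values and the helper name are made up for illustration.

    #include <iprt/string.h>
    #include <iprt/types.h>

    static void tmLog4LineDemo(void)
    {
        uint64_t u64Now = UINT64_C(1234567890);   /* illustrative virtual-sync time */
        uint64_t offLag = UINT64_C(2500000);      /* illustrative catch-up lag */
        char     szLine[128];

        /* Same shape as the reworked Log4 lines: time, literal '-', width-8 grouped lag, tag. */
        RTStrPrintf(szLine, sizeof(szLine), "TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]",
                    u64Now, offLag, (uint32_t)1000);
        /* szLine now holds one log line in the new format. */
    }
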
