Index: /trunk/include/VBox/pdmcritsect.h
===================================================================
--- /trunk/include/VBox/pdmcritsect.h	(revision 20007)
+++ /trunk/include/VBox/pdmcritsect.h	(revision 20008)
@@ -65,6 +65,6 @@
 VMMDECL(void)       PDMCritSectLeave(PPDMCRITSECT pCritSect);
 VMMDECL(bool)       PDMCritSectIsOwner(PCPDMCRITSECT pCritSect);
-VMMDECL(bool)       PDMCritSectIsLocked(PCPDMCRITSECT pCritSect);
 VMMDECL(bool)       PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu);
+VMMDECL(bool)       PDMCritSectIsOwned(PCPDMCRITSECT pCritSect);
 VMMDECL(bool)       PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect);
 VMMDECL(uint32_t)   PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect);
@@ -72,5 +72,5 @@
 VMMR3DECL(int)      PDMR3CritSectDelete(PPDMCRITSECT pCritSect);
 VMMDECL(int)        PDMR3CritSectTerm(PVM pVM);
-VMMR3DECL(void)     PDMR3CritSectFF(PVMCPU pVCpu);
+VMMDECL(void)       PDMCritSectFF(PVMCPU pVCpu);
 VMMR3DECL(uint32_t) PDMR3CritSectCountOwned(PVM pVM, char *pszNames, size_t cbNames);
 
Index: /trunk/include/iprt/critsect.h
===================================================================
--- /trunk/include/iprt/critsect.h	(revision 20007)
+++ /trunk/include/iprt/critsect.h	(revision 20008)
@@ -235,4 +235,41 @@
 RTDECL(int) RTCritSectDelete(PRTCRITSECT pCritSect);
 
+/**
+ * Checks whether the caller is the owner of the critical section.
+ *
+ * @returns true if owner.
+ * @returns false if not owner.
+ * @param   pCritSect   The critical section.
+ */
+DECLINLINE(bool) RTCritSectIsOwner(PCRTCRITSECT pCritSect)
+{
+    return pCritSect->NativeThreadOwner == RTThreadNativeSelf();
+}
+
+#endif /* IN_RING3 */
+
+/**
+ * Checks whether the section is owned by anyone.
+ *
+ * @returns true if owned.
+ * @returns false if not owned.
+ * @param   pCritSect   The critical section.
+ */
+DECLINLINE(bool) RTCritSectIsOwned(PCRTCRITSECT pCritSect)
+{
+    return pCritSect->NativeThreadOwner != NIL_RTNATIVETHREAD;
+}
+
+/**
+ * Gets the thread id of the critical section owner.
+ *
+ * @returns Thread id of the owner thread if owned.
+ * @returns NIL_RTNATIVETHREAD if not owned.
+ * @param   pCritSect   The critical section.
+ */
+DECLINLINE(RTNATIVETHREAD) RTCritSectGetOwner(PCRTCRITSECT pCritSect)
+{
+    return pCritSect->NativeThreadOwner;
+}
 
 /**
@@ -248,45 +285,4 @@
 }
 
-
-/**
- * Checks the caller is the owner of the critical section.
- *
- * @returns true if owner.
- * @returns false if not owner.
- * @param   pCritSect   The critical section.
- */
-DECLINLINE(bool) RTCritSectIsOwner(PCRTCRITSECT pCritSect)
-{
-    return pCritSect->NativeThreadOwner == RTThreadNativeSelf();
-}
-
-
-/**
- * Checks the section is owned by anyone.
- *
- * @returns true if owned.
- * @returns false if not owned.
- * @param   pCritSect   The critical section.
- */
-DECLINLINE(bool) RTCritSectIsOwned(PCRTCRITSECT pCritSect)
-{
-    return pCritSect->NativeThreadOwner != NIL_RTNATIVETHREAD;
-}
-
-
-/**
- * Gets the thread id of the critical section owner.
- *
- * @returns Thread id of the owner thread if owned.
- * @returns NIL_RTNATIVETHREAD is not owned.
- * @param   pCritSect   The critical section.
- */
-DECLINLINE(RTNATIVETHREAD) RTCritSectGetOwner(PCRTCRITSECT pCritSect)
-{
-    return pCritSect->NativeThreadOwner;
-}
-
-#endif /* IN_RING3 */
-
 /**
  * Gets the recursion depth.
Index: /trunk/include/iprt/thread.h
===================================================================
--- /trunk/include/iprt/thread.h	(revision 20007)
+++ /trunk/include/iprt/thread.h	(revision 20008)
@@ -47,4 +47,45 @@
  * @{
  */
+
+/**
+ * The thread state.
+ */
+typedef enum RTTHREADSTATE
+{
+    /** The usual invalid 0 value. */
+    RTTHREADSTATE_INVALID = 0,
+    /** The thread is being initialized. */
+    RTTHREADSTATE_INITIALIZING,
+    /** The thread has terminated */
+    RTTHREADSTATE_TERMINATED,
+    /** Probably running. */
+    RTTHREADSTATE_RUNNING,
+    /** Waiting on a critical section. */
+    RTTHREADSTATE_CRITSECT,
+    /** Waiting on a mutex. */
+    RTTHREADSTATE_MUTEX,
+    /** Waiting on an event semaphore. */
+    RTTHREADSTATE_EVENT,
+    /** Waiting on an event multiple wakeup semaphore. */
+    RTTHREADSTATE_EVENTMULTI,
+    /** Waiting on a read write semaphore, read (shared) access. */
+    RTTHREADSTATE_RW_READ,
+    /** Waiting on a read write semaphore, write (exclusive) access. */
+    RTTHREADSTATE_RW_WRITE,
+    /** The thread is sleeping. */
+    RTTHREADSTATE_SLEEP,
+    /** The usual 32-bit size hack. */
+    RTTHREADSTATE_32BIT_HACK = 0x7fffffff
+} RTTHREADSTATE;
+
+/** Checks if a thread state indicates that the thread is sleeping. */
+#define RTTHREAD_IS_SLEEPING(enmState) (    (enmState) == RTTHREADSTATE_CRITSECT \
+                                        ||  (enmState) == RTTHREADSTATE_MUTEX \
+                                        ||  (enmState) == RTTHREADSTATE_EVENT \
+                                        ||  (enmState) == RTTHREADSTATE_EVENTMULTI \
+                                        ||  (enmState) == RTTHREADSTATE_RW_READ \
+                                        ||  (enmState) == RTTHREADSTATE_RW_WRITE \
+                                        ||  (enmState) == RTTHREADSTATE_SLEEP \
+                                       )
 
 /**
@@ -534,4 +575,31 @@
 RTDECL(void) RTThreadReadLockDec(RTTHREAD Thread);
 
+/**
+ * Unblocks a thread.
+ *
+ * This function is paired with RTThreadBlocking.
+ *
+ * @param   hThread     The current thread.
+ * @param   enmCurState The current state, used to check for nested blocking.
+ *                      The new state will be running.
+ */
+RTDECL(void) RTThreadUnblocked(RTTHREAD hThread, RTTHREADSTATE enmCurState);
+
+/**
+ * Change the thread state to blocking and do deadlock detection.
+ *
+ * This is a RT_STRICT method for debugging locks and detecting deadlocks.
+ *
+ * @param   hThread     The current thread.
+ * @param   enmState    The sleep state.
+ * @param   u64Block    The block data. A pointer or handle.
+ * @param   pszFile     Where we are blocking.
+ * @param   uLine       Where we are blocking.
+ * @param   uId         Where we are blocking.
+ */
+RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState, uint64_t u64Block,
+                              const char *pszFile, unsigned uLine, RTUINTPTR uId);
+
+
 
 /** @name Thread Local Storage
Index: /trunk/src/VBox/Runtime/common/misc/thread.cpp
===================================================================
--- /trunk/src/VBox/Runtime/common/misc/thread.cpp	(revision 20007)
+++ /trunk/src/VBox/Runtime/common/misc/thread.cpp	(revision 20008)
@@ -1406,5 +1406,5 @@
  * This is a RT_STRICT method for debugging locks and detecting deadlocks.
  *
- * @param   pThread     This thread.
+ * @param   hThread     The current thread.
  * @param   enmState    The sleep state.
  * @param   u64Block    The block data. A pointer or handle.
@@ -1413,7 +1413,8 @@
  * @param   uId         Where we are blocking.
  */
-void rtThreadBlocking(PRTTHREADINT pThread, RTTHREADSTATE enmState, uint64_t u64Block,
-                     const char *pszFile, unsigned uLine, RTUINTPTR uId)
-{
+RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState, uint64_t u64Block,
+                              const char *pszFile, unsigned uLine, RTUINTPTR uId)
+{
+    PRTTHREADINT pThread = hThread;
     Assert(RTTHREAD_IS_SLEEPING(enmState));
     if (pThread && pThread->enmState == RTTHREADSTATE_RUNNING)
@@ -1502,12 +1503,12 @@
  * This function is paired with rtThreadBlocking.
  *
- * @param   pThread     The current thread.
+ * @param   hThread     The current thread.
  * @param   enmCurState The current state, used to check for nested blocking.
  *                      The new state will be running.
  */
-void rtThreadUnblocked(PRTTHREADINT pThread, RTTHREADSTATE enmCurState)
-{
-    if (pThread && pThread->enmState == enmCurState)
-        ASMAtomicWriteSize(&pThread->enmState, RTTHREADSTATE_RUNNING);
+RTDECL(void) RTThreadUnblocked(RTTHREAD hThread, RTTHREADSTATE enmCurState)
+{
+    if (hThread && hThread->enmState == enmCurState)
+        ASMAtomicWriteSize(&hThread->enmState, RTTHREADSTATE_RUNNING);
 }
 
Index: /trunk/src/VBox/Runtime/generic/critsect-generic.cpp
===================================================================
--- /trunk/src/VBox/Runtime/generic/critsect-generic.cpp	(revision 20007)
+++ /trunk/src/VBox/Runtime/generic/critsect-generic.cpp	(revision 20008)
@@ -251,10 +251,10 @@
      */
     pCritSect->cNestings = 1;
-    ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NativeThreadSelf);
+    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
 #ifdef RTCRITSECT_STRICT
     pCritSect->Strict.pszEnterFile = pszFile;
     pCritSect->Strict.u32EnterLine = uLine;
     pCritSect->Strict.uEnterId     = uId;
-    ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, (RTUINTPTR)ThreadSelf); /* screw gcc and its pedantic warnings. */
+    ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, ThreadSelf);
 #endif
 
@@ -319,9 +319,9 @@
         {
 #ifdef RTCRITSECT_STRICT
-            rtThreadBlocking(ThreadSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, pszFile, uLine, uId);
+            RTThreadBlocking(ThreadSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, pszFile, uLine, uId);
 #endif
             int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
 #ifdef RTCRITSECT_STRICT
-            rtThreadUnblocked(ThreadSelf, RTTHREADSTATE_CRITSECT);
+            RTThreadUnblocked(ThreadSelf, RTTHREADSTATE_CRITSECT);
 #endif
             if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
@@ -338,10 +338,10 @@
      */
     pCritSect->cNestings = 1;
-    ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NativeThreadSelf);
+    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
 #ifdef RTCRITSECT_STRICT
     pCritSect->Strict.pszEnterFile = pszFile;
     pCritSect->Strict.u32EnterLine = uLine;
     pCritSect->Strict.uEnterId     = uId;
-    ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, (RTUINTPTR)ThreadSelf); /* screw gcc and its pedantic warnings. */
+    ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, ThreadSelf);
     RTThreadWriteLockInc(ThreadSelf);
 #endif
@@ -383,7 +383,7 @@
         if (pCritSect->Strict.ThreadOwner != NIL_RTTHREAD) /* May happen for PDMCritSects when entering GC/R0. */
             RTThreadWriteLockDec(pCritSect->Strict.ThreadOwner);
-        ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, NIL_RTTHREAD);
-#endif
-        ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
+        ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, NIL_RTTHREAD);
+#endif
+        ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
         if (ASMAtomicDecS32(&pCritSect->cLockers) >= 0)
         {
Index: /trunk/src/VBox/Runtime/include/internal/thread.h
===================================================================
--- /trunk/src/VBox/Runtime/include/internal/thread.h	(revision 20007)
+++ /trunk/src/VBox/Runtime/include/internal/thread.h	(revision 20008)
@@ -45,45 +45,4 @@
 
 
-/**
- * The thread state.
- */
-typedef enum RTTHREADSTATE
-{
-    /** The usual invalid 0 value. */
-    RTTHREADSTATE_INVALID = 0,
-    /** The thread is being initialized. */
-    RTTHREADSTATE_INITIALIZING,
-    /** The thread has terminated */
-    RTTHREADSTATE_TERMINATED,
-    /** Probably running. */
-    RTTHREADSTATE_RUNNING,
-    /** Waiting on a critical section. */
-    RTTHREADSTATE_CRITSECT,
-    /** Waiting on a mutex. */
-    RTTHREADSTATE_MUTEX,
-    /** Waiting on a event semaphore. */
-    RTTHREADSTATE_EVENT,
-    /** Waiting on a event multiple wakeup semaphore. */
-    RTTHREADSTATE_EVENTMULTI,
-    /** Waiting on a read write semaphore, read (shared) access. */
-    RTTHREADSTATE_RW_READ,
-    /** Waiting on a read write semaphore, write (exclusive) access. */
-    RTTHREADSTATE_RW_WRITE,
-    /** The thread is sleeping. */
-    RTTHREADSTATE_SLEEP,
-    /** The usual 32-bit size hack. */
-    RTTHREADSTATE_32BIT_HACK = 0x7fffffff
-} RTTHREADSTATE;
-
-
-/** Checks if a thread state indicates that the thread is sleeping. */
-#define RTTHREAD_IS_SLEEPING(enmState) (    (enmState) == RTTHREADSTATE_CRITSECT \
-                                        ||  (enmState) == RTTHREADSTATE_MUTEX \
-                                        ||  (enmState) == RTTHREADSTATE_EVENT \
-                                        ||  (enmState) == RTTHREADSTATE_EVENTMULTI \
-                                        ||  (enmState) == RTTHREADSTATE_RW_READ \
-                                        ||  (enmState) == RTTHREADSTATE_RW_WRITE \
-                                        ||  (enmState) == RTTHREADSTATE_SLEEP \
-                                       )
 
 /** Max thread name length. */
Index: /trunk/src/VBox/VMM/EM.cpp
===================================================================
--- /trunk/src/VBox/VMM/EM.cpp	(revision 20007)
+++ /trunk/src/VBox/VMM/EM.cpp	(revision 20008)
@@ -3298,5 +3298,5 @@
 {
     if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
-        PDMR3CritSectFF(pVCpu);
+        PDMCritSectFF(pVCpu);
 
     if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
Index: /trunk/src/VBox/VMM/PDM.cpp
===================================================================
--- /trunk/src/VBox/VMM/PDM.cpp	(revision 20007)
+++ /trunk/src/VBox/VMM/PDM.cpp	(revision 20008)
@@ -335,38 +335,31 @@
      * Initialize sub compontents.
      */
-    int rc = pdmR3CritSectInit(pVM);
+    int rc = RTCritSectInit(&pVM->pdm.s.MiscCritSect);
     if (RT_SUCCESS(rc))
-    {
+        rc = pdmR3CritSectInit(pVM);
+    if (RT_SUCCESS(rc))
         rc = PDMR3CritSectInit(pVM, &pVM->pdm.s.CritSect, "PDM");
+    if (RT_SUCCESS(rc))
+        rc = pdmR3LdrInitU(pVM->pUVM);
+    if (RT_SUCCESS(rc))
+        rc = pdmR3DrvInit(pVM);
+    if (RT_SUCCESS(rc))
+        rc = pdmR3DevInit(pVM);
+#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+    if (RT_SUCCESS(rc))
+        rc = pdmR3AsyncCompletionInit(pVM);
+#endif
+    if (RT_SUCCESS(rc))
+    {
+        /*
+         * Register the saved state data unit.
+         */
+        rc = SSMR3RegisterInternal(pVM, "pdm", 1, PDM_SAVED_STATE_VERSION, 128,
+                                   NULL, pdmR3Save, NULL,
+                                   pdmR3LoadPrep, pdmR3Load, NULL);
         if (RT_SUCCESS(rc))
-            rc = pdmR3LdrInitU(pVM->pUVM);
-        if (RT_SUCCESS(rc))
-        {
-            rc = pdmR3DrvInit(pVM);
-            if (RT_SUCCESS(rc))
-            {
-                rc = pdmR3DevInit(pVM);
-                if (RT_SUCCESS(rc))
-                {
-#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
-                    rc = pdmR3AsyncCompletionInit(pVM);
-                    if (RT_SUCCESS(rc))
-#endif
-                    {
-                        /*
-                         * Register the saved state data unit.
-                         */
-                        rc = SSMR3RegisterInternal(pVM, "pdm", 1, PDM_SAVED_STATE_VERSION, 128,
-                                                   NULL, pdmR3Save, NULL,
-                                                   pdmR3LoadPrep, pdmR3Load, NULL);
-                        if (RT_SUCCESS(rc))
-                        {
-                            LogFlow(("PDM: Successfully initialized\n"));
-                            return rc;
-                        }
-
-                    }
-                }
-            }
+        {
+            LogFlow(("PDM: Successfully initialized\n"));
+            return rc;
         }
     }
@@ -600,4 +593,5 @@
      */
     PDMR3CritSectDelete(&pVM->pdm.s.CritSect);
+    /* The MiscCritSect is deleted by PDMR3CritSectTerm. */
 
     LogFlow(("PDMR3Term: returns %Rrc\n", VINF_SUCCESS));
Index: /trunk/src/VBox/VMM/PDMCritSect.cpp
===================================================================
--- /trunk/src/VBox/VMM/PDMCritSect.cpp	(revision 20007)
+++ /trunk/src/VBox/VMM/PDMCritSect.cpp	(revision 20008)
@@ -20,5 +20,4 @@
  */
 
-//#define PDM_WITH_R3R0_CRIT_SECT
 
 /*******************************************************************************
@@ -33,7 +32,5 @@
 #include <VBox/err.h>
 #include <VBox/log.h>
-#ifdef PDM_WITH_R3R0_CRIT_SECT
-# include <VBox/sup.h>
-#endif
+#include <VBox/sup.h>
 #include <iprt/asm.h>
 #include <iprt/assert.h>
@@ -72,8 +69,10 @@
 void pdmR3CritSectRelocate(PVM pVM)
 {
+    RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
     for (PPDMCRITSECTINT pCur = pVM->pdm.s.pCritSects;
          pCur;
          pCur = pCur->pNext)
         pCur->pVMRC = pVM->pVMRC;
+    RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
 }
 
@@ -92,4 +91,5 @@
 {
     int rc = VINF_SUCCESS;
+    RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
     while (pVM->pdm.s.pCritSects)
     {
@@ -99,4 +99,6 @@
             rc = rc2;
     }
+    RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
+    RTCritSectDelete(&pVM->pdm.s.MiscCritSect);
     return rc;
 }
@@ -117,5 +119,4 @@
     VM_ASSERT_EMT(pVM);
 
-#ifdef PDM_WITH_R3R0_CRIT_SECT
     /*
      * Allocate the semaphore.
@@ -123,10 +124,6 @@
     AssertCompile(sizeof(SUPSEMEVENT) == sizeof(pCritSect->Core.EventSem));
     int rc = SUPSemEventCreate(pVM->pSession, (PSUPSEMEVENT)&pCritSect->Core.EventSem);
-#else
-    int rc = RTCritSectInit(&pCritSect->Core);
-#endif
     if (RT_SUCCESS(rc))
     {
-#ifdef PDM_WITH_R3R0_CRIT_SECT
         /*
          * Initialize the structure (first bit is c&p from RTCritSectInitEx).
@@ -141,5 +138,4 @@
         pCritSect->Core.Strict.u32EnterLine  = 0;
         pCritSect->Core.Strict.uEnterId      = 0;
-#endif
         pCritSect->pVMR3                     = pVM;
         pCritSect->pVMR0                     = pVM->pVMR0;
@@ -172,4 +168,5 @@
  * @param   pCritSect       Pointer to the critical section.
  * @param   pszName         The name of the critical section (for statistics).
+ * @thread  EMT(0)
  */
 VMMR3DECL(int) PDMR3CritSectInit(PVM pVM, PPDMCRITSECT pCritSect, const char *pszName)
@@ -205,12 +202,14 @@
  *
  * @returns Return code from RTCritSectDelete.
+ *
  * @param   pVM         The VM handle.
  * @param   pCritSect   The critical section.
  * @param   pPrev       The previous critical section in the list.
  * @param   fFinal      Set if this is the final call and statistics shouldn't be deregistered.
+ *
+ * @remarks Caller must've entered the MiscCritSect.
  */
 static int pdmR3CritSectDeleteOne(PVM pVM, PPDMCRITSECTINT pCritSect, PPDMCRITSECTINT pPrev, bool fFinal)
 {
-#ifdef PDM_WITH_R3R0_CRIT_SECT
     /*
      * Assert free waiters and so on (c&p from RTCritSectDelete).
@@ -220,5 +219,5 @@
     Assert(pCritSect->Core.cLockers == -1);
     Assert(pCritSect->Core.NativeThreadOwner == NIL_RTNATIVETHREAD);
-#endif
+    Assert(RTCritSectIsOwner(&pVM->pdm.s.MiscCritSect));
 
     /*
@@ -234,5 +233,4 @@
      * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
      */
-#ifdef PDM_WITH_R3R0_CRIT_SECT
     ASMAtomicWriteU32(&pCritSect->Core.u32Magic, 0);
     SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->Core.EventSem;
@@ -243,5 +241,4 @@
     int rc = SUPSemEventClose(pVM->pSession, hEvent);
     AssertRC(rc);
-#endif
     pCritSect->pNext   = NULL;
     pCritSect->pvKey   = NULL;
@@ -260,7 +257,4 @@
 #endif
     }
-#ifndef PDM_WITH_R3R0_CRIT_SECT
-    int rc = RTCritSectDelete(&pCritSect->Core);
-#endif
     return rc;
 }
@@ -282,7 +276,8 @@
      * Iterate the list and match key.
      */
-    int             rc = VINF_SUCCESS;
+    int             rc    = VINF_SUCCESS;
     PPDMCRITSECTINT pPrev = NULL;
-    PPDMCRITSECTINT pCur = pVM->pdm.s.pCritSects;
+    RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
+    PPDMCRITSECTINT pCur  = pVM->pdm.s.pCritSects;
     while (pCur)
     {
@@ -299,4 +294,5 @@
         pCur = pCur->pNext;
     }
+    RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
     return rc;
 }
@@ -330,12 +326,17 @@
      * Find and unlink it.
      */
-    PVM             pVM = pCritSect->s.pVMR3;
+    PVM             pVM   = pCritSect->s.pVMR3;
     AssertReleaseReturn(pVM, VERR_INTERNAL_ERROR);
     PPDMCRITSECTINT pPrev = NULL;
-    PPDMCRITSECTINT pCur = pVM->pdm.s.pCritSects;
+    RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
+    PPDMCRITSECTINT pCur  = pVM->pdm.s.pCritSects;
     while (pCur)
     {
         if (pCur == &pCritSect->s)
-            return pdmR3CritSectDeleteOne(pVM, pCur, pPrev, false /* not final */);
+        {
+            int rc = pdmR3CritSectDeleteOne(pVM, pCur, pPrev, false /* not final */);
+            RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
+            return rc;
+        }
 
         /* next */
@@ -343,33 +344,7 @@
         pCur = pCur->pNext;
     }
+    RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
     AssertReleaseMsgFailed(("pCritSect=%p wasn't found!\n", pCritSect));
     return VERR_INTERNAL_ERROR;
-}
-
-
-/**
- * Process the critical sections queued for ring-3 'leave'.
- *
- * @param   pVCpu         The VMCPU handle.
- */
-VMMR3DECL(void) PDMR3CritSectFF(PVMCPU pVCpu)
-{
-    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);
-
-    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
-    for (RTUINT i = 0; i < c; i++)
-    {
-        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
-#ifdef PDM_WITH_R3R0_CRIT_SECT
-        int rc = pdmCritSectLeave(pCritSect);
-#else
-        int rc = RTCritSectLeave(&pCritSect->s.Core);
-#endif
-        LogFlow(("PDMR3CritSectFF: %p - %Rrc\n", pCritSect, rc));
-        AssertRC(rc);
-    }
-
-    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
-    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
 }
 
@@ -382,4 +357,5 @@
  * @returns VERR_NOT_OWNER if we're not the critsect owner.
  * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
+ *
  * @param   pCritSect       The critical section.
  * @param   EventToSignal   The semapore that should be signalled.
Index: /trunk/src/VBox/VMM/PDMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/PDMInternal.h	(revision 20007)
+++ /trunk/src/VBox/VMM/PDMInternal.h	(revision 20008)
@@ -43,4 +43,11 @@
  * @{
  */
+
+/** @def PDM_WITH_R3R0_CRIT_SECT
+ * Enables or disables ring-3/ring-0 critical sections. */
+#if defined(DOXYGEN_RUNNING) || 1
+# define PDM_WITH_R3R0_CRIT_SECT
+#endif
+
 
 /*******************************************************************************
@@ -227,4 +234,8 @@
 } PDMCRITSECTINT;
 typedef PDMCRITSECTINT *PPDMCRITSECTINT;
+
+/** Indicates that the critical section is queued for unlock.
+ * PDMCritSectIsOwner and PDMCritSectIsOwned optimizations. */
+#define PDMCRITSECT_FLAGS_PENDING_UNLOCK    RT_BIT_32(17)
 
 
@@ -805,6 +816,6 @@
 {
     /** The number of entries in the apQueuedCritSectsLeaves table that's currnetly in use. */
-    RTUINT                          cQueuedCritSectLeaves;
-    RTUINT                          uPadding0; /**< Alignment padding.*/
+    uint32_t                        cQueuedCritSectLeaves;
+    uint32_t                        uPadding0; /**< Alignment padding.*/
     /** Critical sections queued in RC/R0 because of contention preventing leave to complete. (R3 Ptrs)
      * We will return to Ring-3 ASAP, so this queue doesn't have to be very long. */
@@ -907,4 +918,9 @@
      * the PIC, APIC, IOAPIC and PCI devices pluss some PDM functions. */
     PDMCRITSECT                     CritSect;
+    /** The PDM miscellaneous lock.
+     * This is used to protect things like critsect init/delete that formerly was
+     * serialized by there only being one EMT.
+     */
+    RTCRITSECT                      MiscCritSect;
 
     /** Number of times a critical section leave requesed needed to be queued for ring-3 execution. */
Index: /trunk/src/VBox/VMM/VMM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMM.cpp	(revision 20007)
+++ /trunk/src/VBox/VMM/VMM.cpp	(revision 20008)
@@ -1550,9 +1550,10 @@
 static int vmmR3ServiceCallHostRequest(PVM pVM, PVMCPU pVCpu)
 {
-    /* We must also check for pending releases or else we can deadlock when acquiring a new lock here. 
-     * On return we go straight back to R0/GC.
+    /*
+     * We must also check for pending critsect exits or else we can deadlock
+     * when entering other critsects here.
      */
     if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
-        PDMR3CritSectFF(pVCpu);
+        PDMCritSectFF(pVCpu);
 
     switch (pVCpu->vmm.s.enmCallHostOperation)
Index: /trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp	(revision 20007)
+++ /trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp	(revision 20008)
@@ -40,4 +40,120 @@
 
 
+/*******************************************************************************
+*   Defined Constants And Macros                                               *
+*******************************************************************************/
+/** The number of loops to spin for in ring-3. */
+#define PDMCRITSECT_SPIN_COUNT_R3       20
+/** The number of loops to spin for in ring-0. */
+#define PDMCRITSECT_SPIN_COUNT_R0       256
+/** The number of loops to spin for in the raw-mode context. */
+#define PDMCRITSECT_SPIN_COUNT_RC       256
+
+/** @def PDMCRITSECT_STRICT
+ * Enables/disables PDM critsect strictness like deadlock detection. */
+#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
+# define PDMCRITSECT_STRICT
+#endif
+
+
+/**
+ * Gets the ring-3 native thread handle of the calling thread.
+ *
+ * @returns native thread handle (ring-3).
+ * @param   pCritSect           The critical section. This is used in R0 and RC.
+ */
+DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PPDMCRITSECT pCritSect)
+{
+#ifdef IN_RING3
+    NOREF(pCritSect);
+    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
+#else
+    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
+                    VERR_SEM_DESTROYED);
+    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
+    PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
+    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
+#endif
+    return hNativeSelf;
+}
+
+
+/**
+ * Tail code called when we've won the battle for the lock.
+ *
+ * @returns VINF_SUCCESS.
+ *
+ * @param   pCritSect       The critical section.
+ * @param   hNativeSelf     The native handle of this thread.
+ */
+DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
+{
+    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
+    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
+
+    pCritSect->s.Core.cNestings = 1;
+    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);
+
+# if defined(PDMCRITSECT_STRICT) && defined(IN_RING3)
+    pCritSect->s.Core.Strict.pszEnterFile = NULL;
+    pCritSect->s.Core.Strict.u32EnterLine = 0;
+    pCritSect->s.Core.Strict.uEnterId     = 0;
+    RTTHREAD hSelf = RTThreadSelf();
+    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, hSelf);
+    RTThreadWriteLockInc(hSelf);
+# endif
+
+    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
+    return VINF_SUCCESS;
+}
+
+
+#ifdef IN_RING3
+/**
+ * Deals with the contended case in ring-3.
+ *
+ * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
+ * @param   pCritSect           The critsect.
+ * @param   hNativeSelf         The native thread handle.
+ */
+static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
+{
+    /*
+     * Start waiting.
+     */
+    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
+        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
+    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
+
+    /*
+     * The wait loop.
+     */
+    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
+    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
+# ifdef PDMCRITSECT_STRICT
+    RTTHREAD        hSelf    = RTThreadSelf();
+    if (hSelf == NIL_RTTHREAD)
+        RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &hSelf);
+# endif
+    for (;;)
+    {
+# ifdef PDMCRITSECT_STRICT
+        RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, NULL, 0, 0);
+# endif
+        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
+# ifdef PDMCRITSECT_STRICT
+        RTThreadUnblocked(hSelf, RTTHREADSTATE_CRITSECT);
+# endif
+        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
+            return VERR_SEM_DESTROYED;
+        if (rc == VINF_SUCCESS)
+            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
+        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
+    }
+    /* won't get here */
+}
+#endif /* IN_RING3 */
+
+
 /**
  * Enters a PDM critical section.
@@ -54,50 +170,53 @@
 {
     Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
-#ifdef IN_RING3
-    NOREF(rcBusy);
-
-    STAM_REL_STATS({if (pCritSect->s.Core.cLockers >= 0 && !RTCritSectIsOwner(&pCritSect->s.Core))
-                        STAM_COUNTER_INC(&pCritSect->s.StatContentionR3); });
-    int rc = RTCritSectEnter(&pCritSect->s.Core);
-    STAM_STATS({ if (pCritSect->s.Core.cNestings == 1) STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l); });
-    return rc;
-
-#else  /* !IN_RING3 */
-    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
-                    VERR_SEM_DESTROYED);
-    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
-    Assert(pVM);
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-    Assert(pVCpu);
-
-    /*
-     * Try to take the lock.
-     */
+
+    /*
+     * If the critical section has already been destroyed, then inform the caller.
+     */
+    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);
+
+    /*
+     * See if we're lucky.
+     */
+    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
+    /* Not owned ... */
     if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
-    {
-        pCritSect->s.Core.cNestings = 1;
-        Assert(pVCpu->hNativeThread != NIL_RTNATIVETHREAD);
-        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread);
-        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
+        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
+
+    /* ... or nested. */
+    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
+    {
+        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
+        pCritSect->s.Core.cNestings++;
+        pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
         return VINF_SUCCESS;
     }
 
     /*
-     * Nested?
-     */
-    if (pCritSect->s.Core.NativeThreadOwner == pVCpu->hNativeThread)
-    {
-        pCritSect->s.Core.cNestings++;
-        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
-        return VINF_SUCCESS;
-    }
-
-    /*
-     * Failed.
-     */
+     * Spin for a bit without incrementing the counter.
+     */
+    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
+     *        cpu systems. */
+    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
+    while (cSpinsLeft-- > 0)
+    {
+        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
+            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
+        /** @todo need pause/nop instruction here! */
+    }
+
+#ifdef IN_RING3
+    /*
+     * Take the slow path.
+     */
+    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf);
+#else
+    /*
+     * Return busy.
+     */
+    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
     LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
-    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
     return rcBusy;
-#endif /* !IN_RING3 */
+#endif
 }
 
@@ -115,43 +234,38 @@
 VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
 {
-#ifdef IN_RING3
-    return RTCritSectTryEnter(&pCritSect->s.Core);
-#else   /* !IN_RING3 (same code as PDMCritSectEnter except for the log statement) */
-    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
-                    VERR_SEM_DESTROYED);
-    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
-    Assert(pVM);
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-    Assert(pVCpu);
-
-    /*
-     * Try to take the lock.
-     */
+    /*
+     * If the critical section has already been destroyed, then inform the caller.
+     */
+    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);
+
+    /*
+     * See if we're lucky.
+     */
+    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
+    /* Not owned ... */
     if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
-    {
-        pCritSect->s.Core.cNestings = 1;
-        Assert(pVCpu->hNativeThread != NIL_RTNATIVETHREAD);
-        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread);
-        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
+        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
+
+    /* ... or nested. */
+    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
+    {
+        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
+        pCritSect->s.Core.cNestings++;
+        pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
         return VINF_SUCCESS;
     }
 
-    /*
-     * Nested?
-     */
-    if (pCritSect->s.Core.NativeThreadOwner == pVCpu->hNativeThread)
-    {
-        pCritSect->s.Core.cNestings++;
-        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
-        return VINF_SUCCESS;
-    }
-
-    /*
-     * Failed.
-     */
+    /* no spinning */
+
+    /*
+     * Return busy.
+     */
+#ifdef IN_RING3
+    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
+#else
+    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
+#endif
     LogFlow(("PDMCritSectTryEnter: locked\n"));
-    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
     return VERR_SEM_BUSY;
-#endif /* !IN_RING3 */
 }
 
@@ -190,52 +304,59 @@
 VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
 {
-#ifdef IN_RING3
-# ifdef VBOX_WITH_STATISTICS
-    if (pCritSect->s.Core.cNestings == 1)
-        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
-# endif
-    RTSEMEVENT EventToSignal = pCritSect->s.EventToSignal;
-    if (RT_LIKELY(EventToSignal == NIL_RTSEMEVENT))
-    {
-        int rc = RTCritSectLeave(&pCritSect->s.Core);
-        AssertRC(rc);
-    }
-    else
-    {
-        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
-        int rc = RTCritSectLeave(&pCritSect->s.Core);
-        AssertRC(rc);
-        LogBird(("signalling %#x\n", EventToSignal));
-        rc = RTSemEventSignal(EventToSignal);
-        AssertRC(rc);
-    }
-
-#else /* !IN_RING3 */
-    Assert(VALID_PTR(pCritSect));
     Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
-    Assert(pCritSect->s.Core.cNestings > 0);
-    Assert(pCritSect->s.Core.cLockers >= 0);
-    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
-    Assert(pVM);
-
-#ifdef VBOX_STRICT
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-    Assert(pVCpu);
-    AssertMsg(pCritSect->s.Core.NativeThreadOwner == pVCpu->hNativeThread, ("Owner %RX64 emt=%RX64\n", pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread));
-#endif
-
-    /*
-     * Deal with nested attempts first.
-     * (We're exploiting nesting to avoid queuing multiple R3 leaves for the same section.)
-     */
-    pCritSect->s.Core.cNestings--;
-    if (pCritSect->s.Core.cNestings > 0)
-    {
+    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
+    Assert(pCritSect->s.Core.cNestings >= 1);
+
+    /*
+     * Nested leave.
+     */
+    if (pCritSect->s.Core.cNestings > 1)
+    {
+        pCritSect->s.Core.cNestings--;
         ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
         return;
     }
-#ifndef VBOX_STRICT
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-#endif
+
+#if defined(IN_RING3) /// @todo enable this later - || defined(IN_RING0)
+    /*
+     * Leave for real.
+     */
+    /* update members. */
+# ifdef IN_RING3
+    RTSEMEVENT hEventToSignal    = pCritSect->s.EventToSignal;
+    pCritSect->s.EventToSignal   = NIL_RTSEMEVENT;
+#  if defined(PDMCRITSECT_STRICT)
+    if (pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
+        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
+    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
+#  endif
+# endif
+    pCritSect->s.Core.fFlags    &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
+    Assert(pCritSect->s.Core.Strict.ThreadOwner == NIL_RTTHREAD);
+    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
+    pCritSect->s.Core.cNestings--;
+
+    /* stop and decrement lockers. */
+    STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
+    if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
+    {
+        /* Someone is waiting, wake up one of them. */
+        SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
+        PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
+        int rc = SUPSemEventSignal(pSession, hEvent);
+        AssertRC(rc);
+    }
+
+# ifdef IN_RING3
+    /* Signal exit event. */
+    if (hEventToSignal != NIL_RTSEMEVENT)
+    {
+        LogBird(("Signalling %#x\n", hEventToSignal));
+        int rc = RTSemEventSignal(hEventToSignal);
+        AssertRC(rc);
+    }
+# endif
+
+#else  /* IN_RC */
     /*
      * Try leave it.
@@ -243,5 +364,9 @@
     if (pCritSect->s.Core.cLockers == 0)
     {
+        pCritSect->s.Core.cNestings  = 0;
+        RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
+        pCritSect->s.Core.fFlags    &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
         STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
+
         ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
         if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
@@ -249,14 +374,16 @@
 
         /* darn, someone raced in on us. */
-        Assert(pVCpu->hNativeThread);
-        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread);
+        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
         STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
-    }
-    pCritSect->s.Core.cNestings = 1;
+        pCritSect->s.Core.cNestings = 1;
+    }
+    pCritSect->s.Core.fFlags |= PDMCRITSECT_FLAGS_PENDING_UNLOCK;
 
     /*
      * Queue the request.
      */
-    RTUINT i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
+    PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
+    PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
+    uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
     LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
     AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
@@ -266,6 +393,35 @@
     STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
-#endif /* !IN_RING3 */
-}
+#endif /* IN_RC */
+}
+
+
+#if defined(IN_RING3) || defined(IN_RING0)
+/**
+ * Process the critical sections queued for ring-3 'leave'.
+ *
+ * @param   pVCpu         The VMCPU handle.
+ */
+VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
+{
+    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);
+
+    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
+    for (RTUINT i = 0; i < c; i++)
+    {
+# ifdef IN_RING3
+        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
+# else
+        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
+# endif
+
+        PDMCritSectLeave(pCritSect);
+        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
+    }
+
+    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
+    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
+}
+#endif /* IN_RING3 || IN_RING0 */
 
 
@@ -282,23 +438,12 @@
     return RTCritSectIsOwner(&pCritSect->s.Core);
 #else
-    PVM     pVM = pCritSect->s.CTX_SUFF(pVM);
-    PVMCPU  pVCpu = VMMGetCpu(pVM);
-    Assert(pVM); Assert(pVCpu);
+    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
+    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
     if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
         return false;
-
-    /* Make sure the critical section is not scheduled to be unlocked. */
-    if (    !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PDM_CRITSECT)
-        ||  RTCritSectGetRecursion(&pCritSect->s.Core) > 1)
-        return true;
-
-    for (unsigned i = 0; i < pVCpu->pdm.s.cQueuedCritSectLeaves; i++)
-    {
-        if (pVCpu->pdm.s.apQueuedCritSectsLeaves[i] == MMHyperCCToR3(pVM, (void *)pCritSect))
-            return false;   /* scheduled for release; pretend it's not owned by us. */
-    }
-    return true;
-#endif
-}
+    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
+#endif
+}
+
 
 /**
@@ -317,22 +462,29 @@
 #else
     PVM pVM = pCritSect->s.CTX_SUFF(pVM);
-    Assert(pVM);
+    AssertPtr(pVM);
     Assert(idCpu < pVM->cCPUs);
-    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread;
-#endif
-}
+    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
+        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
+#endif
+}
+
 
 /**
  * Checks if somebody currently owns the critical section.
- * Note: This doesn't prove that no deadlocks will occur later on; it's just a debugging tool
  *
  * @returns true if locked.
  * @returns false if not locked.
+ *
  * @param   pCritSect   The critical section.
- */
-VMMDECL(bool) PDMCritSectIsLocked(PCPDMCRITSECT pCritSect)
-{
-    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD;
-}
+ *
+ * @remarks This doesn't prove that no deadlocks will occur later on; it's
+ *          just a debugging tool
+ */
+VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
+{
+    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
+        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
+}
+
 
 /**
@@ -345,5 +497,5 @@
 VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
 {
-    return pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC;
+    return RTCritSectIsInitialized(&pCritSect->s.Core);
 }
 
Index: /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 20007)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 20008)
@@ -2033,6 +2033,7 @@
 VMMDECL(bool) PGMIsLocked(PVM pVM)
 {
-    return PDMCritSectIsLocked(&pVM->pgm.s.CritSect);
-}
+    return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
+}
+
 
 /**
