Index: /trunk/include/VBox/vmm/pdmcritsectrw.h
===================================================================
--- /trunk/include/VBox/vmm/pdmcritsectrw.h	(revision 45292)
+++ /trunk/include/VBox/vmm/pdmcritsectrw.h	(revision 45293)
@@ -54,4 +54,6 @@
 VMMR3DECL(int)      PDMR3CritSectRwDelete(PPDMCRITSECTRW pCritSect);
 VMMR3DECL(const char *) PDMR3CritSectRwName(PCPDMCRITSECTRW pCritSect);
+VMMR3DECL(int)      PDMR3CritSectRwEnterSharedEx(PPDMCRITSECTRW pThis, bool fCallRing3);
+VMMR3DECL(int)      PDMR3CritSectRwEnterExclEx(PPDMCRITSECTRW pThis, bool fCallRing3);
 
 VMMDECL(int)        PDMCritSectRwEnterShared(PPDMCRITSECTRW pCritSect, int rcBusy);
Index: /trunk/include/VBox/vmm/vmm.h
===================================================================
--- /trunk/include/VBox/vmm/vmm.h	(revision 45292)
+++ /trunk/include/VBox/vmm/vmm.h	(revision 45293)
@@ -82,4 +82,8 @@
     /** Acquire the critical section specified as argument.  */
     VMMCALLRING3_PDM_CRIT_SECT_ENTER,
+    /** Enter the R/W critical section (in argument) exclusively.  */
+    VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL,
+    /** Enter the R/W critical section (in argument) shared.  */
+    VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED,
     /** Acquire the PGM lock. */
     VMMCALLRING3_PGM_LOCK,
Index: /trunk/src/VBox/VMM/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/Makefile.kmk	(revision 45292)
+++ /trunk/src/VBox/VMM/Makefile.kmk	(revision 45293)
@@ -421,4 +421,5 @@
  	VMMAll/PDMAll.cpp \
  	VMMAll/PDMAllCritSect.cpp \
+ 	VMMAll/PDMAllCritSectRw.cpp \
  	VMMAll/PDMAllCritSectBoth.cpp \
  	VMMAll/PDMAllQueue.cpp \
@@ -529,4 +530,5 @@
  	VMMAll/PDMAll.cpp \
  	VMMAll/PDMAllCritSect.cpp \
+ 	VMMAll/PDMAllCritSectRw.cpp \
  	VMMAll/PDMAllCritSectBoth.cpp \
  	VMMAll/PDMAllQueue.cpp \
Index: /trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp	(revision 45292)
+++ /trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp	(revision 45293)
@@ -207,5 +207,8 @@
                 return VERR_SEM_BUSY;
 
-            /* Add ourselves to the queue and wait for the direction to change. */
+#if defined(IN_RING3)
+            /*
+             * Add ourselves to the queue and wait for the direction to change.
+             */
             uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
             c++;
@@ -225,12 +228,12 @@
                 {
                     int rc;
-#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
+# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                     rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
                                                                RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
                     if (RT_SUCCESS(rc))
-#else
+# else
                     RTTHREAD hThreadSelf = RTThreadSelf();
                     RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
-#endif
+# endif
                     {
                         do
@@ -295,9 +298,25 @@
                 }
 
-#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
+# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
-#endif
+# endif
                 break;
             }
+
+#else
+            /*
+             * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
+             * back to ring-3 and do it there or return rcBusy.
+             */
+            if (rcBusy == VINF_SUCCESS)
+            {
+                PVM     pVM   = pThis->s.CTX_SUFF(pVM);     AssertPtr(pVM);
+                PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
+                /** @todo Should actually do this via VMMR0.cpp instead of going all the way
+                 *        back to ring-3. Goes for both kind of crit sects. */
+                return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
+            }
+            return rcBusy;
+#endif
         }
 
@@ -342,5 +361,5 @@
 VMMDECL(int) PDMCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy)
 {
-#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
+#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
     return pdmCritSectRwEnterShared(pThis, rcBusy, NULL, false /*fTryOnly*/);
 #else
@@ -375,6 +394,10 @@
 VMMDECL(int) PDMCritSectRwEnterSharedDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
 {
+#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
+    return pdmCritSectRwEnterShared(pThis, rcBusy, NULL, false /*fTryOnly*/);
+#else
     RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
     return pdmCritSectRwEnterShared(pThis, rcBusy, &SrcPos, false /*fTryOnly*/);
+#endif
 }
 
@@ -401,5 +424,5 @@
 VMMDECL(int) PDMCritSectRwTryEnterShared(PPDMCRITSECTRW pThis)
 {
-#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
+#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
     return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, NULL, true /*fTryOnly*/);
 #else
@@ -431,7 +454,40 @@
 VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
 {
+#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
+    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, NULL, true /*fTryOnly*/);
+#else
     RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
     return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, &SrcPos, true /*fTryOnly*/);
-}
+#endif
+}
+
+
+#ifdef IN_RING3
+/**
+ * Enters a PDM read/write critical section with shared (read) access.
+ *
+ * @returns VINF_SUCCESS if entered successfully.
+ * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
+ *          during the operation.
+ *
+ * @param   pThis       Pointer to the read/write critical section.
+ * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
+ */
+VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PPDMCRITSECTRW pThis, bool fCallRing3)
+{
+    int rc = pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, NULL, false /*fTryOnly*/);
+    if (    rc == VINF_SUCCESS
+        &&  fCallRing3
+        &&  pThis->s.Core.pValidatorRead)
+    {
+        Assert(pThis->s.Core.pValidatorWrite);
+        if (pThis->s.Core.hNativeWriter == NIL_RTNATIVETHREAD)
+            RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
+        else
+            RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
+    }
+    return rc;
+}
+#endif /* IN_RING3 */
 
 
@@ -484,5 +540,6 @@
             else
             {
-                /* Reverse the direction and signal the reader threads. */
+#if defined(IN_RING3)
+                /* Reverse the direction and signal the writer threads. */
                 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
                 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
@@ -493,4 +550,17 @@
                     break;
                 }
+#else
+                /* Queue the exit request (ring-3). */
+                PVM         pVM   = pThis->s.CTX_SUFF(pVM);         AssertPtr(pVM);
+                PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
+                uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
+                LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
+                AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
+                pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
+                VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
+                VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
+                STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
+                STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
+#endif
             }
 
@@ -614,10 +684,14 @@
      */
     bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
+#if defined(IN_RING3)
               && (  ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
-                  || fTryOnly);
+                  || fTryOnly)
+#endif
+               ;
     if (fDone)
         ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
     if (!fDone)
     {
+#if defined(IN_RING3)
         /*
          * Wait for our turn.
@@ -626,5 +700,5 @@
         {
             int rc;
-#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
+# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
             if (!fTryOnly)
             {
@@ -637,8 +711,8 @@
                 rc = VINF_SUCCESS;
             if (RT_SUCCESS(rc))
-#else
+# else
             RTTHREAD hThreadSelf = RTThreadSelf();
             RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
-#endif
+# endif
             {
                 do
@@ -676,4 +750,30 @@
             AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
         }
+
+#else
+        /* We cannot call SUPSemEventWaitNoResume in this context. Go back to
+           ring-3 and do it there or return rcBusy. */
+        for (;;)
+        {
+            u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
+            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
+            c--;
+            u64State &= ~RTCSRW_CNT_WR_MASK;
+            u64State |= c << RTCSRW_CNT_WR_SHIFT;
+            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
+                break;
+        }
+
+        if (rcBusy == VINF_SUCCESS)
+        {
+            PVM     pVM   = pThis->s.CTX_SUFF(pVM);     AssertPtr(pVM);
+            PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
+            /** @todo Should actually do this via VMMR0.cpp instead of going all the way
+             *        back to ring-3. Goes for both kind of crit sects. */
+            return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
+        }
+        return rcBusy;
+
+#endif
     }
 
@@ -715,5 +815,5 @@
 VMMDECL(int) PDMCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy)
 {
-#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
+#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
     return pdmCritSectRwEnterExcl(pThis, rcBusy, NULL, false /*fTryAgain*/);
 #else
@@ -749,6 +849,10 @@
 VMMDECL(int) PDMCritSectRwEnterExclDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
 {
+#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
+    return pdmCritSectRwEnterExcl(pThis, rcBusy, NULL, false /*fTryAgain*/);
+#else
     RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
     return pdmCritSectRwEnterExcl(pThis, rcBusy, &SrcPos, false /*fTryAgain*/);
+#endif
 }
 
@@ -771,5 +875,5 @@
 VMMDECL(int) PDMCritSectRwTryEnterExcl(PPDMCRITSECTRW pThis)
 {
-#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
+#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
     return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, NULL, true /*fTryAgain*/);
 #else
@@ -801,7 +905,35 @@
 VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
 {
+#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
+    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, NULL, true /*fTryAgain*/);
+#else
     RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
     return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, &SrcPos, true /*fTryAgain*/);
-}
+#endif
+}
+
+
+#ifdef IN_RING3
+/**
+ * Enters a PDM read/write critical section with exclusive (write) access.
+ *
+ * @returns VINF_SUCCESS if entered successfully.
+ * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
+ *          during the operation.
+ *
+ * @param   pThis       Pointer to the read/write critical section.
+ * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
+ */
+VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PPDMCRITSECTRW pThis, bool fCallRing3)
+{
+    int rc = pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, NULL, false /*fTryAgain*/);
+    if (    rc == VINF_SUCCESS
+        &&  fCallRing3
+        &&  pThis->s.Core.pValidatorWrite
+        &&  pThis->s.Core.pValidatorWrite->hThread != NIL_RTTHREAD)
+        RTLockValidatorRecExclReleaseOwnerUnchecked(pThis->s.Core.pValidatorWrite);
+    return rc;
+}
+#endif /* IN_RING3 */
 
 
@@ -842,7 +974,8 @@
          * Update the state.
          */
+#if defined(IN_RING3)
         ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
+        STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
         ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);
-        STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
 
         for (;;)
@@ -890,4 +1023,20 @@
                 return VERR_SEM_DESTROYED;
         }
+#else
+        /*
+         * We cannot call neither SUPSemEventSignal nor SUPSemEventMultiSignal,
+         * so queue the exit request (ring-3).
+         */
+        PVM         pVM   = pThis->s.CTX_SUFF(pVM);         AssertPtr(pVM);
+        PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
+        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
+        LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
+        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves));
+        pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
+        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
+        STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
+#endif
     }
     else
Index: /trunk/src/VBox/VMM/VMMR3/VMM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 45292)
+++ /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 45293)
@@ -81,4 +81,5 @@
 #include <VBox/vmm/pdmqueue.h>
 #include <VBox/vmm/pdmcritsect.h>
+#include <VBox/vmm/pdmcritsectrw.h>
 #include <VBox/vmm/pdmapi.h>
 #include <VBox/vmm/cpum.h>
@@ -2116,4 +2117,24 @@
 
         /*
+         * Enter a r/w critical section exclusively.
+         */
+        case VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL:
+        {
+            pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectRwEnterExclEx((PPDMCRITSECTRW)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
+                                                                    true /*fCallRing3*/);
+            break;
+        }
+
+        /*
+         * Enter a r/w critical section shared.
+         */
+        case VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED:
+        {
+            pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectRwEnterSharedEx((PPDMCRITSECTRW)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
+                                                                    true /*fCallRing3*/);
+            break;
+        }
+
+        /*
          * Acquire the PDM lock.
          */
