Index: /trunk/include/VBox/vmm/vmm.h
===================================================================
--- /trunk/include/VBox/vmm/vmm.h	(revision 38834)
+++ /trunk/include/VBox/vmm/vmm.h	(revision 38835)
@@ -277,6 +277,6 @@
 VMMR3DECL(int)          VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
 VMMR3DECL(int)          VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
-VMMR3DECL(int)          VMMR3AtomicExecuteHandler(PVM pVM, PFNATOMICHANDLER pfnHandler, void *pvUser);
 VMMR3DECL(int)          VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
+VMMR3_INT_DECL(bool)    VMMR3EmtRendezvousSetDisabled(PVMCPU pVCpu, bool fDisabled);
 /** @defgroup grp_VMMR3EmtRendezvous_fFlags     VMMR3EmtRendezvous flags
  *  @{ */
Index: /trunk/src/VBox/VMM/VMMR3/VMM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 38834)
+++ /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 38835)
@@ -187,8 +187,6 @@
 
     /*
-     * Initialize the VMM sync critical section and semaphores.
-     */
-    rc = RTCritSectInit(&pVM->vmm.s.CritSectSync);
-    AssertRCReturn(rc, rc);
+     * Initialize the VMM rendezvous semaphores.
+     */
     pVM->vmm.s.pahEvtRendezvousEnterOrdered = (PRTSEMEVENT)MMR3HeapAlloc(pVM, MM_TAG_VMM, sizeof(RTSEMEVENT) * pVM->cCpus);
     if (!pVM->vmm.s.pahEvtRendezvousEnterOrdered)
@@ -740,5 +738,4 @@
     }
 
-    RTCritSectDelete(&pVM->vmm.s.CritSectSync);
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
@@ -1365,5 +1362,5 @@
     AssertReturnVoid(idCpu < pVM->cCpus);
 
-    int rc = VMR3ReqCallNoWaitU(pVM->pUVM, idCpu, (PFNRT)vmmR3SendSipi, 3, pVM, idCpu, uVector);
+    int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendSipi, 3, pVM, idCpu, uVector);
     AssertRC(rc);
 }
@@ -1379,5 +1376,5 @@
     AssertReturnVoid(idCpu < pVM->cCpus);
 
-    int rc = VMR3ReqCallNoWaitU(pVM->pUVM, idCpu, (PFNRT)vmmR3SendInitIpi, 2, pVM, idCpu);
+    int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendInitIpi, 2, pVM, idCpu);
     AssertRC(rc);
 }
@@ -1393,4 +1390,5 @@
 VMMR3DECL(int) VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
 {
+    VM_ASSERT_EMT(pVM);
     if (HWACCMIsEnabled(pVM))
         return HWACMMR3EnablePatching(pVM, pPatchMem, cbPatchMem);
@@ -1413,60 +1411,4 @@
 
     return VINF_SUCCESS;
-}
-
-
-/**
- * VCPU worker for VMMR3SynchronizeAllVCpus.
- *
- * @param   pVM         The VM to operate on.
- * @param   idCpu       Virtual CPU to perform SIPI on
- * @param   uVector     SIPI vector
- */
-DECLCALLBACK(int) vmmR3SyncVCpu(PVM pVM)
-{
-    /* Block until the job in the caller has finished. */
-    RTCritSectEnter(&pVM->vmm.s.CritSectSync);
-    RTCritSectLeave(&pVM->vmm.s.CritSectSync);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Atomically execute a callback handler
- * Note: This is very expensive; avoid using it frequently!
- *
- * @param   pVM         The VM to operate on.
- * @param   pfnHandler  Callback handler
- * @param   pvUser      User specified parameter
- *
- * @thread  EMT
- * @todo    Remove this if not used again soon.
- */
-VMMR3DECL(int) VMMR3AtomicExecuteHandler(PVM pVM, PFNATOMICHANDLER pfnHandler, void *pvUser)
-{
-    int    rc;
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-    AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
-
-    /* Shortcut for the uniprocessor case. */
-    if (pVM->cCpus == 1)
-        return pfnHandler(pVM, pvUser);
-
-    RTCritSectEnter(&pVM->vmm.s.CritSectSync);
-    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
-    {
-        if (idCpu != pVCpu->idCpu)
-        {
-            rc = VMR3ReqCallNoWaitU(pVM->pUVM, idCpu, (PFNRT)vmmR3SyncVCpu, 1, pVM);
-            AssertRC(rc);
-        }
-    }
-    /* Wait until all other VCPUs are waiting for us. */
-    while (RTCritSectGetWaiters(&pVM->vmm.s.CritSectSync) != (int32_t)(pVM->cCpus - 1))
-        RTThreadSleep(1);
-
-    rc = pfnHandler(pVM, pvUser);
-    RTCritSectLeave(&pVM->vmm.s.CritSectSync);
-    return rc;
 }
 
@@ -1517,4 +1459,5 @@
 {
     int rc;
+    pVCpu->vmm.s.fInRendezvous = true;
 
     /*
@@ -1665,4 +1608,5 @@
     }
 
+    pVCpu->vmm.s.fInRendezvous = false;
     if (!fIsCaller)
         return vmmR3EmtRendezvousNonCallerReturn(pVM);
@@ -1728,8 +1672,13 @@
                                    (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
     else if (pVM->cCpus == 1)
+    {
         /*
          * Shortcut for the single EMT case.
          */
+        AssertLogRelReturn(!pVCpu->vmm.s.fInRendezvous, VERR_DEADLOCK);
+        pVCpu->vmm.s.fInRendezvous = true;
         rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
+        pVCpu->vmm.s.fInRendezvous = false;
+    }
     else
     {
@@ -1742,4 +1691,6 @@
         if (RT_UNLIKELY(!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0)))
         {
+            AssertLogRelReturn(!pVCpu->vmm.s.fInRendezvous, VERR_DEADLOCK);
+
             while (!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0))
             {
@@ -1820,4 +1771,23 @@
                           VERR_IPE_UNEXPECTED_INFO_STATUS);
     return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+/**
+ * Disables/enables EMT rendezvous.
+ *
+ * This is used to make sure EMT rendezvous does not take place while
+ * processing a priority request.
+ *
+ * @returns Old rendezvous-disabled state.
+ * @param   pVCpu           The handle of the calling EMT.
+ * @param   fDisabled       True if disabled, false if enabled.
+ */
+VMMR3_INT_DECL(bool) VMMR3EmtRendezvousSetDisabled(PVMCPU pVCpu, bool fDisabled)
+{
+    VMCPU_ASSERT_EMT(pVCpu);
+    bool fOld = pVCpu->vmm.s.fInRendezvous;
+    pVCpu->vmm.s.fInRendezvous = fDisabled;
+    return fOld;
 }
 
Index: /trunk/src/VBox/VMM/include/VMMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/VMMInternal.h	(revision 38834)
+++ /trunk/src/VBox/VMM/include/VMMInternal.h	(revision 38835)
@@ -279,9 +279,4 @@
     /** The timestamp of the previous yield. (nano) */
     uint64_t                    u64LastYield;
-
-    /** Critical section.
-     * Use for synchronizing all VCPUs
-     */
-    RTCRITSECT                  CritSectSync;
 
     /** @name EMT Rendezvous
@@ -425,4 +420,12 @@
 #endif
 
+    /** @name Rendezvous
+     * @{ */
+    /** Whether the EMT is executing a rendezvous right now. For detecting
+     *  attempts at recursive rendezvous. */
+    bool volatile               fInRendezvous;
+    bool                        afPadding[7];
+    /** @} */
+
     /** @name Call Ring-3
      * Formerly known as host calls.
