Index: /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp	(revision 58829)
+++ /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp	(revision 58830)
@@ -63,4 +63,5 @@
 #include <iprt/asm.h>
 #include <iprt/asm-amd64-x86.h>
+#include <iprt/critsect.h>
 #include <iprt/mem.h>
 #include <iprt/semaphore.h>
@@ -260,4 +261,7 @@
     /** Alignment padding. */
     uint32_t            u32Alignment;
+    /** Mini lock for restricting early wake-ups to one thread. */
+    bool volatile       fDoingEarlyWakeUps;
+    bool                afPadding[3]; /**< explicit alignment padding. */
     /** When the next halted or sleeping EMT will wake up.
      * This is set to 0 when it needs recalculating and to UINT64_MAX when
@@ -266,9 +270,9 @@
     /** The lock used to serialize VM creation, destruction and associated events that
      * isn't performance critical. Owners may acquire the list lock. */
-    RTSEMFASTMUTEX      CreateDestroyLock;
+    RTCRITSECT          CreateDestroyLock;
     /** The lock used to serialize used list updates and accesses.
      * This indirectly includes scheduling since the scheduler will have to walk the
      * used list to examin running VMs. Owners may not acquire any other locks. */
-    RTSEMFASTMUTEX      UsedLock;
+    RTCRITSECTRW        UsedLock;
     /** The handle array.
      * The size of this array defines the maximum number of currently running VMs.
@@ -290,11 +294,15 @@
     uint32_t            nsMinSleepCompany;
     /** @gcfgm{/GVMM/EarlyWakeUp1, 32-bit, 0, 100000000, 25000, ns}
-     * The limit for the first round of early wakeups, given in nano seconds.
+     * The limit for the first round of early wake-ups, given in nano seconds.
      */
     uint32_t            nsEarlyWakeUp1;
     /** @gcfgm{/GVMM/EarlyWakeUp2, 32-bit, 0, 100000000, 50000, ns}
-     * The limit for the second round of early wakeups, given in nano seconds.
+     * The limit for the second round of early wake-ups, given in nano seconds.
      */
     uint32_t            nsEarlyWakeUp2;
+
+    /** Set if we're doing early wake-ups.
+     * This reflects nsEarlyWakeUp1 and nsEarlyWakeUp2.  */
+    bool volatile       fDoEarlyWakeUps;
 
     /** The number of entries in the host CPU array (aHostCpus). */
@@ -303,4 +311,7 @@
     GVMMHOSTCPU         aHostCpus[1];
 } GVMM;
+AssertCompileMemberAlignment(GVMM, CreateDestroyLock, 8);
+AssertCompileMemberAlignment(GVMM, UsedLock, 8);
+AssertCompileMemberAlignment(GVMM, uNsNextEmtWakeup, 8);
 /** Pointer to the GVMM instance data. */
 typedef GVMM *PGVMM;
@@ -377,8 +388,9 @@
     if (!pGVMM)
         return VERR_NO_MEMORY;
-    int rc = RTSemFastMutexCreate(&pGVMM->CreateDestroyLock);
+    int rc = RTCritSectInitEx(&pGVMM->CreateDestroyLock, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE,
+                              "GVMM-CreateDestroyLock");
     if (RT_SUCCESS(rc))
     {
-        rc = RTSemFastMutexCreate(&pGVMM->UsedLock);
+        rc = RTCritSectRwInitEx(&pGVMM->UsedLock, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "GVMM-UsedLock");
         if (RT_SUCCESS(rc))
         {
@@ -427,4 +439,5 @@
                 pGVMM->nsEarlyWakeUp2    = 0;
             }
+            pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
 
             /* The host CPU data. */
@@ -491,9 +504,7 @@
 
             /* bail out. */
-            RTSemFastMutexDestroy(pGVMM->UsedLock);
-            pGVMM->UsedLock = NIL_RTSEMFASTMUTEX;
-        }
-        RTSemFastMutexDestroy(pGVMM->CreateDestroyLock);
-        pGVMM->CreateDestroyLock = NIL_RTSEMFASTMUTEX;
+            RTCritSectRwDelete(&pGVMM->UsedLock);
+        }
+        RTCritSectDelete(&pGVMM->CreateDestroyLock);
     }
 
@@ -543,8 +554,6 @@
      */
     pGVMM->u32Magic = ~GVMM_MAGIC;
-    RTSemFastMutexDestroy(pGVMM->UsedLock);
-    pGVMM->UsedLock = NIL_RTSEMFASTMUTEX;
-    RTSemFastMutexDestroy(pGVMM->CreateDestroyLock);
-    pGVMM->CreateDestroyLock = NIL_RTSEMFASTMUTEX;
+    RTCritSectRwDelete(&pGVMM->UsedLock);
+    RTCritSectDelete(&pGVMM->CreateDestroyLock);
 
     pGVMM->iFreeHead = 0;
@@ -620,5 +629,8 @@
     {
         if (u64Value <= RT_NS_100MS)
+        {
             pGVMM->nsEarlyWakeUp1 = u64Value;
+            pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
+        }
         else
             rc = VERR_OUT_OF_RANGE;
@@ -627,5 +639,8 @@
     {
         if (u64Value <= RT_NS_100MS)
+        {
             pGVMM->nsEarlyWakeUp2 = u64Value;
+            pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
+        }
         else
             rc = VERR_OUT_OF_RANGE;
@@ -681,14 +696,55 @@
 
 /**
- * Try acquire the 'used' lock.
+ * Acquire the 'used' lock in shared mode.
+ *
+ * This prevents destruction of the VM while we're in ring-0.
+ *
+ * @returns IPRT status code, see RTCritSectRwEnterShared.
+ * @param   a_pGVMM     The GVMM instance data.
+ * @sa      GVMMR0_USED_SHARED_UNLOCK, GVMMR0_USED_EXCLUSIVE_LOCK
+ */
+#define GVMMR0_USED_SHARED_LOCK(a_pGVMM)        RTCritSectRwEnterShared(&(a_pGVMM)->UsedLock)
+
+/**
+ * Release the 'used' lock when owning it in shared mode.
+ *
+ * @returns IPRT status code, see RTCritSectRwLeaveShared.
+ * @param   a_pGVMM     The GVMM instance data.
+ * @sa      GVMMR0_USED_SHARED_LOCK
+ */
+#define GVMMR0_USED_SHARED_UNLOCK(a_pGVMM)      RTCritSectRwLeaveShared(&(a_pGVMM)->UsedLock)
+
+/**
+ * Acquire the 'used' lock in exclusive mode.
+ *
+ * Only use this function when making changes to the used list.
+ *
+ * @returns IPRT status code, see RTCritSectRwEnterExcl.
+ * @param   a_pGVMM     The GVMM instance data.
+ * @sa      GVMMR0_USED_EXCLUSIVE_UNLOCK
+ */
+#define GVMMR0_USED_EXCLUSIVE_LOCK(a_pGVMM)     RTCritSectRwEnterExcl(&(a_pGVMM)->UsedLock)
+
+/**
+ * Release the 'used' lock when owning it in exclusive mode.
+ *
+ * @returns IPRT status code, see RTCritSectRwLeaveExcl.
+ * @param   a_pGVMM     The GVMM instance data.
+ * @sa      GVMMR0_USED_EXCLUSIVE_LOCK, GVMMR0_USED_SHARED_UNLOCK
+ */
+#define GVMMR0_USED_EXCLUSIVE_UNLOCK(a_pGVMM)   RTCritSectRwLeaveExcl(&(a_pGVMM)->UsedLock)
+
+
+/**
+ * Try acquire the 'create & destroy' lock.
  *
  * @returns IPRT status code, see RTSemFastMutexRequest.
  * @param   pGVMM   The GVMM instance data.
  */
-DECLINLINE(int) gvmmR0UsedLock(PGVMM pGVMM)
-{
-    LogFlow(("++gvmmR0UsedLock(%p)\n", pGVMM));
-    int rc = RTSemFastMutexRequest(pGVMM->UsedLock);
-    LogFlow(("gvmmR0UsedLock(%p)->%Rrc\n", pGVMM, rc));
+DECLINLINE(int) gvmmR0CreateDestroyLock(PGVMM pGVMM)
+{
+    LogFlow(("++gvmmR0CreateDestroyLock(%p)\n", pGVMM));
+    int rc = RTCritSectEnter(&pGVMM->CreateDestroyLock);
+    LogFlow(("gvmmR0CreateDestroyLock(%p)->%Rrc\n", pGVMM, rc));
     return rc;
 }
@@ -696,43 +752,13 @@
 
 /**
- * Release the 'used' lock.
- *
- * @returns IPRT status code, see RTSemFastMutexRelease.
- * @param   pGVMM   The GVMM instance data.
- */
-DECLINLINE(int) gvmmR0UsedUnlock(PGVMM pGVMM)
-{
-    LogFlow(("--gvmmR0UsedUnlock(%p)\n", pGVMM));
-    int rc = RTSemFastMutexRelease(pGVMM->UsedLock);
-    AssertRC(rc);
-    return rc;
-}
-
-
-/**
- * Try acquire the 'create & destroy' lock.
+ * Release the 'create & destroy' lock.
  *
  * @returns IPRT status code, see RTSemFastMutexRequest.
  * @param   pGVMM   The GVMM instance data.
  */
-DECLINLINE(int) gvmmR0CreateDestroyLock(PGVMM pGVMM)
-{
-    LogFlow(("++gvmmR0CreateDestroyLock(%p)\n", pGVMM));
-    int rc = RTSemFastMutexRequest(pGVMM->CreateDestroyLock);
-    LogFlow(("gvmmR0CreateDestroyLock(%p)->%Rrc\n", pGVMM, rc));
-    return rc;
-}
-
-
-/**
- * Release the 'create & destroy' lock.
- *
- * @returns IPRT status code, see RTSemFastMutexRequest.
- * @param   pGVMM   The GVMM instance data.
- */
 DECLINLINE(int) gvmmR0CreateDestroyUnlock(PGVMM pGVMM)
 {
     LogFlow(("--gvmmR0CreateDestroyUnlock(%p)\n", pGVMM));
-    int rc = RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
+    int rc = RTCritSectLeave(&pGVMM->CreateDestroyLock);
     AssertRC(rc);
     return rc;
@@ -830,5 +856,5 @@
                  * Move the handle from the free to used list and perform permission checks.
                  */
-                rc = gvmmR0UsedLock(pGVMM);
+                rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
                 AssertRC(rc);
 
@@ -844,5 +870,5 @@
                 pHandle->ProcId   = NIL_RTPROCESS;
 
-                gvmmR0UsedUnlock(pGVMM);
+                GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
 
                 rc = SUPR0ObjVerifyAccess(pHandle->pvObj, pSession, NULL);
@@ -924,5 +950,5 @@
 
                                         /* complete the handle - take the UsedLock sem just to be careful. */
-                                        rc = gvmmR0UsedLock(pGVMM);
+                                        rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
                                         AssertRC(rc);
 
@@ -941,5 +967,5 @@
                                             VBOXVMM_R0_GVMM_VM_CREATED(pGVM, pVM, ProcId, (void *)hEMT0, cCpus);
 
-                                            gvmmR0UsedUnlock(pGVMM);
+                                            GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
                                             gvmmR0CreateDestroyUnlock(pGVMM);
 
@@ -948,4 +974,6 @@
                                             return VINF_SUCCESS;
                                         }
+
+                                        GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
                                     }
 
@@ -1241,5 +1269,5 @@
     int rc = gvmmR0CreateDestroyLock(pGVMM);
     AssertRC(rc);
-    rc = gvmmR0UsedLock(pGVMM);
+    rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
     AssertRC(rc);
 
@@ -1250,5 +1278,5 @@
     {
         SUPR0Printf("GVM: used list index %d is out of range!\n", pHandle->iNext);
-        gvmmR0UsedUnlock(pGVMM);
+        GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
         gvmmR0CreateDestroyUnlock(pGVMM);
         return;
@@ -1266,5 +1294,5 @@
             {
                 SUPR0Printf("GVM: used list index %d is out of range!\n", iPrev);
-                gvmmR0UsedUnlock(pGVMM);
+                GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
                 gvmmR0CreateDestroyUnlock(pGVMM);
                 return;
@@ -1283,5 +1311,5 @@
         {
             SUPR0Printf("GVM: can't find the handle previous previous of %d!\n", pHandle->iSelf);
-            gvmmR0UsedUnlock(pGVMM);
+            GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
             gvmmR0CreateDestroyUnlock(pGVMM);
             return;
@@ -1302,5 +1330,5 @@
     {
         pGVMM->cEMTs -= pGVM->cCpus;
-        gvmmR0UsedUnlock(pGVMM);
+        GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
 
         gvmmR0CleanupVM(pGVM);
@@ -1348,5 +1376,5 @@
 
         /* Re-acquire the UsedLock before freeing the handle since we're updating handle fields. */
-        rc = gvmmR0UsedLock(pGVMM);
+        rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
         AssertRC(rc);
     }
@@ -1365,5 +1393,5 @@
     ASMAtomicWriteU32(&pHandle->ProcId,          NIL_RTPROCESS);
 
-    gvmmR0UsedUnlock(pGVMM);
+    GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
     gvmmR0CreateDestroyUnlock(pGVMM);
     LogFlow(("gvmmR0HandleObjDestructor: returns\n"));
@@ -1389,5 +1417,5 @@
     PGVM pGVM;
     PGVMM pGVMM;
-    int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, false /* fTakeUsedLock */);
+    int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, false /* fTakeUsedLock */); /** @todo take lock here. */
     if (RT_FAILURE(rc))
         return rc;
@@ -1445,7 +1473,9 @@
  * @param   ppGVM           Where to store the GVM pointer.
  * @param   ppGVMM          Where to store the pointer to the GVMM instance data.
- * @param   fTakeUsedLock   Whether to take the used lock or not.
- *                          Be very careful if not taking the lock as it's possible that
- *                          the VM will disappear then.
+ * @param   fTakeUsedLock   Whether to take the used lock or not.  We take it in
+ *                          shared mode when requested.
+ *
+ *                          Be very careful if not taking the lock as it's
+ *                          possible that the VM will disappear then!
  *
  * @remark  This will not assert on an invalid pVM but try return silently.
@@ -1479,5 +1509,5 @@
     if (fTakeUsedLock)
     {
-        int rc = gvmmR0UsedLock(pGVMM);
+        int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
         AssertRCReturn(rc, rc);
 
@@ -1489,5 +1519,5 @@
                         ||  pGVM->pVM != pVM))
         {
-            gvmmR0UsedUnlock(pGVMM);
+            GVMMR0_USED_SHARED_UNLOCK(pGVMM);
             return VERR_INVALID_HANDLE;
         }
@@ -1524,4 +1554,6 @@
  * @remark  This will not take the 'used'-lock because it doesn't do
  *          nesting and this function will be used from under the lock.
+ *          Update: This is no longer true.  Consider taking the lock in shared
+ *          mode!
  */
 GVMMR0DECL(int) GVMMR0ByVM(PVM pVM, PGVM *ppGVM)
@@ -1681,6 +1713,5 @@
      * the user.
      */
-    if (   !pGVMM->nsEarlyWakeUp1
-        && !pGVMM->nsEarlyWakeUp2)
+    if (!pGVMM->fDoEarlyWakeUps)
         return 0;
 
@@ -1693,4 +1724,10 @@
     if (   pGVMM->cHaltedEMTs == 0
         || uNsEarlyWakeUp2 > pGVMM->uNsNextEmtWakeup)
+        return 0;
+
+    /*
+     * Only one thread doing this at a time.
+     */
+    if (!ASMAtomicCmpXchgBool(&pGVMM->fDoingEarlyWakeUps, true, false))
         return 0;
 
@@ -1809,4 +1846,5 @@
     pGVMM->uNsNextEmtWakeup = u64Min;
 
+    ASMAtomicWriteBool(&pGVMM->fDoingEarlyWakeUps, false);
     return cWoken;
 }
@@ -1844,11 +1882,14 @@
 
     /*
-     * Take the UsedList semaphore, get the current time
-     * and check if anyone needs waking up.
-     * Interrupts must NOT be disabled at this point because we ask for GIP time!
-     */
-    rc = gvmmR0UsedLock(pGVMM);
-    AssertRC(rc);
-    GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+     * If we're doing early wake-ups, we must take the UsedList lock before we
+     * start querying the current time.
+     * Note! Interrupts must NOT be disabled at this point because we ask for GIP time!
+     */
+    bool const fDoEarlyWakeUps = pGVMM->fDoEarlyWakeUps;
+    if (fDoEarlyWakeUps)
+    {
+        rc = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc);
+        GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+    }
 
     pCurGVCpu->gvmm.s.iCpuEmt = ASMGetApicId();
@@ -1861,6 +1902,10 @@
     const uint64_t u64NowGip = RTTimeNanoTS();
     GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
-    pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip);
-    GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+
+    if (fDoEarlyWakeUps)
+    {
+        pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip);
+        GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+    }
 
     /*
@@ -1877,9 +1922,12 @@
         if (cNsInterval > RT_NS_1SEC)
             u64ExpireGipTime = u64NowGip + RT_NS_1SEC;
-        if (u64ExpireGipTime < pGVMM->uNsNextEmtWakeup)
-            pGVMM->uNsNextEmtWakeup = u64ExpireGipTime;
         ASMAtomicWriteU64(&pCurGVCpu->gvmm.s.u64HaltExpire, u64ExpireGipTime);
         ASMAtomicIncU32(&pGVMM->cHaltedEMTs);
-        gvmmR0UsedUnlock(pGVMM);
+        if (fDoEarlyWakeUps)
+        {
+            if (u64ExpireGipTime < pGVMM->uNsNextEmtWakeup)
+                pGVMM->uNsNextEmtWakeup = u64ExpireGipTime;
+            GVMMR0_USED_SHARED_UNLOCK(pGVMM);
+        }
         GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
 
@@ -1907,5 +1955,6 @@
     {
         pGVM->gvmm.s.StatsSched.cHaltNotBlocking++;
-        gvmmR0UsedUnlock(pGVMM);
+        if (fDoEarlyWakeUps)
+            GVMMR0_USED_SHARED_UNLOCK(pGVMM);
         GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
         RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti);
@@ -1993,5 +2042,5 @@
             GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
 
-            if (fTakeUsedLock)
+            if (fTakeUsedLock && pGVMM->fDoEarlyWakeUps)
             {
                 /*
@@ -2009,5 +2058,5 @@
         if (fTakeUsedLock)
         {
-            int rc2 = gvmmR0UsedUnlock(pGVMM);
+            int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
             AssertRC(rc2);
             GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
@@ -2092,5 +2141,5 @@
         if (fTakeUsedLock)
         {
-            int rc2 = gvmmR0UsedUnlock(pGVMM);
+            int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
             AssertRC(rc2);
         }
@@ -2165,5 +2214,5 @@
         }
 
-        int rc2 = gvmmR0UsedUnlock(pGVMM);
+        int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
         AssertRC(rc2);
         GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
@@ -2219,20 +2268,27 @@
     if (RT_SUCCESS(rc))
     {
-        rc = gvmmR0UsedLock(pGVMM);
-        AssertRC(rc);
-        pGVM->gvmm.s.StatsSched.cPollCalls++;
-
-        Assert(ASMGetFlags() & X86_EFL_IF);
-        const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
-
-        if (!fYield)
+        /*
+         * We currently only implement helping doing wakeups (fYield = false), so don't
+         * bother taking the lock if gvmmR0SchedDoWakeUps is not going to do anything.
+         */
+        if (!fYield && pGVMM->fDoEarlyWakeUps)
+        {
+            rc = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc);
+            pGVM->gvmm.s.StatsSched.cPollCalls++;
+
+            Assert(ASMGetFlags() & X86_EFL_IF);
+            const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
+
             pGVM->gvmm.s.StatsSched.cPollWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
+
+            GVMMR0_USED_SHARED_UNLOCK(pGVMM);
+        }
+        /*
+         * Not quite sure what we could do here...
+         */
+        else if (fYield)
+            rc = VERR_NOT_IMPLEMENTED; /** @todo implement this... */
         else
-        {
-            /** @todo implement this... */
-            rc = VERR_NOT_IMPLEMENTED;
-        }
-
-        gvmmR0UsedUnlock(pGVMM);
+            rc = VINF_SUCCESS;
     }
 
@@ -2446,5 +2502,5 @@
         memset(&pStats->SchedVM, 0, sizeof(pStats->SchedVM));
 
-        int rc = gvmmR0UsedLock(pGVMM);
+        int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
         AssertRCReturn(rc, rc);
     }
@@ -2519,5 +2575,5 @@
     pStats->cHostCpus = iDstCpu;
 
-    gvmmR0UsedUnlock(pGVMM);
+    GVMMR0_USED_SHARED_UNLOCK(pGVMM);
 
     return VINF_SUCCESS;
@@ -2594,5 +2650,5 @@
         GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
 
-        int rc = gvmmR0UsedLock(pGVMM);
+        int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
         AssertRCReturn(rc, rc);
     }
@@ -2634,5 +2690,5 @@
     }
 
-    gvmmR0UsedUnlock(pGVMM);
+    GVMMR0_USED_SHARED_UNLOCK(pGVMM);
 
     return VINF_SUCCESS;
