Index: /trunk/include/VBox/err.h
===================================================================
--- /trunk/include/VBox/err.h	(revision 92391)
+++ /trunk/include/VBox/err.h	(revision 92392)
@@ -1258,6 +1258,4 @@
  * @{
  */
-/** Reason for leaving RZ: Calling host function. */
-#define VINF_VMM_CALL_HOST                  2700
 /** Reason for leaving R0: Hit a ring-0 assertion on EMT. */
 #define VERR_VMM_RING0_ASSERTION            (-2701)
Index: /trunk/include/VBox/err.mac
===================================================================
--- /trunk/include/VBox/err.mac	(revision 92391)
+++ /trunk/include/VBox/err.mac	(revision 92392)
@@ -488,5 +488,4 @@
 %define VERR_IOM_MMIO_REGION_ALREADY_MAPPED    (-2662)
 %define VERR_IOM_MMIO_REGION_NOT_MAPPED    (-2663)
-%define VINF_VMM_CALL_HOST    2700
 %define VERR_VMM_RING0_ASSERTION    (-2701)
 %define VERR_VMM_HYPER_CR3_MISMATCH    (-2702)
Index: /trunk/include/VBox/vmm/vmm.h
===================================================================
--- /trunk/include/VBox/vmm/vmm.h	(revision 92391)
+++ /trunk/include/VBox/vmm/vmm.h	(revision 92392)
@@ -49,27 +49,13 @@
 
 /**
- * VMMRZCallRing3 operations.
- */
-typedef enum VMMCALLRING3
-{
-    /** Invalid operation.  */
-    VMMCALLRING3_INVALID = 0,
-    /** Signal a ring 0 assertion. */
-    VMMCALLRING3_VM_R0_ASSERTION,
-    /** The usual 32-bit hack. */
-    VMMCALLRING3_32BIT_HACK = 0x7fffffff
-} VMMCALLRING3;
-
-/**
- * VMMRZCallRing3 notification callback.
+ * Ring-0 assertion notification callback.
  *
  * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   enmOperation    The operation causing the ring-3 jump.
  * @param   pvUser          The user argument.
  */
-typedef DECLCALLBACKTYPE(int, FNVMMR0CALLRING3NOTIFICATION,(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation, void *pvUser));
-/** Pointer to a FNRTMPNOTIFICATION(). */
-typedef FNVMMR0CALLRING3NOTIFICATION *PFNVMMR0CALLRING3NOTIFICATION;
+typedef DECLCALLBACKTYPE(int, FNVMMR0ASSERTIONNOTIFICATION,(PVMCPUCC pVCpu, void *pvUser));
+/** Pointer to a FNVMMR0ASSERTIONNOTIFICATION(). */
+typedef FNVMMR0ASSERTIONNOTIFICATION *PFNVMMR0ASSERTIONNOTIFICATION;
 
 /**
@@ -513,4 +499,7 @@
 VMMR0_INT_DECL(int)  VMMR0EmtSignalSupEvent(PGVM pGVM, PGVMCPU pGVCpu, SUPSEMEVENT hEvent);
 VMMR0_INT_DECL(int)  VMMR0EmtSignalSupEventByGVM(PGVM pGVM, SUPSEMEVENT hEvent);
+VMMR0_INT_DECL(int)  VMMR0AssertionSetNotification(PVMCPUCC pVCpu, PFNVMMR0ASSERTIONNOTIFICATION pfnCallback, RTR0PTR pvUser);
+VMMR0_INT_DECL(void) VMMR0AssertionRemoveNotification(PVMCPUCC pVCpu);
+VMMR0_INT_DECL(bool) VMMR0AssertionIsNotificationSet(PVMCPUCC pVCpu);
 
 /** @name VMMR0EMTWAIT_F_XXX - flags for VMMR0EmtWaitEventInner and friends.
@@ -592,12 +581,7 @@
  * @{
  */
-VMMRZDECL(int)      VMMRZCallRing3(PVMCC pVMCC, PVMCPUCC pVCpu, VMMCALLRING3 enmOperation, uint64_t uArg);
-VMMRZDECL(int)      VMMRZCallRing3NoCpu(PVMCC pVM, VMMCALLRING3 enmOperation, uint64_t uArg);
 VMMRZDECL(void)     VMMRZCallRing3Disable(PVMCPUCC pVCpu);
 VMMRZDECL(void)     VMMRZCallRing3Enable(PVMCPUCC pVCpu);
 VMMRZDECL(bool)     VMMRZCallRing3IsEnabled(PVMCPUCC pVCpu);
-VMMRZDECL(int)      VMMRZCallRing3SetNotification(PVMCPUCC pVCpu, R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION) pfnCallback, RTR0PTR pvUser);
-VMMRZDECL(void)     VMMRZCallRing3RemoveNotification(PVMCPUCC pVCpu);
-VMMRZDECL(bool)     VMMRZCallRing3IsNotificationSet(PVMCPUCC pVCpu);
 /** @} */
 #endif
Index: /trunk/src/VBox/VMM/VMMR0/HMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMR0.cpp	(revision 92391)
+++ /trunk/src/VBox/VMM/VMMR0/HMR0.cpp	(revision 92392)
@@ -78,5 +78,5 @@
     DECLR0CALLBACKMEMBER(int,          pfnEnterSession, (PVMCPUCC pVCpu));
     DECLR0CALLBACKMEMBER(void,         pfnThreadCtxCallback, (RTTHREADCTXEVENT enmEvent, PVMCPUCC pVCpu, bool fGlobalInit));
-    DECLR0CALLBACKMEMBER(int,          pfnCallRing3Callback, (PVMCPUCC pVCpu, VMMCALLRING3 enmOperation));
+    DECLR0CALLBACKMEMBER(int,          pfnAssertionCallback, (PVMCPUCC pVCpu));
     DECLR0CALLBACKMEMBER(int,          pfnExportHostState, (PVMCPUCC pVCpu));
     DECLR0CALLBACKMEMBER(VBOXSTRICTRC, pfnRunGuestCode, (PVMCPUCC pVCpu));
@@ -162,5 +162,5 @@
     /* .pfnEnterSession = */        VMXR0Enter,
     /* .pfnThreadCtxCallback = */   VMXR0ThreadCtxCallback,
-    /* .pfnCallRing3Callback = */   VMXR0CallRing3Callback,
+    /* .pfnAssertionCallback = */   VMXR0AssertionCallback,
     /* .pfnExportHostState = */     VMXR0ExportHostState,
     /* .pfnRunGuestCode = */        VMXR0RunGuestCode,
@@ -177,5 +177,5 @@
     /* .pfnEnterSession = */        SVMR0Enter,
     /* .pfnThreadCtxCallback = */   SVMR0ThreadCtxCallback,
-    /* .pfnCallRing3Callback = */   SVMR0CallRing3Callback,
+    /* .pfnAssertionCallback = */   SVMR0AssertionCallback,
     /* .pfnExportHostState = */     SVMR0ExportHostState,
     /* .pfnRunGuestCode = */        SVMR0RunGuestCode,
@@ -233,7 +233,7 @@
 }
 
-static DECLCALLBACK(int) hmR0DummyCallRing3Callback(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation)
-{
-    RT_NOREF(pVCpu, enmOperation);
+static DECLCALLBACK(int) hmR0DummyAssertionCallback(PVMCPUCC pVCpu)
+{
+    RT_NOREF(pVCpu);
     return VINF_SUCCESS;
 }
@@ -256,5 +256,5 @@
     /* .pfnEnterSession = */        hmR0DummyEnter,
     /* .pfnThreadCtxCallback = */   hmR0DummyThreadCtxCallback,
-    /* .pfnCallRing3Callback = */   hmR0DummyCallRing3Callback,
+    /* .pfnAssertionCallback = */   hmR0DummyAssertionCallback,
     /* .pfnExportHostState = */     hmR0DummyExportHostState,
     /* .pfnRunGuestCode = */        hmR0DummyRunGuestCode,
@@ -1373,17 +1373,16 @@
 
 /**
- * Notification callback before performing a longjump to ring-3.
+ * Notification callback before an assertion longjump and guru meditation.
  *
  * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   enmOperation    The operation causing the ring-3 longjump.
  * @param   pvUser          User argument, currently unused, NULL.
  */
-static DECLCALLBACK(int) hmR0CallRing3Callback(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
+static DECLCALLBACK(int) hmR0AssertionCallback(PVMCPUCC pVCpu, void *pvUser)
 {
     RT_NOREF(pvUser);
     Assert(pVCpu);
-    Assert(g_HmR0Ops.pfnCallRing3Callback);
-    return g_HmR0Ops.pfnCallRing3Callback(pVCpu, enmOperation);
+    Assert(g_HmR0Ops.pfnAssertionCallback);
+    return g_HmR0Ops.pfnAssertionCallback(pVCpu);
 }
 
@@ -1412,5 +1411,5 @@
 
     /* Register a callback to fire prior to performing a longjmp to ring-3 so HM can disable VT-x/AMD-V if needed. */
-    VMMRZCallRing3SetNotification(pVCpu, hmR0CallRing3Callback, NULL /* pvUser */);
+    VMMR0AssertionSetNotification(pVCpu, hmR0AssertionCallback, NULL /*pvUser*/);
 
     /* Reload host-state (back from ring-3/migrated CPUs) and shared guest/host bits. */
@@ -1497,5 +1496,5 @@
 
     /* De-register the longjmp-to-ring 3 callback now that we have reliquished hardware resources. */
-    VMMRZCallRing3RemoveNotification(pVCpu);
+    VMMR0AssertionRemoveNotification(pVCpu);
     return VINF_SUCCESS;
 }
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 92391)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 92392)
@@ -3043,69 +3043,37 @@
 
 /**
- * Does the necessary state syncing before doing a longjmp to ring-3.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- *
- * @remarks No-long-jmp zone!!!
- */
-static int hmR0SvmLongJmpToRing3(PVMCPUCC pVCpu)
-{
-    return hmR0SvmLeaveSession(pVCpu);
-}
-
-
-/**
  * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
- * any remaining host state) before we longjump to ring-3 and possibly get
- * preempted.
+ * any remaining host state) before we go back to ring-3 due to an assertion.
  *
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   enmOperation    The operation causing the ring-3 longjump.
- */
-VMMR0DECL(int) SVMR0CallRing3Callback(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation)
-{
-    if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
-    {
-        /*
-         * !!! IMPORTANT !!!
-         * If you modify code here, make sure to check whether hmR0SvmLeave() and hmR0SvmLeaveSession() needs
-         * to be updated too. This is a stripped down version which gets out ASAP trying to not trigger any assertion.
-         */
-        VMMRZCallRing3RemoveNotification(pVCpu);
-        VMMRZCallRing3Disable(pVCpu);
-        HM_DISABLE_PREEMPT(pVCpu);
-
-        /* Import the entire guest state. */
-        hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
-
-        /* Restore host FPU state if necessary and resync on next R0 reentry. */
-        CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
-
-        /* Restore host debug registers if necessary and resync on next R0 reentry. */
-        CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
-
-        /* Deregister the hook now that we've left HM context before re-enabling preemption. */
-        /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here!  */
-        VMMR0ThreadCtxHookDisable(pVCpu);
-
-        /* Leave HM context. This takes care of local init (term). */
-        HMR0LeaveCpu(pVCpu);
-
-        HM_RESTORE_PREEMPT();
-        return VINF_SUCCESS;
-    }
-
-    Assert(pVCpu);
-    Assert(VMMRZCallRing3IsEnabled(pVCpu));
-    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
-
+ */
+VMMR0DECL(int) SVMR0AssertionCallback(PVMCPUCC pVCpu)
+{
+    /*
+     * !!! IMPORTANT !!!
+     * If you modify code here, make sure to check whether hmR0SvmLeave() and hmR0SvmLeaveSession() needs
+     * to be updated too. This is a stripped down version which gets out ASAP trying to not trigger any assertion.
+     */
+    VMMR0AssertionRemoveNotification(pVCpu);
     VMMRZCallRing3Disable(pVCpu);
-
-    Log4Func(("Calling hmR0SvmLongJmpToRing3\n"));
-    int rc = hmR0SvmLongJmpToRing3(pVCpu);
-    AssertRCReturn(rc, rc);
-
-    VMMRZCallRing3Enable(pVCpu);
+    HM_DISABLE_PREEMPT(pVCpu);
+
+    /* Import the entire guest state. */
+    hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
+
+    /* Restore host FPU state if necessary and resync on next R0 reentry. */
+    CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
+
+    /* Restore host debug registers if necessary and resync on next R0 reentry. */
+    CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
+
+    /* Deregister the hook now that we've left HM context before re-enabling preemption. */
+    /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here!  */
+    VMMR0ThreadCtxHookDisable(pVCpu);
+
+    /* Leave HM context. This takes care of local init (term). */
+    HMR0LeaveCpu(pVCpu);
+
+    HM_RESTORE_PREEMPT();
     return VINF_SUCCESS;
 }
@@ -4814,5 +4782,5 @@
     rc = hmR0SvmExitToRing3(pVCpu, rc);
     Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
-    Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
+    Assert(!VMMR0AssertionIsNotificationSet(pVCpu));
     return rc;
 }
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.h	(revision 92391)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.h	(revision 92392)
@@ -41,5 +41,5 @@
 VMMR0DECL(int)          SVMR0Enter(PVMCPUCC pVCpu);
 VMMR0DECL(void)         SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPUCC pVCpu, bool fGlobalInit);
-VMMR0DECL(int)          SVMR0CallRing3Callback(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation);
+VMMR0DECL(int)          SVMR0AssertionCallback(PVMCPUCC pVCpu);
 VMMR0DECL(int)          SVMR0EnableCpu(PHMPHYSCPU pHostCpu, PVMCC pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage,
                                        bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs);
Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 92391)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 92392)
@@ -8691,66 +8691,48 @@
 /**
  * VMMRZCallRing3() callback wrapper which saves the guest state before we
- * longjump to ring-3 and possibly get preempted.
+ * longjump due to a ring-0 assertion.
  *
  * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   enmOperation    The operation causing the ring-3 longjump.
- */
-VMMR0DECL(int) VMXR0CallRing3Callback(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation)
-{
-    if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
-    {
-        /*
-         * !!! IMPORTANT !!!
-         * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs to be updated too.
-         * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
-         */
-        VMMRZCallRing3RemoveNotification(pVCpu);
-        VMMRZCallRing3Disable(pVCpu);
-        HM_DISABLE_PREEMPT(pVCpu);
-
-        PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
-        hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
-        CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
-        CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
-
-        /* Restore host-state bits that VT-x only restores partially. */
-        if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
-            VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
-        pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;
-
-        /* Restore the lazy host MSRs as we're leaving VT-x context. */
-        if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
-            hmR0VmxLazyRestoreHostMsrs(pVCpu);
-
-        /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
-        pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
-        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
-
-        /* Clear the current VMCS data back to memory (shadow VMCS if any would have been
-           cleared as part of importing the guest state above. */
-        hmR0VmxClearVmcs(pVmcsInfo);
-
-        /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here!  */
-        VMMR0ThreadCtxHookDisable(pVCpu);
-
-        /* Leave HM context. This takes care of local init (term). */
-        HMR0LeaveCpu(pVCpu);
-        HM_RESTORE_PREEMPT();
-        return VINF_SUCCESS;
-    }
-
-    Assert(pVCpu);
-    Assert(VMMRZCallRing3IsEnabled(pVCpu));
-    HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
-
+ */
+VMMR0DECL(int) VMXR0AssertionCallback(PVMCPUCC pVCpu)
+{
+    /*
+     * !!! IMPORTANT !!!
+     * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs to be updated too.
+     * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
+     */
+    VMMR0AssertionRemoveNotification(pVCpu);
     VMMRZCallRing3Disable(pVCpu);
-
-    Log4Func(("-> hmR0VmxLongJmpToRing3 enmOperation=%d\n", enmOperation));
-
-    int rc = hmR0VmxLongJmpToRing3(pVCpu);
-    AssertRCReturn(rc, rc);
-
-    VMMRZCallRing3Enable(pVCpu);
+    HM_DISABLE_PREEMPT(pVCpu);
+
+    PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
+    hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
+    CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
+    CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
+
+    /* Restore host-state bits that VT-x only restores partially. */
+    if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
+        VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
+    pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;
+
+    /* Restore the lazy host MSRs as we're leaving VT-x context. */
+    if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
+        hmR0VmxLazyRestoreHostMsrs(pVCpu);
+
+    /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
+    pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
+    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
+
+    /* Clear the current VMCS data back to memory (shadow VMCS if any would have been
+       cleared as part of importing the guest state above). */
+    hmR0VmxClearVmcs(pVmcsInfo);
+
+    /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here!  */
+    VMMR0ThreadCtxHookDisable(pVCpu);
+
+    /* Leave HM context. This takes care of local init (term). */
+    HMR0LeaveCpu(pVCpu);
+    HM_RESTORE_PREEMPT();
     return VINF_SUCCESS;
 }
@@ -12970,5 +12952,5 @@
     }
     Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
-    Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
+    Assert(!VMMR0AssertionIsNotificationSet(pVCpu));
     return rcStrict;
 }
Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.h	(revision 92391)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.h	(revision 92392)
@@ -33,5 +33,5 @@
 VMMR0DECL(int)          VMXR0Enter(PVMCPUCC pVCpu);
 VMMR0DECL(void)         VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPUCC pVCpu, bool fGlobalInit);
-VMMR0DECL(int)          VMXR0CallRing3Callback(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation);
+VMMR0DECL(int)          VMXR0AssertionCallback(PVMCPUCC pVCpu);
 VMMR0DECL(int)          VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVMCC pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys,
                                        bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs);
Index: /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 92391)
+++ /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 92392)
@@ -1291,13 +1291,4 @@
             STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
             break;
-        case VINF_VMM_CALL_HOST:
-            switch (pVCpu->vmm.s.enmCallRing3Operation)
-            {
-                case VMMCALLRING3_VM_R0_ASSERTION:
-                default:
-                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
-                    break;
-            }
-            break;
         case VINF_PATM_DUPLICATE_FUNCTION:
             STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
@@ -1458,5 +1449,5 @@
                             if (RT_UNLIKELY(   VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
                                             && RT_SUCCESS_NP(rc)
-                                            && rc != VINF_VMM_CALL_HOST ))
+                                            && rc != VERR_VMM_RING0_ASSERTION ))
                             {
                                 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
@@ -3540,4 +3531,52 @@
 *********************************************************************************************************************************/
 
+/**
+ * Installs a notification callback for ring-0 assertions.
+ *
+ * @param   pVCpu         The cross context virtual CPU structure.
+ * @param   pfnCallback   Pointer to the callback.
+ * @param   pvUser        The user argument.
+ *
+ * @returns VBox status code.
+ */
+VMMR0_INT_DECL(int) VMMR0AssertionSetNotification(PVMCPUCC pVCpu, PFNVMMR0ASSERTIONNOTIFICATION pfnCallback, RTR0PTR pvUser)
+{
+    AssertPtrReturn(pVCpu, VERR_INVALID_POINTER);
+    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+
+    if (!pVCpu->vmm.s.pfnRing0AssertCallback)
+    {
+        pVCpu->vmm.s.pfnRing0AssertCallback    = pfnCallback;
+        pVCpu->vmm.s.pvRing0AssertCallbackUser = pvUser;
+        return VINF_SUCCESS;
+    }
+    return VERR_ALREADY_EXISTS;
+}
+
+
+/**
+ * Removes the ring-0 callback.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure.
+ */
+VMMR0_INT_DECL(void) VMMR0AssertionRemoveNotification(PVMCPUCC pVCpu)
+{
+    pVCpu->vmm.s.pfnRing0AssertCallback    = NULL;
+    pVCpu->vmm.s.pvRing0AssertCallbackUser = NULL;
+}
+
+
+/**
+ * Checks whether there is a ring-0 callback notification active.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @returns true if the notification is active, false otherwise.
+ */
+VMMR0_INT_DECL(bool) VMMR0AssertionIsNotificationSet(PVMCPUCC pVCpu)
+{
+    return pVCpu->vmm.s.pfnRing0AssertCallback != NULL;
+}
+
+
 /*
  * Jump back to ring-3 if we're the EMT and the longjmp is armed.
@@ -3565,5 +3604,7 @@
 # endif
             {
-                int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
+                if (pVCpu->vmm.s.pfnRing0AssertCallback)
+                    pVCpu->vmm.s.pfnRing0AssertCallback(pVCpu, pVCpu->vmm.s.pvRing0AssertCallbackUser);
+                int rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VERR_VMM_RING0_ASSERTION);
                 return RT_FAILURE_NP(rc);
             }
Index: /trunk/src/VBox/VMM/VMMR3/VMM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 92391)
+++ /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 92392)
@@ -174,5 +174,5 @@
 static VBOXSTRICTRC         vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
                                                      uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
-static int                  vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu);
+static int                  vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu);
 static FNRTTHREAD           vmmR3LogFlusher;
 static void                 vmmR3LogReturnFlush(PVM pVM, PVMCPU pVCpu, PVMMR3CPULOGGER pShared, size_t idxBuf,
@@ -429,5 +429,4 @@
     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest,      STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest",      STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR,            STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR",            STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns.");
-    STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallRing3,           STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc",             STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");
 
     STAMR3Register(pVM, &pVM->vmm.s.StatLogFlusherFlushes,  STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, "/VMM/LogFlush/00-Flushes",  STAMUNIT_OCCURENCES, "Total number of buffer flushes");
@@ -525,27 +524,20 @@
      * Call Ring-0 entry with init code.
      */
-    for (;;)
-    {
 #ifdef NO_SUPCALLR0VMM
-        //rc = VERR_GENERAL_FAILURE;
-        rc = VINF_SUCCESS;
+    //rc = VERR_GENERAL_FAILURE;
+    rc = VINF_SUCCESS;
 #else
-        rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
-#endif
-        /*
-         * Flush the logs.
-         */
+    rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
+#endif
+
+    /*
+     * Flush the logs & deal with assertions.
+     */
 #ifdef LOG_ENABLED
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
-#endif
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
-        if (rc != VINF_VMM_CALL_HOST)
-            break;
-        rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
-        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
-            break;
-        /* Resume R0 */
-    }
-
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+    if (rc == VERR_VMM_RING0_ASSERTION)
+        rc = vmmR3HandleRing0Assert(pVM, pVCpu);
     if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
     {
@@ -555,4 +547,7 @@
     }
 
+    /*
+     * Log stuff we learned in ring-0.
+     */
     /* Log whether thread-context hooks are used (on Linux this can depend on how the kernel is configured). */
     if (pVM->vmm.s.fIsUsingContextHooks)
@@ -657,27 +652,20 @@
      * Call Ring-0 entry with termination code.
      */
-    int rc;
-    for (;;)
-    {
 #ifdef NO_SUPCALLR0VMM
-        //rc = VERR_GENERAL_FAILURE;
-        rc = VINF_SUCCESS;
+    //rc = VERR_GENERAL_FAILURE;
+    int rc = VINF_SUCCESS;
 #else
-        rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
-#endif
-        /*
-         * Flush the logs.
-         */
+    int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
+#endif
+
+    /*
+     * Flush the logs & deal with assertions.
+     */
 #ifdef LOG_ENABLED
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
-#endif
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
-        if (rc != VINF_VMM_CALL_HOST)
-            break;
-        rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
-        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
-            break;
-        /* Resume R0 */
-    }
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+    if (rc == VERR_VMM_RING0_ASSERTION)
+        rc = vmmR3HandleRing0Assert(pVM, pVCpu);
     if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
     {
@@ -687,4 +675,7 @@
     }
 
+    /*
+     * Do clean ups.
+     */
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
@@ -1238,39 +1229,33 @@
     Log2(("VMMR3HmRunGC: (cs:rip=%04x:%RX64)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
 
-    for (;;)
-    {
-        int rc;
-        do
-        {
+    int rc;
+    do
+    {
 #ifdef NO_SUPCALLR0VMM
-            rc = VERR_GENERAL_FAILURE;
+        rc = VERR_GENERAL_FAILURE;
 #else
-            rc = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), VMMR0_DO_HM_RUN, pVCpu->idCpu);
-            if (RT_LIKELY(rc == VINF_SUCCESS))
-                rc = pVCpu->vmm.s.iLastGZRc;
-#endif
-        } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
+        rc = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), VMMR0_DO_HM_RUN, pVCpu->idCpu);
+        if (RT_LIKELY(rc == VINF_SUCCESS))
+            rc = pVCpu->vmm.s.iLastGZRc;
+#endif
+    } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
 
 #if 0 /** @todo triggers too often */
-        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
-#endif
-
-        /*
-         * Flush the logs
-         */
+    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
+#endif
+
+    /*
+     * Flush the logs
+     */
 #ifdef LOG_ENABLED
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
-#endif
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
-        if (rc != VINF_VMM_CALL_HOST)
-        {
-            Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
-            return rc;
-        }
-        rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
-        if (RT_FAILURE(rc))
-            return rc;
-        /* Resume R0 */
-    }
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+    if (rc != VERR_VMM_RING0_ASSERTION)
+    {
+        Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
+        return rc;
+    }
+    return vmmR3HandleRing0Assert(pVM, pVCpu);
 }
 
@@ -1286,32 +1271,26 @@
 VMMR3_INT_DECL(VBOXSTRICTRC) VMMR3CallR0EmtFast(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation)
 {
-    for (;;)
-    {
-        VBOXSTRICTRC rcStrict;
-        do
-        {
+    VBOXSTRICTRC rcStrict;
+    do
+    {
 #ifdef NO_SUPCALLR0VMM
-            rcStrict = VERR_GENERAL_FAILURE;
+        rcStrict = VERR_GENERAL_FAILURE;
 #else
-            rcStrict = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), enmOperation, pVCpu->idCpu);
-            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
-                rcStrict = pVCpu->vmm.s.iLastGZRc;
-#endif
-        } while (rcStrict == VINF_EM_RAW_INTERRUPT_HYPER);
-
-        /*
-         * Flush the logs
-         */
+        rcStrict = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), enmOperation, pVCpu->idCpu);
+        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+            rcStrict = pVCpu->vmm.s.iLastGZRc;
+#endif
+    } while (rcStrict == VINF_EM_RAW_INTERRUPT_HYPER);
+
+    /*
+     * Flush the logs
+     */
 #ifdef LOG_ENABLED
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
-#endif
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
-        if (rcStrict != VINF_VMM_CALL_HOST)
-            return rcStrict;
-        int rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
-        if (RT_FAILURE(rc))
-            return rc;
-        /* Resume R0 */
-    }
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+    if (rcStrict != VERR_VMM_RING0_ASSERTION)
+        return rcStrict;
+    return vmmR3HandleRing0Assert(pVM, pVCpu);
 }
 
@@ -2449,76 +2428,56 @@
 VMMR3_INT_DECL(int) VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
 {
-    int rc;
-    for (;;)
-    {
+    /*
+     * Call ring-0.
+     */
 #ifdef NO_SUPCALLR0VMM
-        rc = VERR_GENERAL_FAILURE;
+    int rc = VERR_GENERAL_FAILURE;
 #else
-        rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, enmOperation, u64Arg, pReqHdr);
-#endif
-        /*
-         * Flush the logs.
-         */
+    int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, enmOperation, u64Arg, pReqHdr);
+#endif
+
+    /*
+     * Flush the logs and deal with ring-0 assertions.
+     */
 #ifdef LOG_ENABLED
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
-#endif
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
-        if (rc != VINF_VMM_CALL_HOST)
-            break;
-        rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
-        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
-            break;
-        /* Resume R0 */
-    }
-
-    AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
-                          ("enmOperation=%u rc=%Rrc\n", enmOperation, rc),
-                          VERR_IPE_UNEXPECTED_INFO_STATUS);
-    return rc;
-}
-
-
-/**
- * Service a call to the ring-3 host code.
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+    if (rc != VERR_VMM_RING0_ASSERTION)
+    {
+        AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
+                              ("enmOperation=%u rc=%Rrc\n", enmOperation, rc),
+                              VERR_IPE_UNEXPECTED_INFO_STATUS);
+        return rc;
+    }
+    return vmmR3HandleRing0Assert(pVM, pVCpu);
+}
+
+
+/**
+ * Logs a ring-0 assertion ASAP after returning to ring-3.
  *
  * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
- * @param   pVCpu   The cross context virtual CPU structure.
- * @remarks Careful with critsects.
- */
-static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu)
-{
-    /*
-     * We must also check for pending critsect exits or else we can deadlock
-     * when entering other critsects here.
-     */
-    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
-        PDMCritSectBothFF(pVM, pVCpu);
-
-    switch (pVCpu->vmm.s.enmCallRing3Operation)
-    {
-        /*
-         * Signal a ring 0 hypervisor assertion.
-         * Cancel the longjmp operation that's in progress.
-         */
-        case VMMCALLRING3_VM_R0_ASSERTION:
-            pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID;
-            pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false;
+ * @param   pVM         The cross context VM structure.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+static int vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu)
+{
+    /*
+     * Signal a ring 0 hypervisor assertion.
+     * Cancel the longjmp operation that's in progress.
+     */
+    pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false;
 #ifdef RT_ARCH_X86
-            pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0;
+    pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0;
 #else
-            pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0;
+    pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0;
 #endif
 #ifdef VMM_R0_SWITCH_STACK
-            *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker  */
-#endif
-            LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
-            LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
-            return VERR_VMM_RING0_ASSERTION;
-
-        default:
-            AssertMsgFailed(("enmCallRing3Operation=%d\n", pVCpu->vmm.s.enmCallRing3Operation));
-            return VERR_VMM_UNKNOWN_RING3_CALL;
-    }
+    *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker  */
+#endif
+    LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
+    LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
+    return VERR_VMM_RING0_ASSERTION;
 }
 
Index: /trunk/src/VBox/VMM/VMMRZ/VMMRZ.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMRZ/VMMRZ.cpp	(revision 92391)
+++ /trunk/src/VBox/VMM/VMMRZ/VMMRZ.cpp	(revision 92392)
@@ -29,96 +29,4 @@
 #include <iprt/asm-amd64-x86.h>
 #include <iprt/string.h>
-
-
-/**
- * Calls the ring-3 host code.
- *
- * @returns VBox status code of the ring-3 call.
- * @retval  VERR_VMM_RING3_CALL_DISABLED if called at the wrong time. This must
- *          be passed up the stack, or if that isn't possible then VMMRZCallRing3
- *          needs to change it into an assertion.
- *
- *
- * @param   pVM             The cross context VM structure.
- * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
- * @param   enmOperation    The operation.
- * @param   uArg            The argument to the operation.
- */
-VMMRZDECL(int) VMMRZCallRing3(PVMCC pVM, PVMCPUCC pVCpu, VMMCALLRING3 enmOperation, uint64_t uArg)
-{
-    VMCPU_ASSERT_EMT(pVCpu);
-
-    /*
-     * Check if calling ring-3 has been disabled and only let let fatal calls thru.
-     */
-    if (RT_UNLIKELY(    pVCpu->vmm.s.cCallRing3Disabled != 0
-                    &&  enmOperation != VMMCALLRING3_VM_R0_ASSERTION))
-    {
-#ifndef IN_RING0
-        /*
-         * In most cases, it's sufficient to return a status code which
-         * will then be propagated up the code usually encountering several
-         * AssertRC invocations along the way. Hitting one of those is more
-         * helpful than stopping here.
-         *
-         * However, some doesn't check the status code because they are called
-         * from void functions, and for these we'll turn this into a ring-0
-         * assertion host call.
-         */
-        if (enmOperation != VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS)
-            return VERR_VMM_RING3_CALL_DISABLED;
-#endif
-#ifdef IN_RC
-        RTStrPrintf(g_szRTAssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
-                    "VMMRZCallRing3: enmOperation=%d uArg=%#llx idCpu=%#x cCallRing3Disabled=%#x\n",
-                    enmOperation, uArg, pVCpu->idCpu, pVCpu->vmm.s.cCallRing3Disabled);
-#endif
-        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
-                    "VMMRZCallRing3: enmOperation=%d uArg=%#llx idCpu=%#x cCallRing3Disabled=%#x\n",
-                    enmOperation, uArg, pVCpu->idCpu, pVCpu->vmm.s.cCallRing3Disabled);
-        enmOperation = VMMCALLRING3_VM_R0_ASSERTION;
-    }
-
-    /*
-     * The normal path.
-     */
-/** @todo profile this! */
-    pVCpu->vmm.s.enmCallRing3Operation = enmOperation;
-    pVCpu->vmm.s.u64CallRing3Arg = uArg;
-    pVCpu->vmm.s.rcCallRing3 = VERR_VMM_RING3_CALL_NO_RC;
-#ifdef IN_RC
-    pVM->vmm.s.pfnRCToHost(VINF_VMM_CALL_HOST);
-#else
-    int rc;
-    if (pVCpu->vmm.s.pfnCallRing3CallbackR0)
-    {
-        rc = pVCpu->vmm.s.pfnCallRing3CallbackR0(pVCpu, enmOperation, pVCpu->vmm.s.pvCallRing3CallbackUserR0);
-        if (RT_FAILURE(rc))
-            return rc;
-    }
-    rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VINF_VMM_CALL_HOST);
-    if (RT_FAILURE(rc))
-        return rc;
-#endif
-    return pVCpu->vmm.s.rcCallRing3;
-}
-
-
-/**
- * Simple wrapper that adds the pVCpu argument.
- *
- * @returns VBox status code of the ring-3 call.
- * @retval  VERR_VMM_RING3_CALL_DISABLED if called at the wrong time. This must
- *          be passed up the stack, or if that isn't possible then VMMRZCallRing3
- *          needs to change it into an assertion.
- *
- * @param   pVM             The cross context VM structure.
- * @param   enmOperation    The operation.
- * @param   uArg            The argument to the operation.
- */
-VMMRZDECL(int) VMMRZCallRing3NoCpu(PVMCC pVM, VMMCALLRING3 enmOperation, uint64_t uArg)
-{
-    return VMMRZCallRing3(pVM, VMMGetCpu(pVM), enmOperation, uArg);
-}
 
 
@@ -194,48 +102,2 @@
 }
 
-
-/**
- * Sets the ring-0 callback before doing the ring-3 call.
- *
- * @param   pVCpu         The cross context virtual CPU structure.
- * @param   pfnCallback   Pointer to the callback.
- * @param   pvUser        The user argument.
- *
- * @return VBox status code.
- */
-VMMRZDECL(int) VMMRZCallRing3SetNotification(PVMCPUCC pVCpu, R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION) pfnCallback, RTR0PTR pvUser)
-{
-    AssertPtrReturn(pVCpu, VERR_INVALID_POINTER);
-    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
-
-    if (pVCpu->vmm.s.pfnCallRing3CallbackR0)
-        return VERR_ALREADY_EXISTS;
-
-    pVCpu->vmm.s.pfnCallRing3CallbackR0    = pfnCallback;
-    pVCpu->vmm.s.pvCallRing3CallbackUserR0 = pvUser;
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Removes the ring-0 callback.
- *
- * @param   pVCpu   The cross context virtual CPU structure.
- */
-VMMRZDECL(void) VMMRZCallRing3RemoveNotification(PVMCPUCC pVCpu)
-{
-    pVCpu->vmm.s.pfnCallRing3CallbackR0 = NULL;
-}
-
-
-/**
- * Checks whether there is a ring-0 callback notification active.
- *
- * @param   pVCpu   The cross context virtual CPU structure.
- * @returns true if there the notification is active, false otherwise.
- */
-VMMRZDECL(bool) VMMRZCallRing3IsNotificationSet(PVMCPUCC pVCpu)
-{
-    return pVCpu->vmm.s.pfnCallRing3CallbackR0 != NULL;
-}
-
Index: /trunk/src/VBox/VMM/include/VMMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/VMMInternal.h	(revision 92391)
+++ /trunk/src/VBox/VMM/include/VMMInternal.h	(revision 92392)
@@ -408,5 +408,4 @@
     STAMCOUNTER                 StatRZRetTimerPending;
     STAMCOUNTER                 StatRZRetInterruptPending;
-    STAMCOUNTER                 StatRZRetCallRing3;
     STAMCOUNTER                 StatRZRetPATMDuplicateFn;
     STAMCOUNTER                 StatRZRetPGMChangeMode;
@@ -466,7 +465,4 @@
     /** @} */
 
-    /** Alignment padding, making sure u64CallRing3Arg and CallRing3JmpBufR0 are nicely aligned. */
-    uint32_t                    au32Padding3[1];
-
     /** @name Call Ring-3
      * Formerly known as host calls.
@@ -474,14 +470,9 @@
     /** The disable counter. */
     uint32_t                    cCallRing3Disabled;
-    /** The pending operation. */
-    VMMCALLRING3                enmCallRing3Operation;
-    /** The result of the last operation. */
-    int32_t                     rcCallRing3;
-    /** The argument to the operation. */
-    uint64_t                    u64CallRing3Arg;
-    /** The Ring-0 notification callback. */
-    R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION)   pfnCallRing3CallbackR0;
-    /** The Ring-0 notification callback user argument. */
-    R0PTRTYPE(void *)           pvCallRing3CallbackUserR0;
+    uint32_t                    u32Padding3;
+    /** Ring-0 assertion notification callback. */
+    R0PTRTYPE(PFNVMMR0ASSERTIONNOTIFICATION) pfnRing0AssertCallback;
+    /** Argument for pfnRing0AssertCallback. */
+    R0PTRTYPE(void *)           pvRing0AssertCallbackUser;
     /** The Ring-0 jmp buffer.
      * @remarks The size of this type isn't stable in assembly, so don't put
Index: /trunk/src/VBox/VMM/include/VMMInternal.mac
===================================================================
--- /trunk/src/VBox/VMM/include/VMMInternal.mac	(revision 92391)
+++ /trunk/src/VBox/VMM/include/VMMInternal.mac	(revision 92392)
@@ -127,14 +127,10 @@
         .TracerCtx              resb SUPDRVTRACERUSRCTX64_size
 
-        .au32Padding3           resd 1
-
         .cCallRing3Disabled     resd 1
-        .enmCallRing3Operation  resd 1
-        .rcCallRing3            resd 1
         alignb 8
-        .u64CallRing3Arg        resq 1
-        .pfnCallRing3CallbackR0         RTR0PTR_RES 1
-        .pvCallRing3CallbackUserR0      RTR0PTR_RES 1
-        ; .CallRing3JmpBufR0    resb no-can-do
+        .pfnRing0AssertCallback RTR0PTR_RES 1
+        .pvRing0AssertCallbackUser RTR0PTR_RES 1
+        alignb 16
+        .CallRing3JmpBufR0      resb 1
 endstruc
 
Index: /trunk/src/VBox/VMM/testcase/tstVMStruct.h
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 92391)
+++ /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 92392)
@@ -1186,6 +1186,3 @@
     GEN_CHECK_OFF(VMMCPU, cCallRing3Disabled);
-    GEN_CHECK_OFF(VMMCPU, enmCallRing3Operation);
-    GEN_CHECK_OFF(VMMCPU, rcCallRing3);
-    GEN_CHECK_OFF(VMMCPU, u64CallRing3Arg);
     GEN_CHECK_OFF(VMMCPU, CallRing3JmpBufR0);
     GEN_CHECK_OFF_DOT(VMMCPU, CallRing3JmpBufR0.SpCheck);
Index: /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp	(revision 92391)
+++ /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp	(revision 92392)
@@ -265,5 +265,4 @@
     PVM pVM = NULL; NOREF(pVM);
 
-    CHECK_MEMBER_ALIGNMENT(VMCPU, vmm.s.u64CallRing3Arg, 8);
 #if defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
     CHECK_MEMBER_ALIGNMENT(VMCPU, vmm.s.CallRing3JmpBufR0, 16);
