Index: /trunk/src/VBox/VMM/HWACCM.cpp
===================================================================
--- /trunk/src/VBox/VMM/HWACCM.cpp	(revision 20131)
+++ /trunk/src/VBox/VMM/HWACCM.cpp	(revision 20132)
@@ -376,4 +376,8 @@
 #endif
 
+    /* Max number of resume loops. */
+    rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->hwaccm.s.cMaxResumeLoops, 0 /* set by R0 later */);
+    AssertRC(rc);
+
     return VINF_SUCCESS;
 }
@@ -475,4 +479,5 @@
         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIrqWindow,          "/HWACCM/CPU%d/Exit/IrqWindow");
         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMaxResume,          "/HWACCM/CPU%d/Exit/MaxResume");
+        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPreemptPending,     "/HWACCM/CPU%d/Exit/PreemptPending");
 
         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchGuestIrq,         "/HWACCM/CPU%d/Switch/IrqPending");
@@ -494,5 +499,5 @@
         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdown,           "/HWACCM/CPU%d/Flush/Shootdown/Page");
         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdownFlush,      "/HWACCM/CPU%d/Flush/Shootdown/TLB");
-        
+
         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset,              "/HWACCM/CPU%d/TSC/Offset");
         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept,           "/HWACCM/CPU%d/TSC/Intercept");
Index: /trunk/src/VBox/VMM/HWACCMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/HWACCMInternal.h	(revision 20131)
+++ /trunk/src/VBox/VMM/HWACCMInternal.h	(revision 20132)
@@ -120,7 +120,4 @@
 
 
-/** Maxium resume loops allowed in ring 0 (safety precaution) */
-#define HWACCM_MAX_RESUME_LOOPS             1024
-
 /** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
 #define HWACCM_MAX_TLB_SHOOTDOWN_PAGES      8
@@ -217,4 +214,8 @@
     /** Maximum ASID allowed. */
     RTUINT                      uMaxASID;
+
+    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
+     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
+    uint32_t                    cMaxResumeLoops;
 
 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
@@ -644,4 +645,5 @@
     STAMCOUNTER             StatExitIrqWindow;
     STAMCOUNTER             StatExitMaxResume;
+    STAMCOUNTER             StatExitPreemptPending;
     STAMCOUNTER             StatIntReinject;
     STAMCOUNTER             StatPendingHostIrq;
Index: /trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp	(revision 20131)
+++ /trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp	(revision 20132)
@@ -37,11 +37,12 @@
 #include <VBox/selm.h>
 #include <VBox/iom.h>
-#include <iprt/param.h>
 #include <iprt/assert.h>
 #include <iprt/asm.h>
+#include <iprt/cpuset.h>
+#include <iprt/memobj.h>
+#include <iprt/param.h>
+#include <iprt/power.h>
 #include <iprt/string.h>
-#include <iprt/memobj.h>
-#include <iprt/cpuset.h>
-#include <iprt/power.h>
+#include <iprt/thread.h>
 #include "HWVMXR0.h"
 #include "HWSVMR0.h"
@@ -836,4 +837,14 @@
     pVM->hwaccm.s.uMaxASID                  = HWACCMR0Globals.uMaxASID;
 
+
+    if (!pVM->hwaccm.s.cMaxResumeLoops) /* allow ring-3 overrides */
+    {
+        pVM->hwaccm.s.cMaxResumeLoops       = 1024;
+#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+        if (RTThreadPreemptIsPendingTrusty())
+            pVM->hwaccm.s.cMaxResumeLoops   = 8192;
+#endif
+    }
+
     for (unsigned i=0;i<pVM->cCPUs;i++)
     {
@@ -852,5 +863,5 @@
     PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
 
-    /* @note Not correct as we can be rescheduled to a different cpu, but the fInUse case is mostly for debugging. */
+    /* Note: Not correct as we can be rescheduled to a different cpu, but the fInUse case is mostly for debugging. */
     ASMAtomicWriteBool(&pCpu->fInUse, true);
     ASMSetFlags(fFlags);
Index: /trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp	(revision 20131)
+++ /trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp	(revision 20132)
@@ -847,5 +847,5 @@
     uint8_t     u8LastVTPR;
     PHWACCM_CPUINFO pCpu = 0;
-    RTCCUINTREG uOldEFlags;
+    RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;
 #ifdef VBOX_STRICT
     RTCPUID  idCpuCheck;
@@ -863,5 +863,5 @@
 
     /* Safety precaution; looping for too long here can have a very bad effect on the host */
-    if (++cResume > HWACCM_MAX_RESUME_LOOPS)
+    if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops))
     {
         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMaxResume);
@@ -919,13 +919,22 @@
 
 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
-    /** @todo This must be repeated (or moved down) after we've disabled interrupts
-     *        below because a rescheduling request (IPI) might arrive before we get
-     *        there and we end up exceeding our timeslice. (Putting it here for
-     *        now because I don't want to mess up anything.) */
+    /*
+     * Exit to ring-3 if preemption/work is pending.
+     *
+     * Interrupts are disabled before the call to make sure we don't miss any interrupt
+     * that would flag preemption (IPI, timer tick, ++). (Would've been nice to do this
+     * further down, but SVMR0CheckPendingInterrupt makes that hard.)
+     *
+     * Note! Interrupts must be disabled *before* we check for TLB flushes; TLB
+     *       shootdowns rely on this.
+     */
+    uOldEFlags = ASMIntDisableFlags();
     if (RTThreadPreemptIsPending(NIL_RTTHREAD))
     {
-        rc = VINF_EM_RAW_INTERRUPT_HYPER;
+        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
+        rc = VINF_EM_RAW_INTERRUPT;
         goto end;
     }
+    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
 #endif
 
@@ -1003,4 +1012,5 @@
     }
 
+#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     /* Disable interrupts to make sure a poke will interrupt execution.
      * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
@@ -1008,4 +1018,5 @@
     uOldEFlags = ASMIntDisableFlags();
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
+#endif
 
     pCpu = HWACCMR0GetCurrentCpu();
@@ -1099,4 +1110,7 @@
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
     ASMSetFlags(uOldEFlags);
+#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    uOldEFlags = ~(RTCCUINTREG)0;
+#endif
     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatInGC, x);
 
@@ -2255,4 +2269,10 @@
     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC);
 
+#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    /* Restore interrupts if we exited after disabling them. */
+    if (uOldEFlags != ~(RTCCUINTREG)0)
+        ASMSetFlags(uOldEFlags);
+#endif
+
     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
     return rc;
Index: /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp	(revision 20131)
+++ /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp	(revision 20132)
@@ -433,5 +433,5 @@
             if (pVM->hwaccm.s.fHasIoApic)
                 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;
-            
+
             /* Mask away the bits that the CPU doesn't support */
             /** @todo make sure they don't conflict with the above requirements. */
@@ -2002,5 +2002,5 @@
     bool        fSetupTPRCaching = false;
     PHWACCM_CPUINFO pCpu = 0;
-    RTCCUINTREG uOldEFlags;
+    RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;
     unsigned    cResume = 0;
 #ifdef VBOX_STRICT
@@ -2104,5 +2104,5 @@
 
     /* Safety precaution; looping for too long here can have a very bad effect on the host */
-    if (++cResume > HWACCM_MAX_RESUME_LOOPS)
+    if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops))
     {
         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMaxResume);
@@ -2153,13 +2153,22 @@
 
 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
-    /** @todo This must be repeated (or moved down) after we've disabled interrupts
-     *        below because a rescheduling request (IPI) might arrive before we get
-     *        there and we end up exceeding our timeslice. (Putting it here for
-     *        now because I don't want to mess up anything.) */
+    /*
+     * Exit to ring-3 if preemption/work is pending.
+     *
+     * Interrupts are disabled before the call to make sure we don't miss any interrupt
+     * that would flag preemption (IPI, timer tick, ++). (Would've been nice to do this
+     * further down, but VMXR0CheckPendingInterrupt makes that hard.)
+     *
+     * Note! Interrupts must be disabled *before* we check for TLB flushes; TLB
+     *       shootdowns rely on this.
+     */
+    uOldEFlags = ASMIntDisableFlags();
     if (RTThreadPreemptIsPending(NIL_RTTHREAD))
     {
-        rc = VINF_EM_RAW_INTERRUPT_HYPER;
+        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
+        rc = VINF_EM_RAW_INTERRUPT;
         goto end;
     }
+    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
 #endif
 
@@ -2248,4 +2257,5 @@
         goto end;
 
+#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     /* Disable interrupts to make sure a poke will interrupt execution.
      * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
@@ -2253,4 +2263,5 @@
     uOldEFlags = ASMIntDisableFlags();
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
+#endif
 
     /* Deal with tagged TLB setup and invalidation. */
@@ -2293,4 +2304,7 @@
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
     ASMSetFlags(uOldEFlags);
+#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    uOldEFlags = ~(RTCCUINTREG)0;
+#endif
 
     AssertMsg(!pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries, ("pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries=%d\n", pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries));
@@ -3683,4 +3697,10 @@
     /* Just set the correct state here instead of trying to catch every goto above. */
     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC);
+
+#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    /* Restore interrupts if we exited after disabling them. */
+    if (uOldEFlags != ~(RTCCUINTREG)0)
+        ASMSetFlags(uOldEFlags);
+#endif
 
     STAM_STATS({
