Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 72064)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 72065)
@@ -1251,4 +1251,5 @@
 VMM_INT_DECL(bool)      CPUMCanSvmNstGstTakeVirtIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
 VMM_INT_DECL(uint8_t)   CPUMGetSvmNstGstInterrupt(PCCPUMCTX pCtx);
+VMM_INT_DECL(bool)      CPUMGetSvmNstGstVGif(PCCPUMCTX pCtx);
 VMM_INT_DECL(void)      CPUMSvmVmExitRestoreHostState(PVMCPU pVCpu, PCPUMCTX pCtx);
 VMM_INT_DECL(void)      CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr);
Index: /trunk/include/VBox/vmm/hm_svm.h
===================================================================
--- /trunk/include/VBox/vmm/hm_svm.h	(revision 72064)
+++ /trunk/include/VBox/vmm/hm_svm.h	(revision 72065)
@@ -574,4 +574,8 @@
     uint64_t    u;
 } SVMINTCTRL;
+/** Pointer to an SVMINTCTRL structure. */
+typedef SVMINTCTRL *PSVMINTCTRL;
+/** Pointer to a const SVMINTCTRL structure. */
+typedef const SVMINTCTRL *PCSVMINTCTRL;
 
 /**
@@ -1145,7 +1149,4 @@
 VMM_INT_DECL(bool)     HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx);
 VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu, PCCPUMCTX pCtx);
-VMM_INT_DECL(bool)     HMCanSvmNstGstTakePhysIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
-VMM_INT_DECL(bool)     HMCanSvmNstGstTakeVirtIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
-
 /** @} */
 
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 72064)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 72065)
@@ -2568,17 +2568,11 @@
     Assert(pCtx->hwvirt.fGif);
 
-    if (!pCtx->hwvirt.svm.fHMCachedVmcb)
-    {
-        PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
-        X86EFLAGS fEFlags;
-        if (pVmcbCtrl->IntCtrl.n.u1VIntrMasking)
-            fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
-        else
-            fEFlags.u = pCtx->eflags.u;
-
-        return fEFlags.Bits.u1IF;
-    }
-
-    return HMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
+    X86EFLAGS fEFlags;
+    if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
+        fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
+    else
+        fEFlags.u = pCtx->eflags.u;
+
+    return fEFlags.Bits.u1IF;
 #endif
 }
@@ -2604,26 +2598,17 @@
     Assert(pCtx->hwvirt.fGif);
 
-    /*
-     * Although at present, the V_TPR and V_INTR_PRIO fields are not modified
-     * by SVM R0 code and we could inspect them directly here, we play it
-     * safe and ask HM if it has cached the VMCB.
-     */
-    if (!pCtx->hwvirt.svm.fHMCachedVmcb)
-    {
-        PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
-        if (   !pVmcbCtrl->IntCtrl.n.u1IgnoreTPR
-            &&  pVmcbCtrl->IntCtrl.n.u4VIntrPrio <= pVmcbCtrl->IntCtrl.n.u8VTPR)
-            return false;
-
-        X86EFLAGS fEFlags;
-        if (pVmcbCtrl->IntCtrl.n.u1VIntrMasking)
-            fEFlags.u = pCtx->eflags.u;
-        else
-            fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
-
-        return fEFlags.Bits.u1IF;
-    }
-
-    return HMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx);
+    PCSVMVMCBCTRL pVmcbCtrl    = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
+    PCSVMINTCTRL  pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
+    if (   !pVmcbIntCtrl->n.u1IgnoreTPR
+        &&  pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
+        return false;
+
+    X86EFLAGS fEFlags;
+    if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
+        fEFlags.u = pCtx->eflags.u;
+    else
+        fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
+
+    return fEFlags.Bits.u1IF;
 #endif
 }
@@ -2644,4 +2629,25 @@
     PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
     return pVmcbCtrl->IntCtrl.n.u8VIntrVector;
+#endif
+}
+
+
+/**
+ * Gets the SVM nested-guest virtual GIF.
+ *
+ * @returns The nested-guest virtual GIF.
+ * @param   pCtx            The guest-CPU context.
+ */
+VMM_INT_DECL(bool) CPUMGetSvmNstGstVGif(PCCPUMCTX pCtx)
+{
+#ifdef IN_RC
+    RT_NOREF(pCtx);
+    AssertReleaseFailedReturn(false);
+#else
+    PCSVMVMCBCTRL pVmcbCtrl    = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
+    PCSVMINTCTRL  pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
+    if (pVmcbIntCtrl->n.u1VGifEnable)
+        return pVmcbIntCtrl->n.u1VGif;
+    return true;
 #endif
 }
Index: /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 72064)
+++ /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 72065)
@@ -577,69 +577,2 @@
 }
 
-
-/**
- * Checks whether the SVM nested-guest is in a state to receive physical (APIC)
- * interrupts.
- *
- * @returns true if it's ready, false otherwise.
- * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   pCtx        The guest-CPU context.
- *
- * @remarks This function looks at the VMCB cache rather than directly at the
- *          nested-guest VMCB. The latter may have been modified for executing
- *          using hardware-assisted SVM.
- *
- * @sa      CPUMCanSvmNstGstTakePhysIntr.
- */
-VMM_INT_DECL(bool) HMCanSvmNstGstTakePhysIntr(PVMCPU pVCpu, PCCPUMCTX pCtx)
-{
-    Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
-    Assert(pCtx->hwvirt.fGif);
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    X86EFLAGS fEFlags;
-    if (pVmcbNstGstCache->fVIntrMasking)
-        fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
-    else
-        fEFlags.u = pCtx->eflags.u;
-    return fEFlags.Bits.u1IF;
-}
-
-
-/**
- * Checks whether the SVM nested-guest is in a state to receive virtual (setup
- * for injection by VMRUN instruction) interrupts.
- *
- * @returns true if it's ready, false otherwise.
- * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   pCtx        The guest-CPU context.
- *
- * @remarks This function looks at the VMCB cache rather than directly at the
- *          nested-guest VMCB. The latter may have been modified for executing
- *          using hardware-assisted SVM.
- *
- * @sa      CPUMCanSvmNstGstTakeVirtIntr.
- */
-VMM_INT_DECL(bool) HMCanSvmNstGstTakeVirtIntr(PVMCPU pVCpu, PCCPUMCTX pCtx)
-{
-#ifdef IN_RC
-    RT_NOREF2(pVCpu, pCtx);
-    AssertReleaseFailedReturn(false);
-#else
-    Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
-    Assert(pCtx->hwvirt.fGif);
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-
-    PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
-    if (   !pVmcbCtrl->IntCtrl.n.u1IgnoreTPR
-        &&  pVmcbCtrl->IntCtrl.n.u4VIntrPrio <= pVmcbCtrl->IntCtrl.n.u8VTPR)
-        return false;
-
-    X86EFLAGS fEFlags;
-    if (pVmcbNstGstCache->fVIntrMasking)
-        fEFlags.u = pCtx->eflags.u;
-    else
-        fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
-    return fEFlags.Bits.u1IF;
-#endif
-}
-
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h	(revision 72064)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h	(revision 72065)
@@ -174,17 +174,16 @@
              *
              *   - V_IRQ: Tracked using VMCPU_FF_INTERRUPT_NESTED_GUEST force-flag and updated below.
-             *   - V_TPR: Already updated by iemCImpl_load_CrX or by the physical CPU for
-             *     hardware-assisted SVM execution.
+             *   - V_TPR: Updated by iemCImpl_load_CrX or by the physical CPU for hardware-assisted
+             *     SVM execution.
              *   - Interrupt shadow: Tracked using VMCPU_FF_INHIBIT_INTERRUPTS and RIP.
              */
             PSVMVMCBCTRL pVmcbMemCtrl = &pVmcbMem->ctrl;
-            if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))        /* V_IRQ. */
+            if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))       /* V_IRQ. */
+                pVmcbMemCtrl->IntCtrl.n.u1VIrqPending = 0;
+            else
             {
                 Assert(pVmcbCtrl->IntCtrl.n.u1VIrqPending);
-                pVmcbMemCtrl->IntCtrl.n.u1VIrqPending = 1;
                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
             }
-            else
-                pVmcbMemCtrl->IntCtrl.n.u1VIrqPending = 0;
 
             pVmcbMemCtrl->IntCtrl.n.u8VTPR = pVmcbCtrl->IntCtrl.n.u8VTPR;           /* V_TPR. */
@@ -439,6 +438,6 @@
 
         /* Virtual GIF. */
-        if (   pVmcbCtrl->IntCtrl.n.u1VGifEnable
-            && pVM->cpum.ro.GuestFeatures.fSvmVGif)
+        if (    pVmcbCtrl->IntCtrl.n.u1VGifEnable
+            && !pVM->cpum.ro.GuestFeatures.fSvmVGif)
         {
             Log(("iemSvmVmrun: Virtual GIF not supported -> Disabling\n"));
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 72064)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 72065)
@@ -215,5 +215,5 @@
 /** TRP: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING,
 V_INTR_VECTOR. */
-#define HMSVM_VMCB_CLEAN_TPR                    RT_BIT(3)
+#define HMSVM_VMCB_CLEAN_INT_CTRL               RT_BIT(3)
 /** Nested Paging: Nested CR3 (nCR3), PAT. */
 #define HMSVM_VMCB_CLEAN_NP                     RT_BIT(4)
@@ -237,5 +237,5 @@
                                                  | HMSVM_VMCB_CLEAN_IOPM_MSRPM  \
                                                  | HMSVM_VMCB_CLEAN_ASID        \
-                                                 | HMSVM_VMCB_CLEAN_TPR         \
+                                                 | HMSVM_VMCB_CLEAN_INT_CTRL    \
                                                  | HMSVM_VMCB_CLEAN_NP          \
                                                  | HMSVM_VMCB_CLEAN_CRX_EFER    \
@@ -982,5 +982,5 @@
 
     /* Ignore the priority in the virtual TPR. This is necessary for delivering PIC style (ExtInt) interrupts
-       and we currently deliver both PIC and APIC interrupts alike. See hmR0SvmInjectPendingEvent() */
+       and we currently deliver both PIC and APIC interrupts alike, see hmR0SvmEvaluatePendingEvent() */
     pVmcbCtrl->IntCtrl.n.u1IgnoreTPR = 1;
 
@@ -1070,5 +1070,5 @@
         Assert(pVmcbCtrlCur->u32VmcbCleanBits == 0);
 
-        /* Verify our assumption that GIM providers trap #UD uniformly across VCPUs. */
+        /* Verify our assumption that GIM providers trap #UD uniformly across VCPUs initially. */
         Assert(pVCpuCur->hm.s.fGIMTrapXcptUD == pVCpu->hm.s.fGIMTrapXcptUD);
     }
@@ -2022,5 +2022,5 @@
          * since SVM doesn't have a preemption timer.
          *
-         * We do this here rather than in hmR0SvmVmRunSetupVmcb() as we may have been executing the
+         * We do this here rather than in hmR0SvmSetupVmcbNested() as we may have been executing the
          * nested-guest in IEM incl. PAUSE instructions which would update the pause-filter counters
          * and may continue execution in SVM R0 without a nested-guest #VMEXIT in between.
@@ -2105,5 +2105,5 @@
             }
 
-            pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+            pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_INT_CTRL);
         }
     }
@@ -2407,5 +2407,5 @@
 
 #ifdef VBOX_WITH_NESTED_HWVIRT
-    if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable == 1)
+    if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
     {
         Assert(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);
@@ -2482,5 +2482,5 @@
  * @sa      HMSvmNstGstVmExitNotify.
  */
-static bool hmR0SvmVmRunCacheVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
+static bool hmR0SvmCacheVmcbNested(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     /*
@@ -2511,5 +2511,5 @@
         pVmcbNstGstCache->fLbrVirt                = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
         pCtx->hwvirt.svm.fHMCachedVmcb            = true;
-        Log4(("hmR0SvmVmRunCacheVmcb: Cached VMCB fields\n"));
+        Log4(("hmR0SvmCacheVmcbNested: Cached VMCB fields\n"));
     }
 
@@ -2523,10 +2523,10 @@
  * This is done the first time we enter nested-guest execution using SVM R0
  * until the nested-guest \#VMEXIT (not to be confused with physical CPU
- * \#VMEXITs which may or may not cause the nested-guest \#VMEXIT).
+ * \#VMEXITs which may or may not cause a corresponding nested-guest \#VMEXIT).
  *
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pCtx            Pointer to the nested-guest-CPU context.
  */
-static void hmR0SvmVmRunSetupVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
+static void hmR0SvmSetupVmcbNested(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
@@ -2536,5 +2536,5 @@
      * First cache the nested-guest VMCB fields we may potentially modify.
      */
-    bool const fVmcbCached = hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
+    bool const fVmcbCached = hmR0SvmCacheVmcbNested(pVCpu, pCtx);
     if (!fVmcbCached)
     {
@@ -2604,5 +2604,8 @@
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
 
-    PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); Assert(pVmcbNstGst);
+    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    Assert(pVmcbNstGst);
+
+    hmR0SvmSetupVmcbNested(pVCpu, pCtx);
 
     int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcbNstGst, pCtx);
@@ -2626,5 +2629,5 @@
 
 #ifdef VBOX_WITH_NESTED_HWVIRT
-    Assert(pVmcbNstGst->ctrl.IntCtrl.n.u1VGifEnable == 0);        /* Nested VGIF not supported yet. */
+    Assert(!pVmcbNstGst->ctrl.IntCtrl.n.u1VGifEnable);            /* Nested VGIF not supported yet. */
 #endif
 
@@ -2745,13 +2748,9 @@
         /*
          * Nested-guest interrupt pending.
-         * Sync/verify nested-guest's V_IRQ and its force-flag.
+         * Sync nested-guest's V_IRQ and its force-flag.
          */
-        if (!pVmcbCtrl->IntCtrl.n.u1VIrqPending)
-        {
-            if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
-        }
-        else
-            Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));
+        if (  !pVmcbCtrl->IntCtrl.n.u1VIrqPending
+            && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
     }
 #endif
@@ -3478,23 +3477,47 @@
  * Sets the virtual interrupt intercept control in the VMCB.
  *
- * @param   pVmcb       Pointer to the VM control block.
- */
-DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pVmcb   Pointer to the VM control block.
+ * @param   pCtx    Pointer to the guest-CPU context.
+ */
+DECLINLINE(void) hmR0SvmSetIntWindowExiting(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
     /*
-     * When AVIC isn't supported, indicate that a virtual interrupt is pending and to
-     * cause a #VMEXIT when the guest is ready to accept interrupts. At #VMEXIT, we
-     * then get the interrupt from the APIC (updating ISR at the right time) and
-     * inject the interrupt.
+     * When AVIC isn't supported, set up an interrupt window to cause a #VMEXIT when
+     * the guest is ready to accept interrupts. At #VMEXIT, we then get the interrupt
+     * from the APIC (updating ISR at the right time) and inject the interrupt.
      *
      * With AVIC is supported, we could make use of the asynchronously delivery without
      * #VMEXIT and we would be passing the AVIC page to SVM.
+     *
+     * In AMD-V, an interrupt window is achieved using a combination of
+     * V_IRQ (an interrupt is pending), V_IGN_TPR (ignore TPR priorities) and the
+     * VINTR intercept all being set.
      */
-    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
-    {
-        Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqPending == 0);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    /*
+     * Currently we don't overlay interrupt windows and if there's any V_IRQ pending
+     * in the nested-guest VMCB, we avoid setting up any interrupt window on behalf
+     * of the outer guest.
+     */
+    /** @todo Does this mean we end up prioritizing virtual interrupt
+     *        delivery/window over a physical interrupt (from the outer guest)
+     *        that might be pending? */
+    bool const fEnableIntWindow = !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
+    if (!fEnableIntWindow)
+    {
+        Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
+        Log4(("Nested-guest V_IRQ already pending\n"));
+    }
+#else
+    RT_NOREF2(pVCpu, pCtx);
+    bool const fEnableIntWindow = true;
+#endif
+    if (fEnableIntWindow)
+    {
+        Assert(pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR);
         pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
-        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VINTR;
-        pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
+        hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_VINTR);
         Log4(("Set VINTR intercept\n"));
     }
@@ -3507,57 +3530,22 @@
  * at this point of time.
  *
- * @param   pVmcb       Pointer to the VM control block.
- */
-DECLINLINE(void) hmR0SvmClearVirtIntrIntercept(PSVMVMCB pVmcb)
-{
-    if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
-    {
-        Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqPending == 1);
-        pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 0;
-        pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
-        pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pVmcb   Pointer to the VM control block.
+ * @param   pCtx    Pointer to the guest-CPU context.
+ */
+DECLINLINE(void) hmR0SvmClearIntWindowExiting(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+{
+    PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
+    if (    pVmcbCtrl->IntCtrl.n.u1VIrqPending
+        || (pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
+    {
+        pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;
+        pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
+        hmR0SvmClearCtrlIntercept(pVCpu, pCtx, pVmcb, SVM_CTRL_INTERCEPT_VINTR);
         Log4(("Cleared VINTR intercept\n"));
     }
 }
 
-
-/**
- * Sets the IRET intercept control in the VMCB which instructs AMD-V to cause a
- * \#VMEXIT as soon as a guest starts executing an IRET. This is used to unblock
- * virtual NMIs.
- *
- * @param   pVmcb       Pointer to the VM control block.
- */
-DECLINLINE(void) hmR0SvmSetIretIntercept(PSVMVMCB pVmcb)
-{
-    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET))
-    {
-        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_IRET;
-        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
-
-        Log4(("Setting IRET intercept\n"));
-    }
-}
-
-
-/**
- * Clears the IRET intercept control in the VMCB.
- *
- * @param   pVmcb       Pointer to the VM control block.
- */
-DECLINLINE(void) hmR0SvmClearIretIntercept(PSVMVMCB pVmcb)
-{
-    if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET)
-    {
-        pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_IRET;
-        pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
-
-        Log4(("Clearing IRET intercept\n"));
-    }
-}
-
 #ifdef VBOX_WITH_NESTED_HWVIRT
-
-
 /**
  * Evaluates the event to be delivered to the nested-guest and sets it as the
@@ -3570,124 +3558,110 @@
 static VBOXSTRICTRC hmR0SvmEvaluatePendingEventNested(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
-    Log4Func(("\n"));
+    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
 
     Assert(!pVCpu->hm.s.Event.fPending);
-
-    bool const fGif = pCtx->hwvirt.fGif;
-    if (fGif)
-    {
-        PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-
-        bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
-
-        /*
-         * Check if the nested-guest can receive NMIs.
-         * NMIs are higher priority than regular interrupts.
-         */
-        /** @todo SMI. SMIs take priority over NMIs. */
-        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))
-        {
-            bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
-            if (fBlockNmi)
-                hmR0SvmSetIretIntercept(pVmcbNstGst);
-            else if (fIntShadow)
+    Assert(pCtx->hwvirt.fGif);
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    Assert(pVmcb);
+
+    bool const fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
+    bool const fIntShadow  = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
+    bool const fBlockNmi   = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
+
+    Log4Func(("fVirtualGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool Intr. pending=%RTbool NMI pending=%RTbool\n",
+              fVirtualGif, fBlockNmi, fIntShadow, VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
+              VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)));
+
+    /** @todo SMI. SMIs take priority over NMIs. */
+
+    /*
+     * Check if the guest can receive NMIs.
+     * Nested NMIs are not allowed, see AMD spec. 8.1.4 "Masking External Interrupts".
+     * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
+     */
+    if (    VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)
+        && !fBlockNmi)
+    {
+        if (    fVirtualGif
+            && !fIntShadow)
+        {
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_NMI))
             {
-                /** @todo Figure this out, how we shall manage virt. intercept if the
-                 *        nested-guest already has one set and/or if we really need it? */
-                //hmR0SvmSetVirtIntrIntercept(pVmcbNstGst);
+                Log4(("Intercepting NMI -> #VMEXIT\n"));
+                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0, 0);
             }
-            else
+
+            Log4(("Setting NMI pending for injection\n"));
+            SVMEVENT Event;
+            Event.u = 0;
+            Event.n.u1Valid  = 1;
+            Event.n.u8Vector = X86_XCPT_NMI;
+            Event.n.u3Type   = SVM_EVENT_NMI;
+            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+        }
+        else if (!fVirtualGif)
+            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
+        else
+            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb, pCtx);
+    }
+    /*
+     * Check if the nested-guest can receive external interrupts (generated by
+     * the guest's PIC/APIC).
+     *
+     * External intercepts, NMI, SMI etc. from the physical CPU are -always- intercepted
+     * when executing using hardware-assisted SVM, see HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS.
+     *
+     * External interrupts that are generated for the outer guest may be intercepted
+     * depending on how the nested-guest VMCB was programmed by guest software.
+     *
+     * Physical interrupts always take priority over virtual interrupts,
+     * see AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
+     */
+    else if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
+             && !pVCpu->hm.s.fSingleInstruction)
+    {
+        if (    fVirtualGif
+            && !fIntShadow
+            &&  CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
+        {
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
             {
-                if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_NMI))
-                {
-                    Log4(("Intercepting NMI -> #VMEXIT\n"));
-                    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0, 0);
-                }
-
-                Log4(("Pending NMI\n"));
+                Log4(("Intercepting INTR -> #VMEXIT\n"));
+                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
+            }
+
+            uint8_t u8Interrupt;
+            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+            if (RT_SUCCESS(rc))
+            {
+                Log4(("Setting external interrupt %#x pending for injection\n", u8Interrupt));
                 SVMEVENT Event;
                 Event.u = 0;
                 Event.n.u1Valid  = 1;
-                Event.n.u8Vector = X86_XCPT_NMI;
-                Event.n.u3Type   = SVM_EVENT_NMI;
+                Event.n.u8Vector = u8Interrupt;
+                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
                 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-                hmR0SvmSetIretIntercept(pVmcbNstGst);
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
-                return VINF_SUCCESS;
             }
-        }
-
-        /*
-         * Check if the nested-guest can receive external interrupts (generated by
-         * the guest's PIC/APIC).
-         *
-         * External intercepts, NMI, SMI etc. from the physical CPU are -always- intercepted
-         * when executing using hardware-assisted SVM, see HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS.
-         *
-         * External interrupts that are generated for the outer guest may be intercepted
-         * depending on how the nested-guest VMCB was programmed by guest software.
-         *
-         * Physical interrupts always take priority over virtual interrupts,
-         * see AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
-         */
-        if (!fIntShadow)
-        {
-            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
-                && !pVCpu->hm.s.fSingleInstruction
-                && CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
+            else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
             {
-                if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
-                {
-                    Log4(("Intercepting external interrupt -> #VMEXIT\n"));
-                    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
-                }
-
-                uint8_t u8Interrupt;
-                int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
-                if (RT_SUCCESS(rc))
-                {
-                    Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
-                    SVMEVENT Event;
-                    Event.u = 0;
-                    Event.n.u1Valid  = 1;
-                    Event.n.u8Vector = u8Interrupt;
-                    Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
-                    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-                }
-                else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
-                {
-                    /*
-                     * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
-                     * updated eventually when the TPR is written by the guest.
-                     */
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
-                }
-                else
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
+                /*
+                 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
+                 * updated eventually when the TPR is written by the guest.
+                 */
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
             }
-
-            /*
-             * Check if the nested-guest is intercepting virtual (using V_IRQ and related fields)
-             * interrupt injection. The virtual interrupt injection itself, if any, will be done
-             * by the physical CPU.
-             */
-            /** @todo later explore this for performance reasons. Right now the hardware
-             *        takes care of virtual interrupt injection for nested-guest. */
-#if 0
-            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
-                && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR)
-                && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))
-            {
-                Log4(("Intercepting virtual interrupt -> #VMEXIT\n"));
-                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
-            }
+            else
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
+        }
+        else if (!fVirtualGif)
+            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
+        else
+            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb, pCtx);
+    }
+
+    return VINF_SUCCESS;
+}
 #endif
-        }
-    }
-
-    return VINF_SUCCESS;
-}
-#endif
-
 
 /**
@@ -3697,7 +3671,4 @@
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks Don't use this function when we are actively executing a
- *          nested-guest, use hmR0SvmEvaluatePendingEventNested instead.
  */
 static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
@@ -3705,87 +3676,86 @@
     HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
     Assert(!pVCpu->hm.s.Event.fPending);
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    Assert(pVmcb);
 
 #ifdef VBOX_WITH_NESTED_HWVIRT
-    bool const fGif = pCtx->hwvirt.fGif;
+    bool const fGif       = pCtx->hwvirt.fGif;
 #else
-    bool const fGif = true;
+    bool const fGif       = true;
 #endif
-    Log4Func(("fGif=%RTbool\n", fGif));
+    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
+    bool const fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
+    bool const fBlockNmi  = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
+
+    Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fBlockInt=%RTbool fIntShadow=%RTbool Intr. pending=%RTbool NMI pending=%RTbool\n",
+              fGif, fBlockNmi, fBlockInt, fIntShadow,
+              VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
+              VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)));
+
+    /** @todo SMI. SMIs take priority over NMIs. */
 
     /*
-     * If the global interrupt flag (GIF) isn't set, even NMIs and other events are blocked.
-     * See AMD spec. Table 15-10. "Effect of the GIF on Interrupt Handling".
+     * Check if the guest can receive NMIs.
+     * Nested NMIs are not allowed, see AMD spec. 8.1.4 "Masking External Interrupts".
+     * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
      */
-    if (fGif)
-    {
-        bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
-        bool const fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
-        bool const fBlockNmi  = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
-        PSVMVMCB pVmcb        = pVCpu->hm.s.svm.pVmcb;
-
-        Log4Func(("fBlockInt=%RTbool fIntShadow=%RTbool APIC/PIC_Pending=%RTbool\n", fBlockInt, fIntShadow,
-                  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
-
-        /** @todo SMI. SMIs take priority over NMIs. */
-        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))   /* NMI. NMIs take priority over regular interrupts. */
-        {
-            if (fBlockNmi)
-                hmR0SvmSetIretIntercept(pVmcb);
-            else if (fIntShadow)
-                hmR0SvmSetVirtIntrIntercept(pVmcb);
-            else
+    if (    VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)
+        && !fBlockNmi)
+    {
+        if (    fGif
+            && !fIntShadow)
+        {
+            Log4(("Setting NMI pending for injection\n"));
+            SVMEVENT Event;
+            Event.u = 0;
+            Event.n.u1Valid  = 1;
+            Event.n.u8Vector = X86_XCPT_NMI;
+            Event.n.u3Type   = SVM_EVENT_NMI;
+            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+        }
+        else if (!fGif)
+            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
+        else
+            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb, pCtx);
+    }
+    /*
+     * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
+     * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC.
+     */
+    else if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
+             && !pVCpu->hm.s.fSingleInstruction)
+    {
+        if (    fGif
+            && !fBlockInt
+            && !fIntShadow)
+        {
+            uint8_t u8Interrupt;
+            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+            if (RT_SUCCESS(rc))
             {
-                Log4(("Pending NMI\n"));
-
+                Log4(("Setting external interrupt %#x pending for injection\n", u8Interrupt));
                 SVMEVENT Event;
                 Event.u = 0;
                 Event.n.u1Valid  = 1;
-                Event.n.u8Vector = X86_XCPT_NMI;
-                Event.n.u3Type   = SVM_EVENT_NMI;
-
+                Event.n.u8Vector = u8Interrupt;
+                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
                 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-                hmR0SvmSetIretIntercept(pVmcb);
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
-                return;
             }
-        }
-        else if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
-                 && !pVCpu->hm.s.fSingleInstruction)
-        {
-            /*
-             * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
-             * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC.
-             */
-            if (   !fBlockInt
-                && !fIntShadow)
+            else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
             {
-                uint8_t u8Interrupt;
-                int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
-                if (RT_SUCCESS(rc))
-                {
-                    Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
-
-                    SVMEVENT Event;
-                    Event.u = 0;
-                    Event.n.u1Valid  = 1;
-                    Event.n.u8Vector = u8Interrupt;
-                    Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
-
-                    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-                }
-                else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
-                {
-                    /*
-                     * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
-                     * updated eventually when the TPR is written by the guest.
-                     */
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
-                }
-                else
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
+                /*
+                 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
+                 * updated eventually when the TPR is written by the guest.
+                 */
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
             }
             else
-                hmR0SvmSetVirtIntrIntercept(pVmcb);
-        }
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
+        }
+        else if (!fGif)
+            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
+        else
+            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb, pCtx);
     }
 }
@@ -3798,4 +3768,8 @@
  * @param   pCtx        Pointer to the guest-CPU context.
  * @param   pVmcb       Pointer to the VM control block.
+ *
+ * @remarks Must only be called when we are guaranteed to enter
+ *          hardware-assisted SVM execution and not return to ring-3
+ *          prematurely.
  */
 static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb)
@@ -3842,4 +3816,19 @@
 
         /*
+         * Before injecting an NMI we must set VMCPU_FF_BLOCK_NMIS to prevent nested NMIs. We do this only
+         * when we are surely going to inject the NMI as otherwise if we return to ring-3 prematurely we
+         * could leave NMIs blocked indefinitely upon re-entry into SVM R0.
+         *
+         * With VT-x, this is handled by the Guest interruptibility information VMCS field which will set
+         * the VMCS field after actually delivering the NMI which we read on VM-exit to determine the state.
+         */
+        if (    Event.n.u3Type   == SVM_EVENT_NMI
+            &&  Event.n.u8Vector == X86_XCPT_NMI
+            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+        {
+            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
+        }
+
+        /*
          * Inject it (update VMCB for injection by the hardware).
          */
@@ -3855,4 +3844,12 @@
     else
         Assert(pVmcb->ctrl.EventInject.n.u1Valid == 0);
+
+    /*
+     * We could have injected an NMI through IEM and continue guest execution using
+     * hardware-assisted SVM. In which case, we would not have any events pending (above)
+     * but we still need to intercept IRET in order to eventually clear NMI inhibition.
+     */
+    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+        hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_IRET);
 
     /*
@@ -4165,9 +4162,4 @@
 
     /*
-     * Set up the nested-guest VMCB for execution using hardware-assisted SVM.
-     */
-    hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
-
-    /*
      * Load the nested-guest state.
      */
@@ -4212,26 +4204,4 @@
         return VINF_EM_RAW_INTERRUPT;
     }
-
-    /*
-     * If we are injecting an NMI, we must set VMCPU_FF_BLOCK_NMIS only when we are going to execute
-     * guest code for certain (no exits to ring-3). Otherwise, we could re-read the flag on re-entry into
-     * AMD-V and conclude that NMI inhibition is active when we have not even delivered the NMI.
-     *
-     * With VT-x, this is handled by the Guest interruptibility information VMCS field which will set the
-     * VMCS field after actually delivering the NMI which we read on VM-exit to determine the state.
-     */
-    if (pVCpu->hm.s.Event.fPending)
-    {
-        SVMEVENT Event;
-        Event.u = pVCpu->hm.s.Event.u64IntInfo;
-        if (    Event.n.u1Valid
-            &&  Event.n.u3Type == SVM_EVENT_NMI
-            &&  Event.n.u8Vector == X86_XCPT_NMI
-            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
-        {
-            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
-        }
-    }
-
     return VINF_SUCCESS;
 }
@@ -4342,25 +4312,4 @@
     }
 
-    /*
-     * If we are injecting an NMI, we must set VMCPU_FF_BLOCK_NMIS only when we are going to execute
-     * guest code for certain (no exits to ring-3). Otherwise, we could re-read the flag on re-entry into
-     * AMD-V and conclude that NMI inhibition is active when we have not even delivered the NMI.
-     *
-     * With VT-x, this is handled by the Guest interruptibility information VMCS field which will set the
-     * VMCS field after actually delivering the NMI which we read on VM-exit to determine the state.
-     */
-    if (pVCpu->hm.s.Event.fPending)
-    {
-        SVMEVENT Event;
-        Event.u = pVCpu->hm.s.Event.u64IntInfo;
-        if (    Event.n.u1Valid
-            &&  Event.n.u3Type == SVM_EVENT_NMI
-            &&  Event.n.u8Vector == X86_XCPT_NMI
-            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
-        {
-            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
-        }
-    }
-
     return VINF_SUCCESS;
 }
@@ -4389,6 +4338,7 @@
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);            /* Indicate the start of guest execution. */
 
-    PVM      pVM = pVCpu->CTX_SUFF(pVM);
+    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
     PSVMVMCB pVmcb = pSvmTransient->pVmcb;
+
     hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcb);
 
@@ -6153,4 +6103,5 @@
     {
         PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+        Assert(pVmcb);
         Assert(pVmcb->ctrl.u64NextRIP);
         AssertRelease(pVmcb->ctrl.u64NextRIP - pCtx->rip == cb);    /* temporary, remove later */
@@ -7165,5 +7116,5 @@
     /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
     PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
-    hmR0SvmClearVirtIntrIntercept(pVmcb);
+    hmR0SvmClearIntWindowExiting(pVCpu, pVmcb, pCtx);
 
     /* Deliver the pending interrupt via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
@@ -7272,9 +7223,10 @@
 
     /* Clear NMI blocking. */
-    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
+    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
 
     /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
     PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
-    hmR0SvmClearIretIntercept(pVmcb);
+    hmR0SvmClearCtrlIntercept(pVCpu, pCtx, pVmcb, SVM_CTRL_INTERCEPT_IRET);
 
     /* Deliver the pending NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
@@ -7547,6 +7499,4 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
-    /** @todo if triple-fault is returned in nested-guest scenario convert to a
-     *        shutdown VMEXIT. */
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
 
@@ -7733,13 +7683,12 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
-#ifdef VBOX_STRICT
-    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
-    Assert(pVmcb);
-    Assert(!pVmcb->ctrl.IntCtrl.n.u1VGifEnable);
-    RT_NOREF(pVmcb);
-#endif
-
-    /** @todo Stat. */
-    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitStgi); */
+    /*
+     * When VGIF is not used we always intercept STGI instructions. When VGIF is used,
+     * we only intercept STGI when events are pending for GIF to become 1.
+     */
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
+        hmR0SvmClearCtrlIntercept(pVCpu, pCtx, pVmcb, SVM_CTRL_INTERCEPT_STGI);
+
     uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
     VBOXSTRICTRC rcStrict = IEMExecDecodedStgi(pVCpu, cbInstr);
@@ -7762,6 +7711,4 @@
 #endif
 
-    /** @todo Stat. */
-    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmload); */
     uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
     VBOXSTRICTRC rcStrict = IEMExecDecodedVmload(pVCpu, cbInstr);
@@ -7791,6 +7738,4 @@
 #endif
 
-    /** @todo Stat. */
-    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmsave); */
     uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
     VBOXSTRICTRC rcStrict = IEMExecDecodedVmsave(pVCpu, cbInstr);
@@ -7842,8 +7787,6 @@
 
     /* If this #DB is the result of delivering an event, go back to the interpreter. */
-    /** @todo if triple-fault is returned in nested-guest scenario convert to a
-     *        shutdown VMEXIT. */
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
-    if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
+    if (pVCpu->hm.s.Event.fPending)
     {
         STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
@@ -7864,6 +7807,4 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
-    /** @todo if triple-fault is returned in nested-guest scenario convert to a
-     *        shutdown VMEXIT. */
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
 
Index: /trunk/src/VBox/VMM/VMMR3/EM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/EM.cpp	(revision 72064)
+++ /trunk/src/VBox/VMM/VMMR3/EM.cpp	(revision 72065)
@@ -1674,10 +1674,11 @@
     if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     {
-        PVM  pVM  = pVCpu->CTX_SUFF(pVM);
-        bool fGif = pCtx->hwvirt.fGif;
+        PVM pVM  = pVCpu->CTX_SUFF(pVM);
+        Assert(pCtx->hwvirt.fGif);
+        bool fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
 #ifdef VBOX_WITH_RAW_MODE
-        fGif &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
-#endif
-        if (fGif)
+        fVirtualGif     &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
+#endif
+        if (fVirtualGif)
         {
             if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
