Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 68310)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 68311)
@@ -314,4 +314,6 @@
 static FNSVMEXITHANDLER hmR0SvmExitInvlpga;
 static FNSVMEXITHANDLER hmR0SvmExitVmrun;
+static FNSVMEXITHANDLER hmR0SvmNestedExitIret;
+static FNSVMEXITHANDLER hmR0SvmNestedExitVIntr;
 #endif
 /** @} */
@@ -3026,5 +3028,11 @@
             hmR0SvmSetIretIntercept(pVmcbNstGst);
         else if (fIntShadow)
+        {
+            /** @todo Figure this out, how we shall manage virt. intercept if the
+             *        nested-guest already has one set and/or if we really need it? */
+#if 0
             hmR0SvmSetVirtIntrIntercept(pVmcbNstGst);
+#endif
+        }
         else
         {
@@ -3079,5 +3087,11 @@
         }
         else
+        {
+            /** @todo Figure this out, how we shall manage virt. intercept if the
+             *        nested-guest already has one set and/or if we really need it? */
+#if 0
             hmR0SvmSetVirtIntrIntercept(pVmcbNstGst);
+#endif
+        }
     }
     /*
@@ -3140,4 +3154,5 @@
             hmR0SvmSetIretIntercept(pVmcb);
             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+            return;
         }
     }
@@ -4628,5 +4643,5 @@
             if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
                 return hmR0SvmExecVmexit(pVCpu, pCtx);
-            return hmR0SvmExitVIntr(pVCpu, pCtx, pSvmTransient);
+            return hmR0SvmNestedExitVIntr(pVCpu, pCtx, pSvmTransient);
         }
 
@@ -4741,5 +4756,5 @@
                     if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET)
                         return hmR0SvmExecVmexit(pVCpu, pCtx);
-                    return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient);
+                    return hmR0SvmNestedExitIret(pVCpu, pCtx, pSvmTransient);
                 }
 
@@ -7461,4 +7476,46 @@
     return VBOXSTRICTRC_VAL(rcStrict);
 }
+
+/**
+ * Nested-guest \#VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional \#VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmNestedExitIret(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    /* Clear NMI blocking. */
+    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
+
+    /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
+    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    hmR0SvmClearIretIntercept(pVmcbNstGst);
+
+    /* Deliver the pending NMI via hmR0SvmEvaluatePendingEventNested() and resume guest execution. */
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Nested-guest \#VMEXIT handler for virtual interrupt (SVM_EXIT_VINTR).
+ * Conditional \#VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmNestedExitVIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    /* No virtual interrupts pending, we'll inject the current one/NMI before reentry. */
+    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    pVmcbNstGst->ctrl.IntCtrl.n.u1VIrqPending = 0;
+    pVmcbNstGst->ctrl.IntCtrl.n.u8VIntrVector = 0;
+
+    /* Indicate that we no longer need to #VMEXIT when the nested-guest is ready to receive interrupts/NMIs, it is now ready. */
+    pVmcbNstGst->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
+    pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+
+    /* Deliver the pending interrupt/NMI via hmR0SvmEvaluatePendingEventNested() and resume guest execution. */
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
+    return VINF_SUCCESS;
+}
+
 #endif /* VBOX_WITH_NESTED_HWVIRT */
 
