Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 45378)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 45379)
@@ -1790,4 +1790,8 @@
     /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
     rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, 0xffffffffffffffffULL);
+
+    /* Setup debug controls */
+    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);                /** @todo think about this. */
+    rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS,  0);    /** @todo Intel spec. 26.6.3 think about this */
     AssertRCReturn(rc, rc);
     return rc;
@@ -2932,4 +2936,5 @@
         return VINF_SUCCESS;
 
+#ifdef DEBUG
     /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
     if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
@@ -2940,4 +2945,5 @@
         Assert((pCtx->dr[7] & 0x400) == 0x400);              /* bit 10 is reserved (MB1). */
     }
+#endif
 
     int rc                = VERR_INTERNAL_ERROR_5;
@@ -3014,9 +3020,4 @@
     /* The guest's view of its DR7 is unblemished. */
     rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
-
-    /* Setup other debug controls */
-    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);                /** @todo think about this. */
-    rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS,  0);    /** @todo Intel spec. 26.6.3 think about this */
-    AssertRCReturn(rc, rc);
 
     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
@@ -4534,10 +4535,9 @@
 
 /**
- * Saves, if necessary, any event that occurred during event delivery as a
- * pending VMX event to handle before the next VM-entry or to be translated as a
- * TRPM event in the case of exiting to ring-3.
+ * Handle a condition that occurred while delivering an event through the guest
+ * IDT.
  *
  * @returns VBox status code (informational error codes included).
- * @retval VINF_SUCCESS if we should continue handling VM-exits.
+ * @retval VINF_SUCCESS if we should continue handling the VM-exit.
  * @retval VINF_VMX_DOUBLE_FAULT if a #DF condition was detected and we ought to
  *         continue execution of the guest which will delivery the #DF.
@@ -4552,16 +4552,7 @@
  *
  * @remarks No-long-jump zone!!!
- * @remarks Called unconditionally after every VM-exit.
- *
- */
-static int hmR0VmxSavePendingEventDueToEventDelivery(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
-{
-
-    Assert(pVCpu);
-    Assert(pVmxTransient);
-
-    if (RT_UNLIKELY(pVmxTransient->fVMEntryFailed))     /* Don't bother with pending events if the VM-entry itself failed. */
-        return VINF_SUCCESS;
-
+ */
+static int hmR0VmxCheckExitDueToEventDelivery(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
+{
     int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
     AssertRCReturn(rc, rc);
@@ -5064,8 +5055,8 @@
 
     /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
-    if (   pVM->hm.s.fNestedPaging
-        && CPUMIsGuestPagingEnabledEx(pMixedCtx))
-    {
-        if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_CR3))
+    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_CR3))
+    {
+        if (   pVM->hm.s.fNestedPaging
+            && CPUMIsGuestPagingEnabledEx(pMixedCtx))
         {
             rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &uVal);
@@ -5093,10 +5084,7 @@
                 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
             }
-
-            pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_CR3;
-        }
-    }
-    else
+        }
         pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_CR3;
+    }
     return rc;
 }
@@ -5535,8 +5523,17 @@
     AssertRC(rc);
 
-    /* Restore debug registers if necessary and resync on next R0 re-entry. */
+    /* Restore FPU state if necessary and resync on next R0 reentry. */
+    if (CPUMIsGuestFPUStateActive(pVCpu))
+    {
+        CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
+        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+    }
+
+    /* Restore debug registers if necessary and resync on next R0 reentry. */
     if (CPUMIsGuestDebugStateActive(pVCpu))
     {
         CPUMR0SaveGuestDebugState(pVM, pVCpu, pMixedCtx, true /* save DR6 */);
+        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     }
@@ -5544,5 +5541,5 @@
     {
         CPUMR0LoadHostDebugState(pVM, pVCpu);
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+        Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
     }
 
@@ -6219,6 +6216,5 @@
     LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
 
-    /* Currently we always atleast reload CR0 (longjmps included because of FPU state sharing). */
-    Log(("LdGstFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
+    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
 
     /* Determine real-on-v86 mode. */
@@ -6266,4 +6262,6 @@
              ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
               pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
+
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     return rc;
 }
@@ -6382,5 +6380,9 @@
     /* Load the required guest state bits (for guest-state changes in the inner execution loop). */
     Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
-    int rc = VMXR0LoadGuestState(pVM, pVCpu, pMixedCtx);
+    int rc;
+    if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_INTR_STATE)
+        rc = hmR0VmxLoadGuestIntrState(pVM, pVCpu, pMixedCtx);
+    else
+        rc = VMXR0LoadGuestState(pVM, pVCpu, pMixedCtx);
     AssertRC(rc);
     AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
@@ -6444,5 +6446,5 @@
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
-    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatInGC, x);
+    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
 
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
@@ -6450,5 +6452,5 @@
     pVCpu->hm.s.vmx.fUpdatedGuestState = 0;                     /* Exits/longjmps to ring-3 requires saving the guest state. */
     pVmxTransient->fVmcsFieldsRead     = 0;                     /* Transient fields need to be read from the VMCS. */
-    pVmxTransient->fVectoringPF        = false;                 /* Clear the vectoring page-fault flag. */
+    pVmxTransient->fVectoringPF        = false;                 /* Vectoring page-fault needs to be determined later. */
 
     if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
@@ -6528,5 +6530,4 @@
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
 
     VMXTRANSIENT VmxTransient;
@@ -6543,4 +6544,5 @@
 
         /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
+        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
         rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
         if (rc != VINF_SUCCESS)
@@ -6566,30 +6568,26 @@
         if (RT_UNLIKELY(rc != VINF_SUCCESS))        /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
         {
+            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
             hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
             return rc;
         }
 
-        /* Handle any exception that caused a VM-exit while delivering an event to the guest.  */
-        rc = hmR0VmxSavePendingEventDueToEventDelivery(pVM, pVCpu, pCtx, &VmxTransient);
-        if (RT_LIKELY(rc == VINF_SUCCESS))
-        {
-            /* Handle VM-exits. */
-            AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("Invalid VM-exit %#x\n", VmxTransient.uExitReason));
-            STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
-            rc = (*s_apfnVMExitHandlers[VmxTransient.uExitReason])(pVM, pVCpu, pCtx, &VmxTransient);
-            if (rc != VINF_SUCCESS)
-                break;
-            else if (cLoops > pVM->hm.s.cMaxResumeLoops)
-            {
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
-                rc = VINF_EM_RAW_INTERRUPT;
-                break;
-            }
-        }
-        else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        /* Handle the VM-exit. */
+        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
+        rc = (*s_apfnVMExitHandlers[VmxTransient.uExitReason])(pVM, pVCpu, pCtx, &VmxTransient);
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        if (rc != VINF_SUCCESS)
             break;
-        /* else continue guest execution for (VINF_VMX_DOUBLE_FAULT) */
-    }
-
+        else if (cLoops > pVM->hm.s.cMaxResumeLoops)
+        {
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
+            rc = VINF_EM_RAW_INTERRUPT;
+            break;
+        }
+    }
+
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
     if (rc == VERR_EM_INTERPRETER)
         rc = VINF_EM_RAW_EMULATE_INSTR;
@@ -6644,5 +6642,5 @@
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
     STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
-    return VINF_EM_RAW_INTERRUPT;
+    return VINF_SUCCESS;
 }
 
@@ -6655,6 +6653,4 @@
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
     int rc = hmR0VmxReadExitIntrInfoVmcs(pVmxTransient);
-    rc     = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
-    rc    |= hmR0VmxReadExitIntrErrorCodeVmcs(pVmxTransient);
     AssertRCReturn(rc, rc);
 
@@ -6668,4 +6664,11 @@
     uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
     Assert(VMX_EXIT_INTERRUPTION_INFO_VALID(uExitIntrInfo));
+
+    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
+    rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
+    if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
+        return VINF_SUCCESS;
+    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        return rc;
 
     uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
@@ -6707,4 +6710,7 @@
                         Assert(pVM->hm.s.vmx.pRealModeTSS);
                         Assert(PDMVmmDevHeapIsEnabled(pVM));
+                        rc     = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+                        rc    |= hmR0VmxReadExitIntrErrorCodeVmcs(pVmxTransient);
+                        AssertRCReturn(rc, rc);
                         rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
                                                     VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntrInfo),
@@ -7788,7 +7794,32 @@
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
 
+    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
+    int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient);
+    AssertRCReturn(rc, rc);
+    if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
+    {
+        rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
+        AssertRCReturn(rc, rc);
+        if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
+        {
+            uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
+            if (   uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
+                && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
+                && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
+            {
+                /* Save it as a pending event which will be converted to a TRPM event on the way out to ring-3. */
+                pVCpu->hm.s.Event.fPending = true;
+                pVCpu->hm.s.Event.u64IntrInfo = pVmxTransient->uIdtVectoringInfo;
+                rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
+                AssertRCReturn(rc, rc);
+                if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringErrorCode))
+                    pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
+                else
+                    pVCpu->hm.s.Event.u32ErrCode = 0;
+            }
+        }
+    }
     /** @todo Emulate task switch someday, currently just going back to ring-3 for
      *        emulation. */
-
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
     return VERR_EM_INTERPRETER;
@@ -7819,7 +7850,21 @@
     int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient);
 
+    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
+    rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
+    if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
+        return VINF_SUCCESS;
+    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        return rc;
+
+#if 0
     /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
      *   just sync the whole thing. */
-    rc    |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
+    rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
+#else
+    /* Aggressive state sync. for now. */
+    rc = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+#endif
     AssertRCReturn(rc, rc);
 
@@ -7840,8 +7885,8 @@
             GCPhys &= PAGE_BASE_GC_MASK;
             GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
-            Log(("ApicAccess %RGp %#x\n", GCPhys, VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
             VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu, (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
                                                   CPUMCTX2CORE(pMixedCtx), GCPhys);
             rc = VBOXSTRICTRC_VAL(rc2);
+            Log(("ApicAccess %RGp %#x\n", GCPhys, VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
             if (   rc == VINF_SUCCESS
                 || rc == VERR_PAGE_TABLE_NOT_PRESENT
@@ -7957,12 +8002,24 @@
     Assert(pVM->hm.s.fNestedPaging);
 
+    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
+    int rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
+    if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
+        return VINF_SUCCESS;
+    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        return rc;
+
     RTGCPHYS GCPhys = 0;
-    int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
+    rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
     AssertRCReturn(rc, rc);
 
+#if 0
     rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);     /** @todo Can we do better?  */
+#else
+    /* Aggressive state sync. for now. */
+    rc  = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+#endif
     AssertRCReturn(rc, rc);
-
-    Log(("EPT misconfig at %#RX64 RIP=%#RX64\n", GCPhys, pMixedCtx->rip));
 
     /*
@@ -7974,6 +8031,6 @@
      */
     VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
+    Log(("EPT misconfig at %#RX64 RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
     rc = VBOXSTRICTRC_VAL(rc2);
-    Log(("EPT misconfig rc=%d\n",  rc));
     if (   rc == VINF_SUCCESS
         || rc == VERR_PAGE_TABLE_NOT_PRESENT
@@ -7997,8 +8054,22 @@
     Assert(pVM->hm.s.fNestedPaging);
 
+    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
+    int rc = hmR0VmxCheckExitDueToEventDelivery(pVM, pVCpu, pMixedCtx, pVmxTransient);
+    if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
+        return VINF_SUCCESS;
+    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
+        return rc;
+
     RTGCPHYS GCPhys = 0;
-    int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
-    rc    |= hmR0VmxReadExitQualificationVmcs(pVmxTransient);
-    rc    |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);  /** @todo can we do better? */
+    rc  = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
+    rc |= hmR0VmxReadExitQualificationVmcs(pVmxTransient);
+#if 0
+    rc |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);     /** @todo Can we do better?  */
+#else
+    /* Aggressive state sync. for now. */
+    rc  = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+#endif
     AssertRCReturn(rc, rc);
 
@@ -8178,5 +8249,4 @@
     {
         Assert(CPUMIsGuestFPUStateActive(pVCpu));
-
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
@@ -8444,6 +8514,6 @@
     VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
 
-    /* Re-inject the exception into the guest. This cannot be a double-fault condition which are handled in
-       hmR0VmxSavePendingEventDueToEventDelivery(). */
+    /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
+       hmR0VmxCheckExitDueToEventDelivery(). */
     int rc = hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
                                     VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
