VirtualBox

Changeset 52041 in vbox


Ignore:
Timestamp:
Jul 15, 2014 3:43:30 PM (10 years ago)
Author:
vboxsync
Message:

VMM: Fix NMI handling.

Location:
trunk
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/hm_vmx.h

    r51946 r52041  
    13511351/** Use TPR shadow. */
    13521352#define VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW                  RT_BIT(21)
    1353 /** VM-exit when virtual nmi blocking is disabled. */
     1353/** VM-exit when virtual NMI blocking is disabled. */
    13541354#define VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT                 RT_BIT(22)
    13551355/** VM-exit when executing a MOV DRx instruction. */
     
    14891489#define VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID               RT_BIT(11)
    14901490#define VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(a)         RT_BOOL((a) & VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID)
    1491 #define VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(a)                 ((a) & RT_BIT(12))
     1491#define VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(a)            ((a) & RT_BIT(12))
    14921492#define VMX_EXIT_INTERRUPTION_INFO_VALID                          RT_BIT(31)
    14931493#define VMX_EXIT_INTERRUPTION_INFO_IS_VALID(a)                    RT_BOOL((a) & RT_BIT(31))
  • trunk/include/VBox/vmm/vm.h

    r51934 r52041  
    405405/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
    406406#define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_32(24)
     407/** Inhibit non-maskable interrupts. */
     408#define VMCPU_FF_INHIBIT_NMIS               RT_BIT_32(25)
    407409#ifdef VBOX_WITH_RAW_MODE
    408410/** CSAM needs to scan the page that's being executed */
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r51434 r52041  
    29892989IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
    29902990{
     2991    /*
     2992     * First, clear NMI inhibition before causing any exceptions.
     2993     */
     2994    PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
     2995    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_NMIS);
     2996
    29912997    /*
    29922998     * Call a mode specific worker.
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r52009 r52041  
    229229typedef enum SVMMSREXITREAD
    230230{
    231     /** Reading this MSR causes a VM-exit. */
     231    /** Reading this MSR causes a #VMEXIT. */
    232232    SVMMSREXIT_INTERCEPT_READ = 0xb,
    233     /** Reading this MSR does not cause a VM-exit. */
     233    /** Reading this MSR does not cause a #VMEXIT. */
    234234    SVMMSREXIT_PASSTHRU_READ
    235235} SVMMSREXITREAD;
     
    240240typedef enum SVMMSREXITWRITE
    241241{
    242     /** Writing to this MSR causes a VM-exit. */
     242    /** Writing to this MSR causes a #VMEXIT. */
    243243    SVMMSREXIT_INTERCEPT_WRITE = 0xd,
    244     /** Writing to this MSR does not cause a VM-exit. */
     244    /** Writing to this MSR does not cause a #VMEXIT. */
    245245    SVMMSREXIT_PASSTHRU_WRITE
    246246} SVMMSREXITWRITE;
    247247
    248248/**
    249  * SVM VM-exit handler.
     249 * SVM #VMEXIT handler.
    250250 *
    251251 * @returns VBox status code.
     
    263263static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    264264
    265 /** @name VM-exit handlers.
     265/** @name #VMEXIT handlers.
    266266 * @{
    267267 */
     
    289289static FNSVMEXITHANDLER hmR0SvmExitTaskSwitch;
    290290static FNSVMEXITHANDLER hmR0SvmExitVmmCall;
     291static FNSVMEXITHANDLER hmR0SvmExitIret;
    291292static FNSVMEXITHANDLER hmR0SvmExitXcptPF;
    292293static FNSVMEXITHANDLER hmR0SvmExitXcptNM;
     
    692693
    693694        /* Set up unconditional intercepts and conditions. */
    694         pVmcb->ctrl.u32InterceptCtrl1 =   SVM_CTRL1_INTERCEPT_INTR          /* External interrupt causes a VM-exit. */
    695                                         | SVM_CTRL1_INTERCEPT_NMI           /* Non-Maskable Interrupts causes a VM-exit. */
    696                                         | SVM_CTRL1_INTERCEPT_INIT          /* INIT signal causes a VM-exit. */
    697                                         | SVM_CTRL1_INTERCEPT_RDPMC         /* RDPMC causes a VM-exit. */
    698                                         | SVM_CTRL1_INTERCEPT_CPUID         /* CPUID causes a VM-exit. */
    699                                         | SVM_CTRL1_INTERCEPT_RSM           /* RSM causes a VM-exit. */
    700                                         | SVM_CTRL1_INTERCEPT_HLT           /* HLT causes a VM-exit. */
    701                                         | SVM_CTRL1_INTERCEPT_INOUT_BITMAP  /* Use the IOPM to cause IOIO VM-exits. */
    702                                         | SVM_CTRL1_INTERCEPT_MSR_SHADOW    /* MSR access not covered by MSRPM causes a VM-exit.*/
    703                                         | SVM_CTRL1_INTERCEPT_INVLPGA       /* INVLPGA causes a VM-exit. */
    704                                         | SVM_CTRL1_INTERCEPT_SHUTDOWN      /* Shutdown events causes a VM-exit. */
     695        pVmcb->ctrl.u32InterceptCtrl1 =   SVM_CTRL1_INTERCEPT_INTR          /* External interrupt causes a #VMEXIT. */
     696                                        | SVM_CTRL1_INTERCEPT_NMI           /* Non-maskable interrupts cause a #VMEXIT. */
     697                                        | SVM_CTRL1_INTERCEPT_INIT          /* INIT signal causes a #VMEXIT. */
     698                                        | SVM_CTRL1_INTERCEPT_RDPMC         /* RDPMC causes a #VMEXIT. */
     699                                        | SVM_CTRL1_INTERCEPT_CPUID         /* CPUID causes a #VMEXIT. */
     700                                        | SVM_CTRL1_INTERCEPT_RSM           /* RSM causes a #VMEXIT. */
     701                                        | SVM_CTRL1_INTERCEPT_HLT           /* HLT causes a #VMEXIT. */
     702                                        | SVM_CTRL1_INTERCEPT_INOUT_BITMAP  /* Use the IOPM to cause IOIO #VMEXITs. */
     703                                        | SVM_CTRL1_INTERCEPT_MSR_SHADOW    /* MSR access not covered by MSRPM causes a #VMEXIT.*/
     704                                        | SVM_CTRL1_INTERCEPT_INVLPGA       /* INVLPGA causes a #VMEXIT. */
     705                                        | SVM_CTRL1_INTERCEPT_SHUTDOWN      /* Shutdown events cause a #VMEXIT. */
    705706                                        | SVM_CTRL1_INTERCEPT_FERR_FREEZE;  /* Intercept "freezing" during legacy FPU handling. */
    706707
    707         pVmcb->ctrl.u32InterceptCtrl2 =   SVM_CTRL2_INTERCEPT_VMRUN         /* VMRUN causes a VM-exit. */
    708                                         | SVM_CTRL2_INTERCEPT_VMMCALL       /* VMMCALL causes a VM-exit. */
    709                                         | SVM_CTRL2_INTERCEPT_VMLOAD        /* VMLOAD causes a VM-exit. */
    710                                         | SVM_CTRL2_INTERCEPT_VMSAVE        /* VMSAVE causes a VM-exit. */
    711                                         | SVM_CTRL2_INTERCEPT_STGI          /* STGI causes a VM-exit. */
    712                                         | SVM_CTRL2_INTERCEPT_CLGI          /* CLGI causes a VM-exit. */
    713                                         | SVM_CTRL2_INTERCEPT_SKINIT        /* SKINIT causes a VM-exit. */
    714                                         | SVM_CTRL2_INTERCEPT_WBINVD        /* WBINVD causes a VM-exit. */
    715                                         | SVM_CTRL2_INTERCEPT_MONITOR       /* MONITOR causes a VM-exit. */
    716                                         | SVM_CTRL2_INTERCEPT_MWAIT;        /* MWAIT causes a VM-exit. */
     708        pVmcb->ctrl.u32InterceptCtrl2 =   SVM_CTRL2_INTERCEPT_VMRUN         /* VMRUN causes a #VMEXIT. */
     709                                        | SVM_CTRL2_INTERCEPT_VMMCALL       /* VMMCALL causes a #VMEXIT. */
     710                                        | SVM_CTRL2_INTERCEPT_VMLOAD        /* VMLOAD causes a #VMEXIT. */
     711                                        | SVM_CTRL2_INTERCEPT_VMSAVE        /* VMSAVE causes a #VMEXIT. */
     712                                        | SVM_CTRL2_INTERCEPT_STGI          /* STGI causes a #VMEXIT. */
     713                                        | SVM_CTRL2_INTERCEPT_CLGI          /* CLGI causes a #VMEXIT. */
     714                                        | SVM_CTRL2_INTERCEPT_SKINIT        /* SKINIT causes a #VMEXIT. */
     715                                        | SVM_CTRL2_INTERCEPT_WBINVD        /* WBINVD causes a #VMEXIT. */
     716                                        | SVM_CTRL2_INTERCEPT_MONITOR       /* MONITOR causes a #VMEXIT. */
     717                                        | SVM_CTRL2_INTERCEPT_MWAIT;        /* MWAIT causes a #VMEXIT. */
    717718
    718719        /* CR0, CR4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
     
    11331134        {
    11341135            u64GuestCR0 |= X86_CR0_PG;     /* When Nested Paging is not available, use shadow page tables. */
    1135             u64GuestCR0 |= X86_CR0_WP;     /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
     1136            u64GuestCR0 |= X86_CR0_WP;     /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
    11361137        }
    11371138
     
    11531154        else
    11541155        {
    1155             fInterceptNM = true;           /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
     1156            fInterceptNM = true;           /* Guest FPU inactive, #VMEXIT on #NM for lazy FPU loading. */
    11561157            u64GuestCR0 |=  X86_CR0_TS     /* Guest can task switch quickly and do lazy FPU syncing. */
    11571158                          | X86_CR0_MP;    /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
     
    18861887    if (pVmcb->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
    18871888        EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
    1888     else
     1889    else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    18891890        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    18901891
     
    25252526
    25262527/**
     2528 * Sets the IRET intercept control in the VMCB which instructs AMD-V to cause a
     2529 * #VMEXIT as soon as a guest starts executing an IRET. This is used to unblock
     2530 * virtual NMIs.
     2531 *
     2532 * @param pVmcb         Pointer to the VM control block.
     2533 */
     2534DECLINLINE(void) hmR0SvmSetIretIntercept(PSVMVMCB pVmcb)
     2535{
     2536    if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET))
     2537    {
     2538        pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_IRET;
     2539        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
     2540
     2541        Log4(("Setting IRET intercept\n"));
     2542    }
     2543}
     2544
     2545
     2546/**
     2547 * Clears the IRET intercept control in the VMCB.
     2548 *
     2549 * @param pVmcb         Pointer to the VM control block.
     2550 */
     2551DECLINLINE(void) hmR0SvmClearIretIntercept(PSVMVMCB pVmcb)
     2552{
     2553    if (pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET)
     2554    {
     2555        pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_IRET;
     2556        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
     2557
     2558        Log4(("Clearing IRET intercept\n"));
     2559    }
     2560}
     2561
     2562
     2563/**
    25272564 * Evaluates the event to be delivered to the guest and sets it as the pending
    25282565 * event.
     
    25382575    const bool fIntShadow = RT_BOOL(hmR0SvmGetGuestIntrShadow(pVCpu, pCtx));
    25392576    const bool fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
     2577    const bool fBlockNmi  = RT_BOOL(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_NMIS));
    25402578    PSVMVMCB pVmcb        = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    25412579
     
    25452583    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))   /* NMI. NMIs take priority over regular interrupts . */
    25462584    {
    2547         if (!fIntShadow)
     2585        if (fBlockNmi)
     2586            hmR0SvmSetIretIntercept(pVmcb);
     2587        else if (fIntShadow)
     2588            hmR0SvmSetVirtIntrIntercept(pVmcb);
     2589        else
    25482590        {
    25492591            Log4(("Pending NMI\n"));
     
    25562598            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
    25572599        }
    2558         else
    2559             hmR0SvmSetVirtIntrIntercept(pVmcb);
    25602600    }
    25612601    else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
     
    29623002    }
    29633003
     3004    /*
     3005     * If we are injecting an NMI, we must set VMCPU_FF_INHIBIT_NMIS only when we are going to execute
     3006     * guest code for certain (no exits to ring-3). Otherwise, we could re-read the flag on re-entry into
     3007     * AMD-V and conclude that NMI inhibition is active when we have not even delivered the NMI.
     3008     *
     3009     * With VT-x, this is handled by the Guest interruptibility information VMCS field which will set the
     3010     * VMCS field after actually delivering the NMI which we read on VM-exit to determine the state.
     3011     */
     3012    if (pVCpu->hm.s.Event.fPending)
     3013    {
     3014        SVMEVENT Event;
     3015        Event.u = pVCpu->hm.s.Event.u64IntInfo;
     3016        if (   Event.n.u1Valid
     3017            && Event.n.u3Type == SVM_EVENT_NMI
     3018            && Event.n.u8Vector == X86_XCPT_NMI)
     3019        {
     3020            VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_NMIS);
     3021        }
     3022    }
     3023
    29643024    return VINF_SUCCESS;
    29653025}
     
    33903450
    33913451    /*
    3392      * The ordering of the case labels is based on most-frequently-occurring VM-exits for most guests under
     3452     * The ordering of the case labels is based on most-frequently-occurring #VMEXITs for most guests under
    33933453     * normal workloads (for some definition of "normal").
    33943454     */
     
    34873547                case SVM_EXIT_VMMCALL:
    34883548                    return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
     3549
     3550                case SVM_EXIT_IRET:
     3551                    return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient);
    34893552
    34903553                case SVM_EXIT_SHUTDOWN:
     
    39303993 *
    39313994 * @returns VBox status code (informational error codes included).
    3932  * @retval VINF_SUCCESS if we should continue handling the VM-exit.
     3995 * @retval VINF_SUCCESS if we should continue handling the #VMEXIT.
    39333996 * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
    39343997 *         continue execution of the guest which will delivery the #DF.
     
    40024065                /*
    40034066                 * If event delivery caused an #VMEXIT that is not an exception (e.g. #NPF) then reflect the original
    4004                  * exception to the guest after handling the VM-exit.
     4067                 * exception to the guest after handling the #VMEXIT.
    40054068                 */
    40064069                enmReflect = SVMREFLECTXCPT_XCPT;
     
    40774140/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    40784141
    4079 /** @name VM-exit handlers.
     4142/** @name #VMEXIT handlers.
    40804143 * @{
    40814144 */
     
    45154578    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    45164579
    4517     /* We should -not- get this VM-exit if the guest's debug registers were active. */
     4580    /* We should -not- get this #VMEXIT if the guest's debug registers were active. */
    45184581    if (pSvmTransient->fWasGuestDebugStateActive)
    45194582    {
     
    48554918    pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;
    48564919
    4857     /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
     4920    /* Clear NMI inhibition, if it's active. */
     4921    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_NMIS))
     4922    {
     4923        hmR0SvmClearIretIntercept(pVmcb);
     4924        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_NMIS);
     4925    }
     4926
     4927    /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive interrupts/NMIs, it is now ready. */
    48584928    pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_VINTR;
    48594929    pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
    48604930
    4861     /* Deliver the pending interrupt via hmR0SvmPreRunGuest()->hmR0SvmInjectEventVmcb() and resume guest execution. */
     4931    /* Deliver the pending interrupt/NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
    48624932    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
    48634933    return VINF_SUCCESS;
     
    49184988    if (rc != VINF_SUCCESS)
    49194989        hmR0SvmSetPendingXcptUD(pVCpu);
     4990    return VINF_SUCCESS;
     4991}
     4992
     4993
     4994/**
     4995 * #VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional #VMEXIT.
     4996 */
     4997HMSVM_EXIT_DECL hmR0SvmExitIret(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
     4998{
     4999    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     5000
     5001    /* Clear NMI inhibition. */
     5002    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_NMIS);
     5003
     5004    /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
     5005    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     5006    hmR0SvmClearIretIntercept(pVmcb);
     5007
     5008    /* Deliver the pending NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
    49205009    return VINF_SUCCESS;
    49215010}
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r52009 r52041  
    23232323
    23242324    val |=   VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT           /* External interrupts causes a VM-exits. */
    2325            | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;              /* Non-maskable interrupts causes a VM-exit. */
    2326     Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI));
     2325           | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;              /* Non-maskable interrupts (NMIs) cause a VM-exit. */
     2326
     2327    if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
     2328        val |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI;         /* Use virtual NMIs and virtual-NMI blocking features. */
    23272329
    23282330    /* Enable the VMX preemption timer. */
     
    24512453        }
    24522454#endif
     2455    }
     2456
     2457    /* If we're using virtual NMIs, we need the NMI-window exiting feature. */
     2458    if (   (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
     2459        && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
     2460    {
     2461        val |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
    24532462    }
    24542463
     
    35133522            uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
    35143523    }
     3524
     3525    /*
     3526     * NMIs to the guest are inhibited until the guest executes an IRET. We only
     3527     * bother with virtual-NMI blocking when we have support for virtual NMIs in the
     3528     * CPU, otherwise setting this would block host-NMIs and IRET will not clear the
     3529     * blocking.
     3530     *
     3531     * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
     3532     */
     3533    if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_NMIS)
     3534        && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
     3535    {
     3536        uIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;
     3537    }
     3538
    35153539    return uIntrState;
    35163540}
     
    60696093
    60706094    if (!uIntrState)
    6071         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     6095    {
     6096        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     6097            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     6098        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_NMIS))
     6099            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_NMIS);
     6100    }
    60726101    else
    60736102    {
    6074         Assert(   uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
    6075                || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
    6076         rc  = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    6077         AssertRC(rc);
    6078         rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);    /* for hmR0VmxGetGuestIntrState(). */
    6079         AssertRC(rc);
    6080 
    6081         EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
    6082         Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
     6103        if (   (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
     6104            || (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
     6105        {
     6106            rc  = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
     6107            AssertRC(rc);
     6108            rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);    /* for hmR0VmxGetGuestIntrState(). */
     6109            AssertRC(rc);
     6110
     6111            EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
     6112            Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
     6113        }
     6114
     6115        if (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
     6116            VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_NMIS);
     6117        else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_NMIS))
     6118            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_NMIS);
    60836119    }
    60846120}
     
    72777313
    72787314/**
     7315 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
     7316 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
     7317 *
     7318 * @param pVCpu         Pointer to the VMCPU.
     7319 */
     7320DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
     7321{
     7322    if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
     7323    {
     7324        if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
     7325        {
     7326            pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
     7327            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
     7328            AssertRC(rc);
     7329            Log4(("Setup NMI-window exiting\n"));
     7330        }
     7331    } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
     7332}
     7333
     7334
     7335/**
     7336 * Clears the NMI-window exiting control in the VMCS.
     7337 *
     7338 * @param pVCpu             Pointer to the VMCPU.
     7339 */
     7340DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
     7341{
     7342    Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
     7343    pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
     7344    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
     7345    AssertRC(rc);
     7346    Log4(("Cleared NMI-window exiting\n"));
     7347}
     7348
     7349
     7350/**
    72797351 * Evaluates the event to be delivered to the guest and sets it as the pending
    72807352 * event.
     
    72937365    bool fBlockMovSS    = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
    72947366    bool fBlockSti      = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
     7367    bool fBlockNmi      = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
    72957368
    72967369    Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
    7297     Assert(   !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)      /* We don't support block-by-NMI and SMI yet.*/
    7298            && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
     7370    Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet.*/
    72997371    Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF);     /* Cannot set block-by-STI when interrupts are disabled. */
    73007372    Assert(!TRPMHasTrap(pVCpu));
     
    73037375    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))    /* NMI. NMIs take priority over regular interrupts . */
    73047376    {
    7305         /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
    7306         if (   !fBlockMovSS
    7307             && !fBlockSti)
     7377        if (   fBlockNmi
     7378            || fBlockSti
     7379            || fBlockMovSS)
    73087380        {
    73097381            /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
     7382            hmR0VmxSetNmiWindowExitVmcs(pVCpu);
     7383        }
     7384        else
     7385        {
    73107386            Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
    73117387            uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
     
    73157391            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
    73167392        }
    7317         else
    7318             hmR0VmxSetIntWindowExitVmcs(pVCpu);
    73197393    }
    73207394    /*
     
    73967470
    73977471    Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
    7398     Assert(   !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)      /* We don't support block-by-NMI and SMI yet.*/
    7399            && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
     7472    Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));     /* We don't support block-by-SMI yet.*/
    74007473    Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF);       /* Cannot set block-by-STI when interrupts are disabled. */
    74017474    Assert(!TRPMHasTrap(pVCpu));
     
    74117484        uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
    74127485        if (   (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
    7413             && (   uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT
    7414                 || uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI))
     7486            && uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
    74157487        {
    74167488            Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
    74177489            hmR0VmxClearIntWindowExitVmcs(pVCpu);
    74187490        }
    7419 #if 1 /* defined(VBOX_STRICT) */  /* Temporarily for debugging. */
     7491
     7492#ifdef VBOX_STRICT
    74207493        if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
    74217494        {
     
    74257498            Assert(!fBlockSti);
    74267499            Assert(!fBlockMovSS);
    7427             Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT));
    74287500        }
    74297501        else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
    74307502        {
     7503            bool fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
    74317504            Assert(!fBlockSti);
    74327505            Assert(!fBlockMovSS);
    7433             Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT));
     7506            Assert(!fBlockNmi);
    74347507        }
    74357508#endif
     
    78387911
    78397912    /* Validate. */
    7840     Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo));        /* Bit 31 (Valid bit) must be set by caller. */
    7841     Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(u32IntInfo));    /* Bit 12 MBZ. */
    7842     Assert(!(u32IntInfo & 0x7ffff000));                             /* Bits 30:12 MBZ. */
     7913    Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo));             /* Bit 31 (Valid bit) must be set by caller. */
     7914    Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(u32IntInfo));    /* Bit 12 MBZ. */
     7915    Assert(!(u32IntInfo & 0x7ffff000));                                  /* Bits 30:12 MBZ. */
    78437916
    78447917    /* Inject. */
     
    98129885    hmR0VmxClearIntWindowExitVmcs(pVCpu);
    98139886
    9814     /* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and resume guest execution. */
     9887    /* Deliver the pending interrupts via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
    98159888    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
    98169889    return VINF_SUCCESS;
     
    98249897{
    98259898    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    9826     AssertMsgFailed(("Unexpected NMI-window exit.\n"));
    9827     HMVMX_RETURN_UNEXPECTED_EXIT();
     9899    if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)))
     9900    {
     9901        AssertMsgFailed(("Unexpected NMI-window exit.\n"));
     9902        HMVMX_RETURN_UNEXPECTED_EXIT();
     9903    }
     9904
     9905    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_NMIS));
     9906
     9907    /*
     9908     * Clear block-by-STI if it's active. The force-flag couldn't have been set by block-by-Mov SS in
     9909     * hmR0VmxSaveGuestIntrState() when this VM-exit happens as Intel CPUs are consistent with
     9910     * block-by-Mov SS and NMIs. See @bugref{7445}.
     9911     */
     9912    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     9913        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     9914
     9915    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready */
     9916    hmR0VmxClearNmiWindowExitVmcs(pVCpu);
     9917
     9918    /* Deliver the pending NMI via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
     9919    return VINF_SUCCESS;
    98289920}
    98299921
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r52005 r52041  
    7070    EXIT_REASON(VMX_EXIT_SMI_IRQ            ,  6, "Other SMI."),
    7171    EXIT_REASON(VMX_EXIT_INT_WINDOW         ,  7, "Interrupt window."),
    72     EXIT_REASON_NIL(),
     72    EXIT_REASON(VMX_EXIT_NMI_WINDOW         ,  8, "NMI window."),
    7373    EXIT_REASON(VMX_EXIT_TASK_SWITCH        ,  9, "Task switch."),
    7474    EXIT_REASON(VMX_EXIT_CPUID              , 10, "Guest attempted to execute CPUID."),
  • trunk/src/recompiler/VBoxRecompiler.c

    r51326 r52041  
    22372237    }
    22382238
     2239    /* Update the inhibit NMI mask. */
     2240    pVM->rem.s.Env.hflags2 &= ~HF2_NMI_MASK;
     2241    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_NMIS))
     2242        pVM->rem.s.Env.hflags2 |= HF2_NMI_MASK;
     2243
    22392244    /*
    22402245     * Sync the A20 gate.
     
    27232728        Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
    27242729        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     2730    }
     2731
     2732    /* Inhibit NMI flag. */
     2733    if (pVM->rem.s.Env.hflags2 & HF2_NMI_MASK)
     2734    {
     2735        Log(("Settings VMCPU_FF_INHIBIT_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
     2736        VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_NMIS);
     2737    }
     2738    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_NMIS))
     2739    {
     2740        Log(("Clearing VMCPU_FF_INHIBIT_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
     2741        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_NMIS);
    27252742    }
    27262743
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette