VirtualBox

Changeset 79783 in vbox


Ignore:
Timestamp:
Jul 15, 2019 10:45:01 AM (5 years ago)
Author:
vboxsync
Message:

VMM/HMVMXR0: Nested VMX: bugref:9180 Fix exception handling. If a guest hypervisor is intercepting an exception, always forward IDT-vectoring info. as part of the VM-exit (i.e. those generated by hardware or injected by the guest hypervisor via VMLAUNCH/VMRESUME).

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r79766 r79783  
    923923
    924924/**
    925  * Checks whether one of the given Pin-based VM-execution controls are set.
    926  *
    927  * @returns @c true if set, @c false otherwise.
    928  * @param   pVCpu           The cross context virtual CPU structure.
    929  * @param   pVmxTransient   The VMX-transient structure.
    930  * @param   uPinCtls        The Pin-based VM-execution controls to check.
    931  *
    932  * @remarks This will not check merged controls when executing a nested-guest
    933  *          but the original control specified by the guest hypervisor.
    934  */
    935 static bool hmR0VmxIsPinCtlsSet(PVMCPU pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uPinCtls)
    936 {
    937     if (!pVmxTransient->fIsNestedGuest)
    938     {
    939         PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    940         return RT_BOOL(pVmcsInfo->u32PinCtls & uPinCtls);
    941     }
    942     return CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, uPinCtls);
    943 }
    944 
    945 
    946 /**
    947925 * Sets the given Processor-based VM-execution controls.
    948926 *
     
    52545232     * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
    52555233     */
    5256     if (   hmR0VmxIsPinCtlsSet(pVCpu, pVmxTransient, VMX_PIN_CTLS_VIRT_NMI)
     5234    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     5235    if (   (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
    52575236        && CPUMIsGuestNmiBlocking(pVCpu))
    52585237        fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
     
    1357713556
    1357813557
    13579 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    13580 /**
    13581  * Handle a condition that occurred while delivering an event through the
    13582  * nested-guest IDT.
    13583  *
    13584  * @returns VBox status code.
    13585  * @param   pVCpu           The cross context virtual CPU structure.
    13586  * @param   pVmxTransient   The VMX-transient structure.
    13587  *
    13588  * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
    13589  * @remarks No-long-jump zone!!!
    13590  */
    13591 static int hmR0VmxCheckExitDueToEventDeliveryNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    13592 {
    13593     Assert(!pVCpu->hm.s.Event.fPending);
    13594     Assert(pVmxTransient->fIsNestedGuest);
    13595     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
    13596 
    13597     /*
    13598      * Construct a pending event from IDT vectoring information.
    13599      *
    13600      * This event could have originated from an event that we or the guest hypervisor injected
    13601      * during nested-guest VM-entry or could arise from hardware-assisted VMX execution of the
    13602      * nested-guest (for e.g. a #GP fault causing a #PF VM-exit).
    13603      *
    13604      * If the VM-exit is caused indirectly due to delivery of:
    13605      *   - #PF: the CPU would have updated CR2.
    13606      *   - NMI: NMI/virtual-NMI blocking is in effect.
    13607      *
    13608      * The main differences between this function and its non-nested version are as follows:
    13609      *
    13610      *   - Here we record software interrupts, software exceptions and privileged software
    13611      *     exceptions as pending for re-injection when necessary along with gathering the
    13612      *     instruction length. The non-nested version would fix-up the VM-exit that occurred
    13613      *     during delivery of such an event and restart execution of the guest without
    13614      *     re-injecting the event and does not record the instruction length.
    13615      *
    13616      *   - Here we record #PF as pending for re-injection while the non-nested version would
    13617      *     handle it via the page-fault VM-exit handler which isn't required when nested paging
    13618      *     is a requirement for hardware-assisted VMX execution of nested-guests.
    13619      *
    13620      * See Intel spec. 27.1 "Architectural State Before A VM Exit".
    13621      */
    13622     uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
    13623     if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
    13624     {
    13625         uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
    13626         uint8_t const  uIdtVector     = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
    13627 
    13628         /*
    13629          * Get the nasty stuff out of the way.
    13630          */
    13631         {
    13632             uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
    13633             if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
    13634             {
    13635                 uint8_t const  uExitVector      = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
    13636                 uint32_t const uExitVectorType  = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
    13637                 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
    13638 
    13639                 uint32_t const fIdtVectorFlags  = hmR0VmxGetIemXcptFlags(uIdtVector, uIdtVectorType);
    13640                 uint32_t const fExitVectorFlags = hmR0VmxGetIemXcptFlags(uExitVector, uExitVectorType);
    13641 
    13642                 IEMXCPTRAISEINFO   fRaiseInfo;
    13643                 IEMXCPTRAISE const enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags,
    13644                                                                        uExitVector, &fRaiseInfo);
    13645                 if (enmRaise == IEMXCPTRAISE_CPU_HANG)
    13646                 {
    13647                     Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
    13648                     return VERR_EM_GUEST_CPU_HANG;
    13649                 }
    13650             }
    13651         }
    13652 
    13653         /*
    13654          * Things look legit, continue...
    13655          */
    13656         uint32_t u32ErrCode;
    13657         if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
    13658             u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
    13659         else
    13660             u32ErrCode = 0;
    13661 
    13662         uint32_t cbInstr;
    13663         if (   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
    13664             || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT
    13665             || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT)
    13666             cbInstr = pVmxTransient->cbInstr;
    13667         else
    13668             cbInstr = 0;
    13669 
    13670         RTGCUINTPTR GCPtrFaultAddress;
    13671         if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(uIdtVectorInfo))
    13672             GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
    13673         else
    13674             GCPtrFaultAddress = 0;
    13675 
    13676         /** @todo NSTVMX: Verify the docs on this one again please. */
    13677         if (VMX_IDT_VECTORING_INFO_IS_XCPT_NMI(uIdtVectorInfo))
    13678             CPUMSetGuestNmiBlocking(pVCpu, true);
    13679 
    13680         hmR0VmxSetPendingEvent(pVCpu, uIdtVectorInfo, cbInstr, u32ErrCode, GCPtrFaultAddress);
    13681     }
    13682 
    13683     return VINF_SUCCESS;
    13684 }
    13685 #endif
    13686 
    13687 
    1368813558/**
    1368913559 * Handle a condition that occurred while delivering an event through the guest or
     
    1370213572 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
    1370313573 *          Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
    13704  *          is due to an EPT violation, PML-full and SPP-related event.
     13574 *          is due to an EPT violation, PML full or SPP-related event.
    1370513575 *
    1370613576 * @remarks No-long-jump zone!!!
     
    1370813578static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1370913579{
    13710 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    13711     if (pVmxTransient->fIsNestedGuest)
    13712         return hmR0VmxCheckExitDueToEventDeliveryNested(pVCpu, pVmxTransient);
    13713 #endif
    13714 
    1371513580    Assert(!pVCpu->hm.s.Event.fPending);
    13716     Assert(!pVmxTransient->fIsNestedGuest);
    1371713581    HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
     13582    if (   pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
     13583        || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
     13584        || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
     13585        HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
    1371813586
    1371913587    VBOXSTRICTRC   rcStrict       = VINF_SUCCESS;
     13588    PCVMXVMCSINFO  pVmcsInfo      = pVmxTransient->pVmcsInfo;
     13589    uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
    1372013590    uint32_t const uExitIntInfo   = pVmxTransient->uExitIntInfo;
    13721     uint8_t const  uExitVector    = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    13722     uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
    1372313591    if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
    1372413592    {
     
    1374613614        else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
    1374713615        {
    13748             uint32_t const uExitVectorType  = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
     13616            uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
     13617            uint8_t const  uExitVector     = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
    1374913618            Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
    1375013619
     
    1378413653         */
    1378513654        if (   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
    13786             && (   enmRaise   == IEMXCPTRAISE_PREV_EVENT
    13787                 || (fRaiseInfo & IEMXCPTRAISEINFO_NMI_PF))
    13788             && hmR0VmxIsPinCtlsSet(pVCpu, pVmxTransient, VMX_PIN_CTLS_VIRT_NMI)
     13655            && enmRaise == IEMXCPTRAISE_PREV_EVENT
     13656            && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
    1378913657            && CPUMIsGuestNmiBlocking(pVCpu))
    1379013658        {
     
    1379613664            case IEMXCPTRAISE_CURRENT_XCPT:
    1379713665            {
    13798                 Log4Func(("IDT: Pending secondary Xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n", uIdtVectorInfo,
    13799                           uExitIntInfo));
     13666                Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
    1380013667                Assert(rcStrict == VINF_SUCCESS);
    1380113668                break;
     
    1384313710                    hmR0VmxSetPendingXcptDF(pVCpu);
    1384413711                    Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
    13845                               uIdtVector, uExitVector));
     13712                              uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
    1384613713                    rcStrict = VINF_HM_DOUBLE_FAULT;
    1384713714                }
     
    1385113718            case IEMXCPTRAISE_TRIPLE_FAULT:
    1385213719            {
    13853                 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
     13720                Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
     13721                          VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
    1385413722                rcStrict = VINF_EM_RESET;
    1385513723                break;
     
    1387113739        }
    1387213740    }
    13873     else if (hmR0VmxIsPinCtlsSet(pVCpu, pVmxTransient, VMX_PIN_CTLS_VIRT_NMI))
     13741    else if (   (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
     13742             && !CPUMIsGuestNmiBlocking(pVCpu))
    1387413743    {
    1387513744        if (    VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
     
    1389913768             * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
    1390013769             */
    13901             HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
    1390213770            if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
    1390313771            {
     
    1478914657
    1479014658/**
    14791  * VM-exit exception handler for all exceptions.
     14659 * VM-exit exception handler for all exceptions (except NMIs!).
    1479214660 *
    1479314661 * @remarks This may be called for both guests and nested-guests. Take care to not
     
    1479514663 *          executing a nested-guest (e.g., Mesa driver hacks).
    1479614664 */
    14797 DECL_FORCE_INLINE(VBOXSTRICTRC) hmR0VmxExitXcptAll(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint8_t uVector)
    14798 {
    14799     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
    14800     switch (uVector)
    14801     {
    14802         case X86_XCPT_PF: return hmR0VmxExitXcptPF(pVCpu, pVmxTransient);
    14803         case X86_XCPT_GP: return hmR0VmxExitXcptGP(pVCpu, pVmxTransient);
    14804         case X86_XCPT_MF: return hmR0VmxExitXcptMF(pVCpu, pVmxTransient);
    14805         case X86_XCPT_DB: return hmR0VmxExitXcptDB(pVCpu, pVmxTransient);
    14806         case X86_XCPT_BP: return hmR0VmxExitXcptBP(pVCpu, pVmxTransient);
    14807         case X86_XCPT_AC: return hmR0VmxExitXcptAC(pVCpu, pVmxTransient);
    14808         default:
    14809             return hmR0VmxExitXcptOthers(pVCpu, pVmxTransient);
    14810     }
     14665static VBOXSTRICTRC hmR0VmxExitXcpt(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     14666{
     14667    /*
     14668     * If this VM-exit occurred while delivering an event through the guest IDT, take
     14669     * action based on the return code and additional hints (e.g. for page-faults)
     14670     * that will be updated in the VMX transient structure.
     14671     */
     14672    VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
     14673    if (rcStrict == VINF_SUCCESS)
     14674    {
     14675        /*
     14676         * If an exception caused a VM-exit due to delivery of an event, the original
     14677         * event may have to be re-injected into the guest. We shall reinject it and
     14678         * continue guest execution. However, page-fault is a complicated case and
     14679         * needs additional processing done in hmR0VmxExitXcptPF().
     14680         */
     14681        uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
     14682        if (   !pVCpu->hm.s.Event.fPending
     14683            || uVector == X86_XCPT_PF)
     14684        {
     14685            switch (uVector)
     14686            {
     14687                case X86_XCPT_PF: return hmR0VmxExitXcptPF(pVCpu, pVmxTransient);
     14688                case X86_XCPT_GP: return hmR0VmxExitXcptGP(pVCpu, pVmxTransient);
     14689                case X86_XCPT_MF: return hmR0VmxExitXcptMF(pVCpu, pVmxTransient);
     14690                case X86_XCPT_DB: return hmR0VmxExitXcptDB(pVCpu, pVmxTransient);
     14691                case X86_XCPT_BP: return hmR0VmxExitXcptBP(pVCpu, pVmxTransient);
     14692                case X86_XCPT_AC: return hmR0VmxExitXcptAC(pVCpu, pVmxTransient);
     14693                default:
     14694                    return hmR0VmxExitXcptOthers(pVCpu, pVmxTransient);
     14695            }
     14696        }
     14697    }
     14698    else if (rcStrict == VINF_HM_DOUBLE_FAULT)
     14699    {
     14700        Assert(pVCpu->hm.s.Event.fPending);
     14701        rcStrict = VINF_SUCCESS;
     14702    }
     14703
     14704    return rcStrict;
    1481114705}
    1481214706/** @} */
     
    1489314787            AssertRCReturn(rc, rc);
    1489414788
    14895             /*
    14896              * If this VM-exit occurred while delivering an event through the guest IDT, take
    14897              * action based on the return code and additional hints (e.g. for page-faults)
    14898              * that will be updated in the VMX transient structure.
    14899              */
    14900             rcStrict = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    14901             if (rcStrict == VINF_SUCCESS)
    14902             {
    14903                 /*
    14904                  * If an exception caused a VM-exit due to delivery of an event, the original
    14905                  * event may have to be re-injected into the guest. We shall reinject it and
    14906                  * continue guest execution. However, page-fault is a complicated case and
    14907                  * needs additional processing done in hmR0VmxExitXcptPF().
    14908                  */
    14909                 if (   !pVCpu->hm.s.Event.fPending
    14910                     || uVector == X86_XCPT_PF)
    14911                     rcStrict = hmR0VmxExitXcptAll(pVCpu, pVmxTransient, uVector);
    14912             }
    14913             else if (rcStrict == VINF_HM_DOUBLE_FAULT)
    14914             {
    14915                 Assert(pVCpu->hm.s.Event.fPending);
    14916                 rcStrict = VINF_SUCCESS;
    14917             }
     14789            rcStrict = hmR0VmxExitXcpt(pVCpu, pVmxTransient);
    1491814790            break;
    1491914791        }
     
    1656616438    AssertRCReturn(rc, rc);
    1656716439
    16568     /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
    16569     AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQual));
     16440    uint64_t const uExitQual = pVmxTransient->uExitQual;
     16441    AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
    1657016442
    1657116443    RTGCUINT uErrorCode = 0;
    16572     if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
     16444    if (uExitQual & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
    1657316445        uErrorCode |= X86_TRAP_PF_ID;
    16574     if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_DATA_WRITE)
     16446    if (uExitQual & VMX_EXIT_QUAL_EPT_DATA_WRITE)
    1657516447        uErrorCode |= X86_TRAP_PF_RW;
    16576     if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
     16448    if (uExitQual & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
    1657716449        uErrorCode |= X86_TRAP_PF_P;
    1657816450
    16579     TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
    16580 
    16581     /* Handle the pagefault trap for the nested shadow table. */
    1658216451    PVM      pVM  = pVCpu->CTX_SUFF(pVM);
    1658316452    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    16584 
    16585     Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x cs:rip=%#04x:%#RX64\n", pVmxTransient->uExitQual, GCPhys, uErrorCode,
    16586               pCtx->cs.Sel, pCtx->rip));
    16587 
     16453    Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
     16454
     16455    /*
     16456     * Handle the pagefault trap for the nested shadow table.
     16457     */
     16458    TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
    1658816459    rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
    1658916460    TRPMResetTrap(pVCpu);
     
    1701116882            AssertRCReturn(rc, rc);
    1701216883
    17013             /*
    17014              * If this VM-exit occurred while delivering an event through the nested-guest IDT, handle it accordingly.
    17015              */
    17016             rc = hmR0VmxCheckExitDueToEventDeliveryNested(pVCpu, pVmxTransient);
    17017             AssertRCReturn(rc, rc);
    17018 
    17019             /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs. */
    17020             Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
    17021 
    17022             uint8_t const uVector    = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
    17023             bool const    fIntercept = CPUMIsGuestVmxXcptInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, uVector,
    17024                                                                       pVmxTransient->uExitIntErrorCode);
     16884            PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     16885            bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pVCpu, pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
     16886                                                                   pVmxTransient->uExitIntErrorCode);
    1702516887            if (fIntercept)
    1702616888            {
     
    1704616908                ExitEventInfo.uIdtVectoringInfo    = pVmxTransient->uIdtVectoringInfo;
    1704716909                ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
    17048                 if (pVCpu->hm.s.Event.fPending)
    17049                 {
    17050                     Assert(ExitEventInfo.uIdtVectoringInfo == pVCpu->hm.s.Event.u64IntInfo);
    17051                     if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(ExitEventInfo.uIdtVectoringInfo))
    17052                         Assert(ExitEventInfo.uIdtVectoringErrCode == pVCpu->hm.s.Event.u32ErrCode);
    17053                     if (   VMX_IDT_VECTORING_INFO_TYPE(ExitEventInfo.uIdtVectoringInfo) == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
    17054                         || VMX_IDT_VECTORING_INFO_TYPE(ExitEventInfo.uIdtVectoringInfo) == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT
    17055                         || VMX_IDT_VECTORING_INFO_TYPE(ExitEventInfo.uIdtVectoringInfo) == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT)
    17056                         Assert(ExitInfo.cbInstr == pVCpu->hm.s.Event.cbInstr);
    17057 
    17058                     pVCpu->hm.s.Event.fPending = false;
    17059                 }
    1706016910                return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
    1706116911            }
     
    1706316913            /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs. */
    1706416914            Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
    17065 
    17066             /*
    17067              * If the guest hypervisor is not intercepting an exception that caused a VM-exit directly,
    17068              * forward it to the guest (for e.g, an instruction raises a #GP that causes this VM-exit
    17069              * despite the guest hypervisor not intercept #GPs, inject #GP into the nested-guest).
    17070              *
    17071              * If the guest hypervisor is not intercepting an exception that caused a VM-exit indirectly,
    17072              * inject the secondary exception into the nested-guest (for e.g, an instruction raises a #GP,
    17073              * delivery of which causes an #AC. We get a #AC VM-exit but the guest-hypervisor is not
    17074              * intercepting #AC, then inject the #AC into the nested-guest rather than the original #GP).
    17075              */
    17076             pVCpu->hm.s.Event.fPending = false;
    17077             return hmR0VmxExitXcptAll(pVCpu, pVmxTransient, uVector);
     16915            return hmR0VmxExitXcpt(pVCpu, pVmxTransient);
    1707816916        }
    1707916917
     
    1767617514    AssertRCReturn(rc, rc);
    1767717515
    17678     rc = hmR0VmxCheckExitDueToEventDeliveryNested(pVCpu, pVmxTransient);
    17679     AssertRCReturn(rc, rc);
    17680 
    1768117516    Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
    1768217517    rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     
    1769317528    ExitEventInfo.uIdtVectoringInfo    = pVmxTransient->uIdtVectoringInfo;
    1769417529    ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
    17695     if (pVCpu->hm.s.Event.fPending)
    17696     {
    17697         Assert(ExitEventInfo.uIdtVectoringInfo    == pVCpu->hm.s.Event.u64IntInfo);
    17698         Assert(ExitEventInfo.uIdtVectoringErrCode == pVCpu->hm.s.Event.u32ErrCode);
    17699         pVCpu->hm.s.Event.fPending = false;
    17700     }
    1770117530    return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
    1770217531}
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette