VirtualBox

Changeset 68910 in vbox


Ignore:
Timestamp:
Sep 28, 2017 3:44:36 PM (7 years ago)
Author:
vboxsync
Message:

VMM/HM: Nested Hw.virt: SVM hardware-assisted execution bits.

Location:
trunk
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/hm_svm.h

    r68433 r68910  
    996996    /** Cache of the TLB control. */
    997997    SVMTLBCTRL          TLBCtrl;
     998    /** Cache of the nested-paging control. */
     999    SVMNPCTRL           NestedPagingCtrl;
    9981000    /** @} */
    9991001
     
    10021004    /** Cache of CR3. */
    10031005    uint64_t            u64CR3;
     1006    /** Cache of CR4. */
     1007    uint64_t            u64CR4;
    10041008    /** @} */
    10051009
    10061010    /** @name Other miscellaneous state.
    10071011     * @{ */
    1008     /** Whether a VMRUN was just emulated in R0 and the VMCB is up to date. */
    1009     bool                fVmrunEmulatedInR0;
    10101012    /** Whether the VMCB exit code and info fields are updated during \#VMEXIT
    10111013     *  processing. */
     
    10161018    bool                fValid;
    10171019    /** Alignment. */
    1018     bool                afPadding0[4];
     1020    bool                afPadding0[5];
    10191021    /** @} */
    10201022} SVMNESTEDVMCBCACHE;
  • trunk/include/VBox/vmm/vm.h

    r68851 r68910  
    156156        struct HMCPU    s;
    157157#endif
    158         uint8_t             padding[5760];      /* multiple of 64 */
     158        uint8_t             padding[5824];      /* multiple of 64 */
    159159    } hm;
    160160
     
    254254
    255255    /** Align the following members on page boundary. */
    256     uint8_t                 abAlignment2[2168];
     256    uint8_t                 abAlignment2[2104];
    257257
    258258    /** PGM part. */
  • trunk/include/VBox/vmm/vm.mac

    r65380 r68910  
    6262    alignb 64
    6363    .iem                    resb 18496
    64     .hm                     resb 5760
     64    .hm                     resb 5824
    6565    .em                     resb 1408
    6666    .trpm                   resb 128
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r68434 r68910  
    358358        pVmcbNstGstCtrl->u64InterceptCtrl         = pNstGstVmcbCache->u64InterceptCtrl;
    359359        pVmcbNstGstState->u64CR3                  = pNstGstVmcbCache->u64CR3;
     360        pVmcbNstGstState->u64CR4                  = pNstGstVmcbCache->u64CR4;
    360361        pVmcbNstGstCtrl->u64VmcbCleanBits         = pNstGstVmcbCache->u64VmcbCleanBits;
    361362        pVmcbNstGstCtrl->u64IOPMPhysAddr          = pNstGstVmcbCache->u64IOPMPhysAddr;
     
    363364        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking = pNstGstVmcbCache->fVIntrMasking;
    364365        pVmcbNstGstCtrl->TLBCtrl                  = pNstGstVmcbCache->TLBCtrl;
     366        pVmcbNstGstCtrl->NestedPaging             = pNstGstVmcbCache->NestedPagingCtrl;
    365367        pNstGstVmcbCache->fValid = false;
    366368    }
    367     pNstGstVmcbCache->fVmrunEmulatedInR0 = false;
    368369}
    369370#endif
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r68434 r68910  
    308308static FNSVMEXITHANDLER hmR0SvmExitXcptBP;
    309309#ifdef VBOX_WITH_NESTED_HWVIRT
     310static FNSVMEXITHANDLER hmR0SvmExitXcptPFNested;
    310311static FNSVMEXITHANDLER hmR0SvmExitClgi;
    311312static FNSVMEXITHANDLER hmR0SvmExitStgi;
     
    322323#ifdef VBOX_WITH_NESTED_HWVIRT
    323324static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
    324 static int hmR0SvmExecVmexit(PVMCPU pVCpu, PCPUMCTX pCtx);
    325325#endif
    326326
     
    11821182     * When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
    11831183     */
    1184     if (!pVmcb->ctrl.NestedPaging.n.u1NestedPaging)
     1184    if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
    11851185    {
    11861186        u64GuestCR0 |= X86_CR0_PG;     /* When Nested Paging is not available, use shadow page tables. */
     
    12291229
    12301230/**
    1231  * Loads the guest control registers (CR2, CR3, CR4) into the VMCB.
     1231 * Loads the guest/nested-guest control registers (CR2, CR3, CR4) into the VMCB.
    12321232 *
    12331233 * @returns VBox status code.
     
    12731273        }
    12741274        else
     1275        {
    12751276            pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
     1277            Log4(("hmR0SvmLoadGuestControlRegs: CR3=%#RX64 (HyperCR3=%#RX64)\n", pCtx->cr3, pVmcb->guest.u64CR3));
     1278        }
    12761279
    12771280        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
     
    12861289    {
    12871290        uint64_t u64GuestCR4 = pCtx->cr4;
     1291        Assert(RT_HI_U32(u64GuestCR4) == 0);
    12881292        if (!pVM->hm.s.fNestedPaging)
    12891293        {
     
    13351339#ifdef VBOX_WITH_NESTED_HWVIRT
    13361340/**
    1337  * Loads the nested-guest control registers (CR2, CR3, CR4) into the VMCB.
    1338  *
     1341 * Loads the nested-guest control registers (CR0, CR2, CR3, CR4) into the VMCB.
     1342 *
     1343 * @returns VBox status code.
    13391344 * @param   pVCpu           The cross context virtual CPU structure.
    13401345 * @param   pVmcbNstGst     Pointer to the nested-guest VM control block.
     
    13431348 * @remarks No-long-jump zone!!!
    13441349 */
    1345 static void hmR0SvmLoadGuestControlRegsNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
     1350static int hmR0SvmLoadGuestControlRegsNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
    13461351{
    13471352    /*
     
    13551360    }
    13561361
    1357     /*
    1358      * Guest CR2.
    1359      */
    1360     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR2))
    1361     {
    1362         pVmcbNstGst->guest.u64CR2 = pCtx->cr2;
    1363         pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
    1364         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
    1365     }
    1366 
    1367     /*
    1368      * Guest CR3.
    1369      */
    1370     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
    1371     {
    1372         Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmNestedPaging);
    1373         int rc = PGMPhysGCPhys2HCPhys(pVCpu->CTX_SUFF(pVM), pCtx->cr3, &pVmcbNstGst->guest.u64CR3);
    1374         AssertRC(rc);
    1375         pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    1376         Log4(("hmR0SvmLoadGuestControlRegsNested: CR3=%#RX64 to HC phys CR3=%#RHp\n", pCtx->cr3, pVmcbNstGst->guest.u64CR3));
    1377         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
    1378     }
    1379 
    1380     /*
    1381      * Guest CR4.
    1382      * ASSUMES this is done everytime we get in from ring-3! (XCR0)
    1383      */
    1384     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
    1385     {
    1386         Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmNestedPaging);
    1387         pVmcbNstGst->guest.u64CR4 = pCtx->cr4;
    1388         pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    1389 
    1390         /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+nested-guest XCR0. */
    1391         pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
    1392 
    1393         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
    1394     }
     1362    return hmR0SvmLoadGuestControlRegs(pVCpu, pVmcbNstGst, pCtx);
    13951363}
    13961364#endif
     
    15251493static void hmR0SvmLoadSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
    15261494{
    1527     Assert((pCtx->dr[6] & X86_DR6_RA1_MASK) == X86_DR6_RA1_MASK);
    1528     Assert((pCtx->dr[6] & X86_DR6_RAZ_MASK) == 0);
    1529     Assert((pCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK);
    1530     Assert((pCtx->dr[7] & X86_DR7_RAZ_MASK) == 0);
    1531 
    15321495    bool fInterceptMovDRx = false;
    15331496
     
    16691632        }
    16701633    }
     1634    Log4(("hmR0SvmLoadSharedDebugState: DR6=%#RX64 DR7=%#RX64\n", pCtx->dr[6], pCtx->dr[7]));
    16711635}
    16721636
     
    17961760 * well and handle it accordingly.
    17971761 *
    1798  * @param   pVCpu       The cross context virtual CPU structure.
    1799  * @param   pVmcb           Pointer to the VM control block.
     1762 * @param   pVCpu           The cross context virtual CPU structure.
    18001763 * @param   pVmcbNstGst     Pointer to the nested-guest VM control block.
    18011764 */
    1802 static void hmR0SvmLoadGuestXcptInterceptsNested(PVMCPU pVCpu, PSVMVMCB pVmcb, PSVMVMCB pVmcbNstGst)
     1765static void hmR0SvmLoadGuestXcptInterceptsNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst)
    18031766{
    18041767    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
    18051768    {
     1769        /* First, load the guest intercepts into the guest VMCB. */
     1770        PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    18061771        hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb);
    18071772
     1773        /* Next, merge the intercepts into the nested-guest VMCB. */
    18081774        pVmcbNstGst->ctrl.u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
    18091775        pVmcbNstGst->ctrl.u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
     1776
     1777        /*
     1778         * CR3, CR4 reads and writes are intercepted as we modify them before
     1779         * hardware-assisted SVM execution. In addition, PGM needs to be up to date
     1780         * on paging mode changes in the nested-guest.
     1781         *
     1782         * CR0 writes are intercepted in case of paging mode changes. CR0 reads are not
     1783         * intercepted as we currently don't modify CR0 while executing the nested-guest.
     1784         */
     1785        pVmcbNstGst->ctrl.u16InterceptRdCRx |= RT_BIT(4) | RT_BIT(3);
     1786        pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(4) | RT_BIT(3) | RT_BIT(0);
    18101787
    18111788        /** @todo Figure out debugging with nested-guests, till then just intercept
     
    20602037    pNstGstVmcbCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
    20612038    pNstGstVmcbCache->u64CR3            = pVmcbNstGstState->u64CR3;
     2039    pNstGstVmcbCache->u64CR4            = pVmcbNstGstState->u64CR4;
    20622040    pNstGstVmcbCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
    20632041    pNstGstVmcbCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
     
    20652043    pNstGstVmcbCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
    20662044    pNstGstVmcbCache->TLBCtrl           = pVmcbNstGstCtrl->TLBCtrl;
     2045    pNstGstVmcbCache->NestedPagingCtrl  = pVmcbNstGstCtrl->NestedPaging;
    20672046    pNstGstVmcbCache->fValid            = true;
    20682047}
     
    20982077     */
    20992078    pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
     2079
     2080    /*
     2081     * Use the same nested-paging as the "outer" guest. We can't dynamically
     2082     * switch off nested-paging suddenly while executing a VM (see assertion at the
     2083     * end of Trap0eHandler in PGMAllBth.h).
     2084     */
     2085    pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;
    21002086}
    21012087
     
    21062092 * @param   pVCpu           The cross context virtual CPU structure.
    21072093 * @param   pCtx            Pointer to the guest-CPU context.
    2108  *
    2109  * @remarks This must be called only after the guest exceptions are up to date as
    2110  *          otherwise we risk overwriting the guest exceptions with the nested-guest
    2111  *          exceptions.
    21122094 */
    21132095static void hmR0SvmLoadGuestVmcbNested(PVMCPU pVCpu, PCPUMCTX pCtx)
     
    21372119    Assert(pVmcbNstGst);
    21382120
    2139     /*
    2140      * If we just emulated VMRUN, the VMCB is already in-sync with the guest-CPU context.
    2141      */
    2142     if (!pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0)
    2143     {
    2144         /* First, we need to setup the nested-guest VMCB for hardware-assisted SVM execution. */
    2145         hmR0SvmLoadGuestVmcbNested(pVCpu, pCtx);
    2146 
    2147         hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);
    2148         hmR0SvmLoadGuestMsrs(pVCpu, pVmcbNstGst, pCtx);
    2149 
    2150         pVmcbNstGst->guest.u64RIP    = pCtx->rip;
    2151         pVmcbNstGst->guest.u64RSP    = pCtx->rsp;
    2152         pVmcbNstGst->guest.u64RFlags = pCtx->eflags.u32;
    2153         pVmcbNstGst->guest.u64RAX    = pCtx->rax;
    2154     }
    2155 
    2156     hmR0SvmLoadGuestControlRegsNested(pVCpu, pVmcbNstGst, pCtx);
     2121    /* First, we need to setup the nested-guest VMCB for hardware-assisted SVM execution. */
     2122    hmR0SvmLoadGuestVmcbNested(pVCpu, pCtx);
     2123
     2124    hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);
     2125    hmR0SvmLoadGuestMsrs(pVCpu, pVmcbNstGst, pCtx);
     2126
     2127    pVmcbNstGst->guest.u64RIP    = pCtx->rip;
     2128    pVmcbNstGst->guest.u64RSP    = pCtx->rsp;
     2129    pVmcbNstGst->guest.u64RFlags = pCtx->eflags.u32;
     2130    pVmcbNstGst->guest.u64RAX    = pCtx->rax;
     2131
     2132    int rc = hmR0SvmLoadGuestControlRegsNested(pVCpu, pVmcbNstGst, pCtx);
     2133    AssertRCReturn(rc, rc);
     2134
    21572135    hmR0SvmLoadGuestApicStateNested(pVCpu, pVmcbNstGst);
    2158 
    2159     PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    2160     hmR0SvmLoadGuestXcptInterceptsNested(pVCpu, pVmcb, pVmcbNstGst);
    2161 
    2162     int rc = hmR0SvmSetupVMRunHandler(pVCpu);
     2136    hmR0SvmLoadGuestXcptInterceptsNested(pVCpu, pVmcbNstGst);
     2137
     2138    rc = hmR0SvmSetupVMRunHandler(pVCpu);
    21632139    AssertRCReturn(rc, rc);
    21642140
     
    21802156               ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
    21812157
    2182     Log4(("hmR0SvmLoadGuestStateNested: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 CR4=%#RX32\n", pCtx->cs.Sel, pCtx->rip,
    2183           pCtx->eflags.u, pCtx->cr0, pCtx->cr3, pCtx->cr4));
     2158    Log4(("hmR0SvmLoadGuestStateNested: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 (HyperCR3=%#RX64) CR4=%#RX32 rc=%d\n",
     2159          pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->cr0, pCtx->cr3, pVmcbNstGst->guest.u64CR3, pCtx->cr4, rc));
    21842160    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
    21852161    return rc;
     
    22192195    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
    22202196    {
    2221         hmR0SvmLoadSharedDebugState(pVCpu, pVmcb, pCtx);
     2197        /* We use nested-guest CR0 unmodified, hence nothing to do here. */
     2198        if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     2199            hmR0SvmLoadSharedDebugState(pVCpu, pVmcb, pCtx);
     2200        else
     2201        {
     2202            pVmcb->guest.u64DR6 = pCtx->dr[6];
     2203            pVmcb->guest.u64DR7 = pCtx->dr[7];
     2204            Log4(("hmR0SvmLoadSharedState: DR6=%#RX64 DR7=%#RX64\n", pCtx->dr[6], pCtx->dr[7]));
     2205        }
     2206
    22222207        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
    22232208    }
     
    22722257    if (CPUMIsGuestInNestedHwVirtMode(pMixedCtx))
    22732258    {
    2274         pMixedCtx->cr3    = pVmcb->guest.u64CR3;
    22752259        pMixedCtx->cr4    = pVmcb->guest.u64CR4;
    22762260        pMixedCtx->cr0    = pVmcb->guest.u64CR0;
     
    23862370     * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
    23872371     */
    2388 #ifdef VBOX_WITH_NESTED_HWVIRT
    2389     Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pMixedCtx) || !pVmcb->ctrl.NestedPaging.n.u1NestedPaging);
    2390 #endif
    23912372    if (   pVmcb->ctrl.NestedPaging.n.u1NestedPaging
    23922373        && pMixedCtx->cr3 != pVmcb->guest.u64CR3)
     
    26062587        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    26072588    }
    2608 
    2609 #ifdef VBOX_WITH_NESTED_HWVIRT
    2610     pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0 = false;
    2611 #endif
    26122589
    26132590    /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
     
    30773054
    30783055    Assert(!pVCpu->hm.s.Event.fPending);
    3079     Assert(pCtx->hwvirt.svm.fGif);
    3080 
    3081     PSVMVMCB pVmcbNstGst  = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    3082 
    3083     SVMEVENT Event;
    3084     Event.u = 0;
    3085     bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
    3086 
    3087     /** @todo SMI. SMIs take priority over NMIs. */
    3088     /*
    3089      * Check if the nested-guest can receive NMIs.
    3090      * NMIs are higher priority than regular interrupts.
    3091      */
    3092     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))
    3093     {
    3094         bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
    3095         if (fBlockNmi)
    3096             hmR0SvmSetIretIntercept(pVmcbNstGst);
    3097         else if (fIntShadow)
    3098         {
    3099             /** @todo Figure this out, how we shall manage virt. intercept if the
    3100              *        nested-guest already has one set and/or if we really need it? */
    3101 #if 0
    3102             hmR0SvmSetVirtIntrIntercept(pVmcbNstGst);
    3103 #endif
    3104         }
    3105         else
    3106         {
    3107             Log4(("Pending NMI\n"));
    3108 
    3109             Event.n.u1Valid  = 1;
    3110             Event.n.u8Vector = X86_XCPT_NMI;
    3111             Event.n.u3Type   = SVM_EVENT_NMI;
    3112 
    3113             hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
    3114             hmR0SvmSetIretIntercept(pVmcbNstGst);
    3115             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
    3116             return;
    3117         }
    3118     }
    3119 
    3120     /*
    3121      * Check if the nested-guest can receive external interrupts (PIC/APIC).
    3122      */
    3123     if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
    3124         && !pVCpu->hm.s.fSingleInstruction)
    3125     {
    3126         /* Note: it is critical we call CPUMCanSvmNstGstTakePhysIntr -before- modifying the nested-guests's V_INTR_MASKING
    3127            bit, currently it gets modified in hmR0SvmLoadGuestApicStateNested. */
    3128         bool const fIntEnabled = CPUMCanSvmNstGstTakePhysIntr(pCtx);
    3129         if (    fIntEnabled
    3130             && !fIntShadow)
    3131         {
    3132             uint8_t u8Interrupt;
    3133             int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
    3134             if (RT_SUCCESS(rc))
     3056
     3057    bool const fIntrEnabled = pCtx->hwvirt.svm.fGif && CPUMCanSvmNstGstTakePhysIntr(pCtx);
     3058    if (fIntrEnabled)
     3059    {
     3060        PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     3061        SVMEVENT Event;
     3062        Event.u = 0;
     3063
     3064        bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
     3065
     3066        /*
     3067         * Check if the nested-guest can receive NMIs.
     3068         * NMIs are higher priority than regular interrupts.
     3069         */
     3070        /** @todo SMI. SMIs take priority over NMIs. */
     3071        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))
     3072        {
     3073            bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
     3074            if (fBlockNmi)
     3075                hmR0SvmSetIretIntercept(pVmcbNstGst);
     3076            else if (fIntShadow)
    31353077            {
    3136                 Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
     3078                /** @todo Figure this out, how we shall manage virt. intercept if the
     3079                 *        nested-guest already has one set and/or if we really need it? */
     3080                //hmR0SvmSetVirtIntrIntercept(pVmcbNstGst);
     3081            }
     3082            else
     3083            {
     3084                Log4(("Pending NMI\n"));
     3085
     3086                Event.n.u1Valid  = 1;
     3087                Event.n.u8Vector = X86_XCPT_NMI;
     3088                Event.n.u3Type   = SVM_EVENT_NMI;
     3089
     3090                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
     3091                hmR0SvmSetIretIntercept(pVmcbNstGst);
     3092                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
     3093                return;
     3094            }
     3095        }
     3096
     3097        /*
     3098         * Check if the nested-guest can receive external interrupts (PIC/APIC).
     3099         */
     3100        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
     3101            && !pVCpu->hm.s.fSingleInstruction)
     3102        {
     3103            /* Note: it is critical we call CPUMCanSvmNstGstTakePhysIntr -before- modifying the nested-guests's V_INTR_MASKING
     3104               bit, currently it gets modified in hmR0SvmLoadGuestApicStateNested. */
     3105            bool const fIntEnabled = CPUMCanSvmNstGstTakePhysIntr(pCtx);
     3106            if (    fIntEnabled
     3107                && !fIntShadow)
     3108            {
     3109                uint8_t u8Interrupt;
     3110                int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
     3111                if (RT_SUCCESS(rc))
     3112                {
     3113                    Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
     3114
     3115                    Event.n.u1Valid  = 1;
     3116                    Event.n.u8Vector = u8Interrupt;
     3117                    Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
     3118
     3119                    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
     3120                }
     3121                else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
     3122                {
     3123                    /*
     3124                     * AMD-V has no TPR thresholding feature. We just avoid posting the interrupt.
     3125                     * We just avoid delivering the TPR-masked interrupt here. TPR will be updated
     3126                     * always via hmR0SvmLoadGuestState() -> hmR0SvmLoadGuestApicState().
     3127                     */
     3128                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
     3129                }
     3130                else
     3131                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
     3132            }
     3133            else
     3134            {
     3135                /** @todo Figure this out, how we shall manage virt. intercept if the
     3136                 *        nested-guest already has one set and/or if we really need it? */
     3137                //hmR0SvmSetVirtIntrIntercept(pVmcbNstGst);
     3138            }
     3139        }
     3140        /*
     3141         * Check if the nested-guest can receive virtual interrupts.
     3142         */
     3143        else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
     3144        {
     3145            bool const fVirtIntEnabled = CPUMCanSvmNstGstTakeVirtIntr(pCtx);
     3146            if (fVirtIntEnabled)
     3147            {
     3148                uint8_t const u8Interrupt = CPUMGetSvmNstGstInterrupt(pCtx);
     3149                Log4(("Injecting virtual interrupt u8Interrupt=%#x\n", u8Interrupt));
    31373150
    31383151                Event.n.u1Valid  = 1;
     
    31403153                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
    31413154
     3155                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
    31423156                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
    31433157            }
    3144             else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
    3145             {
    3146                 /*
    3147                  * AMD-V has no TPR thresholding feature. We just avoid posting the interrupt.
    3148                  * We just avoid delivering the TPR-masked interrupt here. TPR will be updated
    3149                  * always via hmR0SvmLoadGuestState() -> hmR0SvmLoadGuestApicState().
    3150                  */
    3151                 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
    3152             }
    3153             else
    3154                 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
    3155         }
    3156         else
    3157         {
    3158             /** @todo Figure this out, how we shall manage virt. intercept if the
    3159              *        nested-guest already has one set and/or if we really need it? */
    3160 #if 0
    3161             hmR0SvmSetVirtIntrIntercept(pVmcbNstGst);
    3162 #endif
    3163         }
    3164     }
    3165     /*
    3166      * Check if the nested-guest can receive virtual interrupts.
    3167      */
    3168     else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
    3169     {
    3170         bool const fIntEnabled = CPUMCanSvmNstGstTakeVirtIntr(pCtx);
    3171         if (fIntEnabled)
    3172         {
    3173             uint8_t const u8Interrupt = CPUMGetSvmNstGstInterrupt(pCtx);
    3174             Log4(("Injecting virtual interrupt u8Interrupt=%#x\n", u8Interrupt));
    3175 
    3176             Event.n.u1Valid  = 1;
    3177             Event.n.u8Vector = u8Interrupt;
    3178             Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
    3179 
    3180             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
    3181             hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
    31823158        }
    31833159    }
     
    31953171{
    31963172    Assert(!pVCpu->hm.s.Event.fPending);
    3197     Log4Func(("\n"));
    31983173
    31993174#ifdef VBOX_WITH_NESTED_HWVIRT
     
    32023177    bool const fGif       = true;
    32033178#endif
     3179    Log4Func(("fGif=%RTbool\n", fGif));
     3180
    32043181    /*
    32053182     * If the global interrupt flag (GIF) isn't set, even NMIs and other events are blocked.
     
    32153192        SVMEVENT Event;
    32163193        Event.u = 0;
     3194
     3195        Log4Func(("fGif=%RTbool fBlockInt=%RTbool fIntShadow=%RTbool APIC/PIC_Pending=%RTbool\n", fGif, fBlockInt, fIntShadow,
     3196                  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
    32173197
    32183198        /** @todo SMI. SMIs take priority over NMIs. */
     
    36193599
    36203600    /*
    3621      * Load the nested-guest state. We can optimize this later to be avoided when VMRUN is
    3622      * just emulated in hmR0SvmExecVmrun since the VMCB is already setup by the nested-hypervisor,
    3623      * We currently do this because we may pre-maturely return to ring-3 before executing the
    3624      * nested-guest and doing it here is simpler.
     3601     * Load the nested-guest state.
    36253602     */
    36263603    rc = hmR0SvmLoadGuestStateNested(pVCpu, pCtx);
     
    45974574    Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
    45984575
     4576#define HM_SVM_RET_VMEXIT_NESTED(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
     4577    do \
     4578    { \
     4579        return VBOXSTRICTRC_TODO(IEMExecSvmVmexit(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)); \
     4580    } while (0) \
     4581
    45994582#define HM_SVM_HANDLE_XCPT_EXIT_NESTED(a_uXcpt, a_XcptExitFn) \
    46004583    do \
    46014584    { \
    46024585        if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(a_uXcpt)) \
    4603             return hmR0SvmExecVmexit(pVCpu, pCtx); \
     4586            HM_SVM_RET_VMEXIT_NESTED(pVCpu, pVmcbNstGst->ctrl.u64ExitCode, pVmcbNstGst->ctrl.u64ExitInfo1, \
     4587                                     pVmcbNstGst->ctrl.u64ExitInfo2); \
    46044588        return a_XcptExitFn(pVCpu, pCtx, pSvmTransient); \
    46054589    } while (0) \
     
    46124596    PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
    46134597    PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     4598    uint64_t const      uExitCode        = pVmcbNstGstCtrl->u64ExitCode;
     4599    uint64_t const      uExitInfo1       = pVmcbNstGstCtrl->u64ExitInfo1;
     4600    uint64_t const      uExitInfo2       = pVmcbNstGstCtrl->u64ExitInfo2;
     4601
    46144602    switch (pSvmTransient->u64ExitCode)
    46154603    {
     
    46174605        {
    46184606            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_CPUID)
    4619                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4607                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    46204608            return hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient);
    46214609        }
     
    46244612        {
    46254613            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC)
    4626                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4614                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    46274615            return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient);
    46284616        }
     
    46314619        {
    46324620            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP)
    4633                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4621                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    46344622            return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient);
    46354623        }
     
    46394627        {
    46404628            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_MONITOR)
    4641                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4629                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    46424630            return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient);
    46434631        }
     
    46464634        {
    46474635            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_MWAIT)
    4648                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4636                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    46494637            return hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient);
    46504638        }
     
    46534641        {
    46544642            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_HLT)
    4655                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4643                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    46564644            return hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient);
    46574645        }
     
    46744662                        || (fInterceptRead  && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ))
    46754663                    {
    4676                         return hmR0SvmExecVmexit(pVCpu, pCtx);
     4664                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    46774665                    }
    46784666                }
     
    46844672                     */
    46854673                    Assert(rc == VERR_OUT_OF_RANGE);
    4686                     return hmR0SvmExecVmexit(pVCpu, pCtx);
     4674                    HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    46874675                }
    46884676            }
     
    47024690                bool const fIntercept = hmR0SvmIsIoInterceptActive(pvIoBitmap, &IoExitInfo);
    47034691                if (fIntercept)
    4704                     return hmR0SvmExecVmexit(pVCpu, pCtx);
     4692                    HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    47054693            }
    47064694            return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient);
     
    47094697        case SVM_EXIT_EXCEPTION_14:  /* X86_XCPT_PF */
    47104698        {
    4711             Assert(!pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging);
    4712             if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_PF))
    4713                 return hmR0SvmExecVmexit(pVCpu, pCtx);
    4714 
    4715             /* If the nested-guest isn't for intercepting #PFs, simply forward the #PF to the guest. */
    4716             uint32_t    const u32ErrCode    = pVmcbNstGstCtrl->u64ExitInfo1;
    4717             RTGCUINTPTR const uFaultAddress = pVmcbNstGstCtrl->u64ExitInfo2;
    4718             hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
    4719             return VINF_SUCCESS;
     4699            PVM pVM = pVCpu->CTX_SUFF(pVM);
     4700            if (pVM->hm.s.fNestedPaging)
     4701            {
     4702                uint32_t const u32ErrCode    = pVmcbNstGstCtrl->u64ExitInfo1;
     4703                uint64_t const uFaultAddress = pVmcbNstGstCtrl->u64ExitInfo2;
     4704
     4705                /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
     4706                if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_PF))
     4707                    HM_SVM_RET_VMEXIT_NESTED(pVCpu, SVM_EXIT_EXCEPTION_14, u32ErrCode, uFaultAddress);
     4708
     4709                /* If the nested-guest is not intercepting #PFs, forward the #PF to the nested-guest. */
     4710                hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
     4711                return VINF_SUCCESS;
     4712            }
     4713            return hmR0SvmExitXcptPFNested(pVCpu, pCtx,pSvmTransient);
    47204714        }
    47214715
     
    47434737        {
    47444738            if (pVmcbNstGstCache->u16InterceptRdCRx & (1U << (uint16_t)(pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0)))
    4745                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4739                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    47464740            return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);
    47474741        }
     
    47504744        case SVM_EXIT_WRITE_CR3:
    47514745        case SVM_EXIT_WRITE_CR4:
    4752         case SVM_EXIT_WRITE_CR8:
     4746        case SVM_EXIT_WRITE_CR8:   /** @todo Shouldn't writes to CR8 go to V_TPR instead since we run with V_INTR_MASKING set?? */
    47534747        {
    47544748            if (pVmcbNstGstCache->u16InterceptWrCRx & (1U << (uint16_t)(pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0)))
    4755                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4749                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    47564750            return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
    47574751        }
     
    47604754        {
    47614755            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_PAUSE)
    4762                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4756                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    47634757            return hmR0SvmExitPause(pVCpu, pCtx, pSvmTransient);
    47644758        }
     
    47674761        {
    47684762            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
    4769                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4763                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    47704764            return hmR0SvmNestedExitVIntr(pVCpu, pCtx, pSvmTransient);
    47714765        }
     
    47804774        {
    47814775            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_FERR_FREEZE)
    4782                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4776                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    47834777            return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
    47844778        }
     
    47874781        {
    47884782            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_NMI)
    4789                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4783                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    47904784            return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
    47914785        }
     
    47944788        {
    47954789            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_INVLPG)
    4796                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4790                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    47974791            return hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient);
    47984792        }
     
    48014795        {
    48024796            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_WBINVD)
    4803                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4797                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    48044798            return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient);
    48054799        }
     
    48084802        {
    48094803            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_INVD)
    4810                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4804                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    48114805            return hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient);
    48124806        }
     
    48154809        {
    48164810            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDPMC)
    4817                 return hmR0SvmExecVmexit(pVCpu, pCtx);
     4811                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    48184812            return hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient);
    48194813        }
     
    48294823                {
    48304824                    if (pVmcbNstGstCache->u16InterceptRdDRx & (1U << (uint16_t)(pSvmTransient->u64ExitCode - SVM_EXIT_READ_DR0)))
    4831                         return hmR0SvmExecVmexit(pVCpu, pCtx);
     4825                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    48324826                    return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
    48334827                }
     
    48394833                {
    48404834                    if (pVmcbNstGstCache->u16InterceptWrDRx & (1U << (uint16_t)(pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_DR0)))
    4841                         return hmR0SvmExecVmexit(pVCpu, pCtx);
     4835                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    48424836                    return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
    48434837                }
     
    48574851                {
    48584852                    if (pVmcbNstGstCache->u32InterceptXcpt & (1U << (uint32_t)(pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0)))
    4859                         return hmR0SvmExecVmexit(pVCpu, pCtx);
     4853                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    48604854                    /** @todo Write hmR0SvmExitXcptGeneric! */
    48614855                    return VERR_NOT_IMPLEMENTED;
     
    48654859                {
    48664860                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_XSETBV)
    4867                         return hmR0SvmExecVmexit(pVCpu, pCtx);
     4861                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    48684862                    return hmR0SvmExitXsetbv(pVCpu, pCtx, pSvmTransient);
    48694863                }
     
    48724866                {
    48734867                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_TASK_SWITCH)
    4874                         return hmR0SvmExecVmexit(pVCpu, pCtx);
     4868                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    48754869                    return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
    48764870                }
     
    48794873                {
    48804874                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET)
    4881                         return hmR0SvmExecVmexit(pVCpu, pCtx);
     4875                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    48824876                    return hmR0SvmNestedExitIret(pVCpu, pCtx, pSvmTransient);
    48834877                }
     
    48864880                {
    48874881                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_SHUTDOWN)
    4888                         return hmR0SvmExecVmexit(pVCpu, pCtx);
     4882                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    48894883                    return hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient);
    48904884                }
     
    48934887                {
    48944888                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_SMI)
    4895                         return hmR0SvmExecVmexit(pVCpu, pCtx);
     4889                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    48964890                    return hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient);
    48974891                }
     
    49004894                {
    49014895                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_INIT)
    4902                         return hmR0SvmExecVmexit(pVCpu, pCtx);
     4896                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    49034897                    return hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient);
    49044898                }
     
    49074901                {
    49084902                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMMCALL)
    4909                         hmR0SvmExecVmexit(pVCpu, pCtx);
     4903                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    49104904                    return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
    49114905                }
     
    49144908                {
    49154909                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_CLGI)
    4916                         hmR0SvmExecVmexit(pVCpu, pCtx);
     4910                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    49174911                     return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient);
    49184912                }
     
    49214915                {
    49224916                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_STGI)
    4923                         hmR0SvmExecVmexit(pVCpu, pCtx);
     4917                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    49244918                     return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient);
    49254919                }
     
    49284922                {
    49294923                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMLOAD)
    4930                         hmR0SvmExecVmexit(pVCpu, pCtx);
     4924                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    49314925                    return hmR0SvmExitVmload(pVCpu, pCtx, pSvmTransient);
    49324926                }
     
    49354929                {
    49364930                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMSAVE)
    4937                         hmR0SvmExecVmexit(pVCpu, pCtx);
     4931                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    49384932                    return hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient);
    49394933                }
     
    49424936                {
    49434937                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_INVLPGA)
    4944                         hmR0SvmExecVmexit(pVCpu, pCtx);
     4938                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    49454939                    return hmR0SvmExitInvlpga(pVCpu, pCtx, pSvmTransient);
    49464940                }
     
    49494943                {
    49504944                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN)
    4951                         hmR0SvmExecVmexit(pVCpu, pCtx);
     4945                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    49524946                    return hmR0SvmExitVmrun(pVCpu, pCtx, pSvmTransient);
    49534947                }
     
    49564950                {
    49574951                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RSM)
    4958                         hmR0SvmExecVmexit(pVCpu, pCtx);
     4952                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    49594953                    return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
    49604954                }
     
    49634957                {
    49644958                    if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_SKINIT)
    4965                         hmR0SvmExecVmexit(pVCpu, pCtx);
     4959                        HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    49664960                    return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
    49674961                }
     
    49864980
    49874981#undef HM_SVM_HANDLE_XCPT_EXIT_NESTED
     4982#undef HM_SVM_RET_VMEXIT_NESTED
    49884983}
    49894984#endif
     
    57925787
    57935788
    5794 #if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM)
    5795 /**
    5796  * Performs an SVM world-switch (VMRUN, \#VMEXIT) updating PGM and HM internals.
    5797  *
    5798  * @returns VBox status code.
    5799  * @param   pVCpu       The cross context virtual CPU structure.
    5800  * @param   pCtx        The guest-CPU context.
    5801  */
    5802 static int hmR0SvmNstGstWorldSwitch(PVMCPU pVCpu, PCPUMCTX pCtx)
    5803 {
    5804     /** @todo What about informing PGM about CR0.WP? */
    5805     PGMFlushTLB(pVCpu, pCtx->cr3, true /* fGlobal */);
    5806 
    5807     /* Inform CPUM (recompiler), can later be removed. */
    5808     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    5809 
    5810     /*
    5811      * Inform PGM about paging mode changes.
    5812      * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet.
    5813      */
    5814     return PGMChangeMode(pVCpu, pCtx->cr0 | X86_CR0_PE, pCtx->cr4, pCtx->msrEFER);
    5815 }
    5816 
    5817 
    5818 /**
    5819  * Performs a \#VMEXIT when the VMRUN was emulating using hmR0SvmExecVmrun and
    5820  * optionally went ahead with hardware-assisted SVM execution.
    5821  *
    5822  * @returns VBox status code.
    5823  * @param   pVCpu           The cross context virtual CPU structure.
    5824  * @param   pCtx            Pointer to the guest-CPU context.
    5825  */
    5826 static int hmR0SvmExecVmexit(PVMCPU pVCpu, PCPUMCTX pCtx)
    5827 {
    5828     /*
    5829      * Restore the modifications we did to the nested-guest VMCB in order
    5830      * to executing the nested-guesting using hardware-assisted SVM.
    5831      */
    5832     PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    5833     HMSvmNstGstVmExitNotify(pVCpu, pVmcbNstGst);
    5834 
    5835     Log4(("hmR0SvmExecVmexit: uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pVmcbNstGst->ctrl.u64ExitCode,
    5836           pVmcbNstGst->ctrl.u64ExitInfo1, pVmcbNstGst->ctrl.u64ExitInfo2));
    5837 
    5838     /*
    5839      * Write the nested-guest VMCB back to guest memory.
    5840      */
    5841     RTGCPHYS const GCPhysVmcb = pCtx->hwvirt.svm.GCPhysVmcb;
    5842     int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcb, pVmcbNstGst, sizeof(*pVmcbNstGst));
    5843 
    5844     /*
    5845      * Clear our cache of the nested-guest VMCB controls.
    5846      */
    5847     PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
    5848     memset(pVmcbNstGstCtrl, 0, sizeof(*pVmcbNstGstCtrl));
    5849     Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
    5850 
    5851     /*
    5852      * Disable the global interrupt flag to not cause any interrupts or NMIs
    5853      * in the guest.
    5854      */
    5855     pCtx->hwvirt.svm.fGif = 0;
    5856 
    5857     /*
    5858      * Restore the guest's "host" state.
    5859      */
    5860     CPUMSvmVmExitRestoreHostState(pCtx);
    5861 
    5862     /*
    5863      * Restore the guest's force-flags.
    5864      */
    5865     if (pCtx->hwvirt.fLocalForcedActions)
    5866     {
    5867         VMCPU_FF_SET(pVCpu, pCtx->hwvirt.fLocalForcedActions);
    5868         pCtx->hwvirt.fLocalForcedActions = 0;
    5869     }
    5870 
    5871     /*
    5872      * Make sure if VMRUN happens outside this SVM R0 code, we  don't skip setting
    5873      * things up that are required for executing the nested-guest using hardware-assisted SVM.
    5874      */
    5875     pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0 = false;
    5876     HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
    5877 
    5878     if (RT_SUCCESS(rc))
    5879     {
    5880         rc = hmR0SvmNstGstWorldSwitch(pVCpu, pCtx);
    5881         if (rc == VINF_SUCCESS)
    5882             rc = VINF_SVM_VMEXIT;
    5883 
    5884         Log4(("hmR0SvmExecVmexit: #VMEXIT success! rc=%d\n", rc));
    5885     }
    5886     else
    5887         Log(("hmR0SvmExecVmexit: Failed to write guest-VMCB at %#RGp, rc=%d\n", GCPhysVmcb, rc));
    5888 
    5889     return rc;
    5890 }
    5891 
    5892 
    5893 /**
    5894  * Setup the nested-guest for hardware-assisted SVM execution.
    5895  *
    5896  * @returns VBox status code.
    5897  * @param   pVCpu           The cross context virtual CPU structure.
    5898  * @param   pCtx            Pointer to the guest-CPU context.
    5899  * @param   GCPhysVmcb      The nested-guest physical address of its VMCB.
    5900  * @param   cbInstr         Length of the VMRUN instruction in bytes.
    5901  */
    5902 static int hmR0SvmExecVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPHYS GCPhysVmcb, uint8_t cbInstr)
    5903 {
    5904     Assert(CPUMGetGuestCPL(pVCpu) == 0);
    5905     Assert(!pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0);
    5906 
    5907     /*
    5908      * Cache the physical address of the VMCB for #VMEXIT exceptions.
    5909      */
    5910     pCtx->hwvirt.svm.GCPhysVmcb = GCPhysVmcb;
    5911 
    5912     /*
    5913      * Save the "host" (guest-state) so that when we do a #VMEXIT we can restore the guest-state.
    5914      *
    5915      * The real host-state shall be saved/restored by the physical CPU once it executes VMRUN
    5916      * with the nested-guest VMCB.
    5917      */
    5918     CPUMSvmVmRunSaveHostState(pCtx, cbInstr);
    5919 
    5920     /*
    5921      * Read the nested-guest VMCB state.
    5922      */
    5923     PVM pVM = pVCpu->CTX_SUFF(pVM);
    5924     int rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pVmcb), GCPhysVmcb, sizeof(SVMVMCB));
    5925     if (RT_SUCCESS(rc))
    5926     {
    5927         PSVMVMCB          pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    5928         PSVMVMCBCTRL      pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
    5929         PSVMVMCBSTATESAVE pVmcbNstGstState = &pVmcbNstGst->guest;
    5930 
    5931         /*
    5932          * Validate nested-guest state and controls.
    5933          * The rest shall be done by the physical CPU.
    5934          */
    5935         /* VMRUN must always be intercepted. */
    5936         if (!CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VMRUN))
    5937         {
    5938             Log(("hmR0SvmExecVmrun: VMRUN instruction not intercepted -> #VMEXIT\n"));
    5939             pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
    5940             return hmR0SvmExecVmexit(pVCpu, pCtx);
    5941         }
    5942 
    5943         /* Nested paging. */
    5944         if (    pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging
    5945             && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)
    5946         {
    5947             Log(("hmR0SvmExecVmrun: Nested paging not supported -> #VMEXIT\n"));
    5948             pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
    5949             return hmR0SvmExecVmexit(pVCpu, pCtx);
    5950         }
    5951         /** @todo When implementing nested-paging for the nested-guest don't forget to
    5952          *        adjust/check PAT MSR. */
    5953 
    5954         /* AVIC. */
    5955         if (    pVmcbNstGstCtrl->IntCtrl.n.u1AvicEnable
    5956             && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
    5957         {
    5958             Log(("hmR0SvmExecVmrun: AVIC not supported -> #VMEXIT\n"));
    5959             pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
    5960             return hmR0SvmExecVmexit(pVCpu, pCtx);
    5961         }
    5962 
    5963         /* Last branch record (LBR) virtualization. */
    5964         if (    (pVmcbNstGstCtrl->u64LBRVirt & SVM_LBR_VIRT_ENABLE)
    5965             && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt)
    5966         {
    5967             Log(("hmR0SvmExecVmrun: LBR virtualization not supported -> #VMEXIT\n"));
    5968             pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
    5969             return hmR0SvmExecVmexit(pVCpu, pCtx);
    5970         }
    5971 
    5972         /*
    5973          * MSR permission bitmap (MSRPM).
    5974          */
    5975         RTGCPHYS const GCPhysMsrBitmap = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
    5976         Assert(pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap));
    5977         rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap), GCPhysMsrBitmap,
    5978                                      SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);
    5979         if (RT_FAILURE(rc))
    5980         {
    5981             Log(("hmR0SvmExecVmrun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
    5982             pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
    5983             return hmR0SvmExecVmexit(pVCpu, pCtx);
    5984         }
    5985 
    5986         /*
    5987          * IO permission bitmap (IOPM).
    5988          */
    5989         RTGCPHYS const GCPhysIOBitmap = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
    5990         rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap), GCPhysIOBitmap,
    5991                                      SVM_IOPM_PAGES * X86_PAGE_4K_SIZE);
    5992         if (RT_FAILURE(rc))
    5993         {
    5994             Log(("hmR0SvmExecVmrun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc));
    5995             pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
    5996             return hmR0SvmExecVmexit(pVCpu, pCtx);
    5997         }
    5998 
    5999         /*
    6000          * EFER MSR.
    6001          */
    6002         uint64_t uValidEfer;
    6003         rc = CPUMQueryValidatedGuestEfer(pVM, pVmcbNstGstState->u64CR0, pVmcbNstGstState->u64EFER, pVmcbNstGstState->u64EFER,
    6004                                          &uValidEfer);
    6005         if (RT_FAILURE(rc))
    6006         {
    6007             Log(("hmR0SvmExecVmrun: EFER invalid uOldEfer=%#RX64 -> #VMEXIT\n", pVmcbNstGstState->u64EFER));
    6008             pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
    6009             return hmR0SvmExecVmexit(pVCpu, pCtx);
    6010         }
    6011         bool const fLongModeEnabled         = RT_BOOL(uValidEfer & MSR_K6_EFER_LME);
    6012         bool const fPaging                  = RT_BOOL(pVmcbNstGstState->u64CR0 & X86_CR0_PG);
    6013         bool const fLongModeWithPaging      = fLongModeEnabled && fPaging;
    6014         /* Adjust EFER.LMA (this is normally done by the CPU when system software writes CR0) and update it. */
    6015         if (fLongModeWithPaging)
    6016             uValidEfer |= MSR_K6_EFER_LMA;
    6017 
    6018         /*
    6019          * Check for pending virtual interrupts.
    6020          */
    6021         if (pVmcbNstGstCtrl->IntCtrl.n.u1VIrqPending)
    6022             VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
    6023         else
    6024             Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));
    6025 
    6026         /*
    6027          * Preserve the required force-flags.
    6028          *
    6029          * We only preserve the force-flags that would affect the execution of the
    6030          * nested-guest (or the guest).
    6031          *
    6032          *   - VMCPU_FF_INHIBIT_INTERRUPTS need -not- be preserved as it's for a single
    6033          *     instruction which is this VMRUN instruction itself.
    6034          *
    6035          *   - VMCPU_FF_BLOCK_NMIS needs to be preserved as it blocks NMI until the
    6036          *     execution of a subsequent IRET instruction in the guest.
    6037          *
    6038          *   - The remaining FFs (e.g. timers) can stay in place so that we will be
    6039          *     able to generate interrupts that should cause #VMEXITs for the
    6040          *     nested-guest.
    6041          */
    6042         pCtx->hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
    6043 
    6044         /*
    6045          * Interrupt shadow.
    6046          */
    6047         if (pVmcbNstGstCtrl->u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
    6048         {
    6049             LogFlow(("hmR0SvmExecVmrun: setting interrupt shadow. inhibit PC=%#RX64\n", pVmcbNstGstState->u64RIP));
    6050             /** @todo will this cause trouble if the nested-guest is 64-bit but the guest is 32-bit? */
    6051             EMSetInhibitInterruptsPC(pVCpu, pVmcbNstGstState->u64RIP);
    6052         }
    6053 
    6054         /*
    6055          * Load the guest-CPU state.
    6056          * Skip CPL adjustments (will be done by the hardware).
    6057          */
    6058         HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGstState, ES, es);
    6059         HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGstState, CS, cs);
    6060         HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGstState, SS, ss);
    6061         HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGstState, DS, ds);
    6062         pCtx->gdtr.cbGdt   = pVmcbNstGstState->GDTR.u32Limit;
    6063         pCtx->gdtr.pGdt    = pVmcbNstGstState->GDTR.u64Base;
    6064         pCtx->idtr.cbIdt   = pVmcbNstGstState->IDTR.u32Limit;
    6065         pCtx->idtr.pIdt    = pVmcbNstGstState->IDTR.u64Base;
    6066         pCtx->cr0          = pVmcbNstGstState->u64CR0;
    6067         pCtx->cr4          = pVmcbNstGstState->u64CR4;
    6068         pCtx->cr3          = pVmcbNstGstState->u64CR3;
    6069         pCtx->cr2          = pVmcbNstGstState->u64CR2;
    6070         pCtx->dr[6]        = pVmcbNstGstState->u64DR6;
    6071         pCtx->dr[7]        = pVmcbNstGstState->u64DR7;
    6072         pCtx->rflags.u64   = pVmcbNstGstState->u64RFlags;
    6073         pCtx->rax          = pVmcbNstGstState->u64RAX;
    6074         pCtx->rsp          = pVmcbNstGstState->u64RSP;
    6075         pCtx->rip          = pVmcbNstGstState->u64RIP;
    6076         pCtx->msrEFER      = uValidEfer;
    6077 
    6078         /* Mask DR6, DR7 bits mandatory set/clear bits. */
    6079         pCtx->dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
    6080         pCtx->dr[6] |= X86_DR6_RA1_MASK;
    6081         pCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
    6082         pCtx->dr[7] |= X86_DR7_RA1_MASK;
    6083 
    6084         /*
    6085          * Set up the nested-guest for executing it using hardware-assisted SVM.
    6086          */
    6087         hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
    6088 
    6089         /*
    6090          * VMRUN loads a subset of the guest-CPU state (see above) and nothing else. Ensure
    6091          * hmR0SvmLoadGuestStateNested doesn't need to load anything back to the VMCB cache
    6092          * as we go straight into executing the nested-guest.
    6093          *
    6094          * If we fall back to ring-3 we would have to re-load things from the guest-CPU
    6095          * state into the VMCB as we are unsure what state we're in (e.g., VMRUN ends up
    6096          * getting executed in IEM along with a handful of nested-guest instructions and
    6097          * we have to continue executing the nested-guest in R0 since IEM doesn't know
    6098          * about this VMCB cache which is in HM).
    6099          */
    6100         PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    6101         pNstGstVmcbCache->fVmrunEmulatedInR0 = true;
    6102 
    6103         /*
    6104          * We flag a CR3 change to ensure loading the host-physical address of CR3 into
    6105          * the nested-guest VMCB in hmR0SvmLoadGuestControlRegsNested.
    6106          */
    6107         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_ALL_GUEST);
    6108         HMCPU_CF_SET(pVCpu,   HM_CHANGED_HOST_GUEST_SHARED_STATE | HM_CHANGED_GUEST_CR3);
    6109 
    6110         /*
    6111          * Clear global interrupt flags to allow interrupts and NMIs in the guest.
    6112          */
    6113         pCtx->hwvirt.svm.fGif = 1;
    6114 
    6115         Log4(("hmR0SvmExecVmrun: CR0=%#RX32 CR3=%#RX64 CR4=%#RX32\n", pCtx->cr0, pCtx->cr3, pCtx->cr4));
    6116         return hmR0SvmNstGstWorldSwitch(pVCpu, pCtx);
    6117     }
    6118 
    6119     return rc;
    6120 }
    6121 #endif /* VBOX_WITH_NESTED_HWVIRT && !VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM */
    6122 
    6123 
    61245789/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    61255790/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
     
#ifdef VBOX_WITH_NESTED_HWVIRT
/**
 * \#VMEXIT handler for \#PF occurring while in nested-guest execution
 * (SVM_EXIT_EXCEPTION_14). Conditional \#VMEXIT.
 *
 * Handles a guest page fault taken while executing the nested-guest with
 * hardware-assisted SVM: either syncs the shadow page tables, reflects the
 * \#PF back into the nested-guest, or escalates to a \#DF.
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pCtx            Pointer to the guest-CPU context.
 * @param   pSvmTransient   Pointer to the SVM-transient structure for this
 *                          \#VMEXIT (supplies the vectoring-\#PF flags).
 */
HMSVM_EXIT_DECL hmR0SvmExitXcptPFNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();

    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();

    /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
    /* For exception intercepts, EXITINFO1 carries the #PF error code and EXITINFO2 the
       faulting address; the error code fits in 32 bits, hence the deliberate narrowing. */
    PSVMVMCB       pVmcb         = pVCpu->hm.s.svm.pVmcb;
    uint32_t       u32ErrCode    = pVmcb->ctrl.u64ExitInfo1;
    uint64_t const uFaultAddress = pVmcb->ctrl.u64ExitInfo2;

    Log4(("#PFNested: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 u32ErrCode=%#RX32 CR3=%#RX64\n", uFaultAddress, pCtx->cs.Sel,
          pCtx->rip, u32ErrCode, pCtx->cr3));

    /* If it's a vectoring #PF, have the caller re-inject the original event via TRPM, as
       PGMTrap0eHandler() is incapable of differentiating between instruction emulation and
       event injection that caused a #PF. See @bugref{6607}. */
    if (pSvmTransient->fVectoringPF)
    {
        Assert(pVCpu->hm.s.Event.fPending);
        return VINF_EM_RAW_INJECT_TRPM_EVENT;
    }

    /* This handler is only reached on the shadow-paging path. */
    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);

    /* Hand the fault to PGM; it may sync shadow page tables, emulate MMIO, or decide the
       fault is genuinely the guest's. */
    TRPMAssertXcptPF(pVCpu, uFaultAddress, u32ErrCode);
    int rc = PGMTrap0eHandler(pVCpu, u32ErrCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);

    Log4(("#PFNested: rc=%Rrc\n", rc));

    if (rc == VINF_SUCCESS)
    {
        /* Successfully synced shadow pages tables or emulated an MMIO instruction. */
        TRPMResetTrap(pVCpu);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
        /* PGM may have touched a lot of guest state; force a full reload on re-entry. */
        HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
        return rc;
    }

    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        pVCpu->hm.s.Event.fPending = false;     /* In case it's a contributory or vectoring #PF. */

        if (!pSvmTransient->fVectoringDoublePF)
        {
            /* It's a nested-guest page fault and needs to be reflected to the nested-guest. */
            u32ErrCode = TRPMGetErrorCode(pVCpu);        /* The error code might have been changed. */
            TRPMResetTrap(pVCpu);
            hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
        }
        else
        {
            /* A nested-guest page-fault occurred during delivery of a page-fault. Inject #DF. */
            TRPMResetTrap(pVCpu);
            hmR0SvmSetPendingXcptDF(pVCpu);
            Log4(("#PF: Pending #DF due to vectoring #PF\n"));
        }

        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
        return VINF_SUCCESS;
    }

    /* Anything else (e.g. further emulation needed) is propagated to the caller. */
    TRPMResetTrap(pVCpu);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
    return rc;
}
     7261
     7262
     7263/**
    75287264 * \#VMEXIT handler for CLGI (SVM_EXIT_CLGI). Conditional \#VMEXIT.
    75297265 */
     
    75317267{
    75327268    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7269
    75337270    /** @todo Stat. */
    75347271    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClgi); */
     
    75457282{
    75467283    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7284
    75477285    /** @todo Stat. */
    75487286    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitStgi); */
     
    75597297{
    75607298    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7299
    75617300    /** @todo Stat. */
    75627301    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmload); */
     
    75737312{
    75747313    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7314
    75757315    /** @todo Stat. */
    75767316    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmsave); */
     
    76057345    VBOXSTRICTRC rcStrict;
    76067346    uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
    7607 #if defined(VBOX_WITH_NESTED_HWVIRT) && defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM)
    76087347    rcStrict = IEMExecDecodedVmrun(pVCpu, cbInstr);
    7609 #else
    7610     rcStrict = hmR0SvmExecVmrun(pVCpu, pCtx, pCtx->rax, cbInstr);
    76117348    if (rcStrict == VINF_SUCCESS)
     7349    {
    76127350        rcStrict = VINF_SVM_VMRUN;
    7613 #endif
     7351        HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
     7352    }
    76147353    return VBOXSTRICTRC_VAL(rcStrict);
    76157354}
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette