VirtualBox

Changeset 67156 in vbox for trunk


Timestamp: May 31, 2017 9:10:12 AM (7 years ago)
Author: vboxsync
Message: VMM/HMSVMR0, VMM/HMSVMAll: Nested Hw.virt: Fixes.
Location: trunk
Files: 3 edited

Legend: unchanged lines are prefixed with a space, added lines with '+', removed lines with '-'.
  • trunk/include/VBox/vmm/hm.h

--- trunk/include/VBox/vmm/hm.h (r66751)
+++ trunk/include/VBox/vmm/hm.h (r67156)
@@ -156,5 +156,5 @@
 VMM_INT_DECL(void)              HMVmxNstGstVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason);
 VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfRipUpdated);
-VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPHYS GCPhysVmcb);
+VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr, RTGCPHYS GCPhysVmcb);
 VMM_INT_DECL(uint8_t)           HMSvmNstGstGetInterrupt(PCCPUMCTX pCtx);
 VMM_INT_DECL(bool)              HMSvmNstGstCanTakeInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx);
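
The prototype change threads the decoded instruction length through to the VMRUN emulation. A minimal standalone sketch of the new call contract; the names here (DecodedInsn, EmulateVmrun) are illustrative stand-ins, not VirtualBox API:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins; the real code passes PVMCPU/PCPUMCTX/RTGCPHYS. */
    typedef struct DecodedInsn
    {
        uint8_t  cbInstr;      /* encoded length, known to the decoder */
        uint64_t GCPhysVmcb;   /* VMCB address, taken from guest RAX   */
    } DecodedInsn;

    /* New-style callee: cbInstr tells it where the outer guest resumes
       after VMRUN (see the host-state hunk in HMSVMAll.cpp below). */
    static int EmulateVmrun(uint64_t rip, uint8_t cbInstr, uint64_t GCPhysVmcb)
    {
        printf("VMRUN at %#llx (%u bytes), VMCB %#llx, host RIP saved as %#llx\n",
               (unsigned long long)rip, cbInstr, (unsigned long long)GCPhysVmcb,
               (unsigned long long)(rip + cbInstr));
        return 0;
    }

    int main(void)
    {
        DecodedInsn insn = { 3 /* 0F 01 D8, no prefixes */, 0x1000 };
        return EmulateVmrun(0x401000, insn.cbInstr, insn.GCPhysVmcb);
    }
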
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

--- trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp (r66751)
+++ trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp (r67156)
@@ -249,13 +249,15 @@
  * @param   pVCpu               The cross context virtual CPU structure.
  * @param   pCtx                Pointer to the guest-CPU context.
+ * @param   cbInstr             The length of the VMRUN instruction.
  * @param   GCPhysVmcb          Guest physical address of the VMCB to run.
  */
 /** @todo move this to IEM and make the VMRUN version that can execute under
  *        hardware SVM here instead. */
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPHYS GCPhysVmcb)
+VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr, RTGCPHYS GCPhysVmcb)
 {
     Assert(pVCpu);
     Assert(pCtx);
     PVM pVM = pVCpu->CTX_SUFF(pVM);
+    Log3(("HMSvmVmrun\n"));
 
     /*
     
@@ -283,5 +285,5 @@
         pHostState->uCr4     = pCtx->cr4;
         pHostState->rflags   = pCtx->rflags;
-        pHostState->uRip     = pCtx->rip;
+        pHostState->uRip     = pCtx->rip + cbInstr;
         pHostState->uRsp     = pCtx->rsp;
         pHostState->uRax     = pCtx->rax;
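
This is the substance of the cbInstr fix: per the AMD APM, VMRUN saves the host RIP pointing at the instruction following VMRUN, so saving pCtx->rip unadjusted would make the outer guest re-execute VMRUN on every #VMEXIT. A toy standalone model of the two behaviours:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model: emulate VMRUN, take an immediate #VMEXIT, resume at the
       saved host RIP.  fFixed selects the r67156 behaviour. */
    static uint64_t ResumeRipAfterVmexit(uint64_t rip, uint8_t cbInstr, int fFixed)
    {
        uint64_t uHostRip = fFixed ? rip + cbInstr : rip; /* host state save */
        return uHostRip;                                  /* #VMEXIT reload  */
    }

    int main(void)
    {
        uint64_t const rip = 0x401000;
        printf("old: resume at %#llx (the VMRUN itself -> entered again)\n",
               (unsigned long long)ResumeRipAfterVmexit(rip, 3, 0));
        printf("new: resume at %#llx (the following instruction)\n",
               (unsigned long long)ResumeRipAfterVmexit(rip, 3, 1));
        return 0;
    }
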
     
@@ -337,7 +339,9 @@
 
             /* IO permission bitmap. */
-            RTGCPHYS GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;
+            RTGCPHYS const GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;
             if (   (GCPhysIOBitmap & X86_PAGE_4K_OFFSET_MASK)
-                || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap))
+                || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap)
+                || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + X86_PAGE_4K_SIZE)
+                || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + (X86_PAGE_4K_SIZE << 1)))
             {
                 Log(("HMSvmVmRun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap));
     
@@ -346,7 +350,8 @@
 
             /* MSR permission bitmap. */
-            RTGCPHYS GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;
+            RTGCPHYS const GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;
             if (   (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
-                || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap))
+                || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap)
+                || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap + X86_PAGE_4K_SIZE))
             {
                 Log(("HMSvmVmRun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap));
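
Both bitmap checks above follow the same pattern: previously only the first page was validated, but the I/O permission map spans three 4K pages and the MSR permission map two (cf. the SVM_IOPM_PAGES/SVM_MSRPM_PAGES reads added further down), so every page must be normal RAM. A standalone sketch of the strengthened check; IsGCPhysNormal is a toy stand-in for PGMPhysIsGCPhysNormal:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_4K_SIZE        0x1000u
    #define PAGE_4K_OFFSET_MASK 0xfffu

    /* Toy stand-in for PGMPhysIsGCPhysNormal(): is this page ordinary RAM? */
    static bool IsGCPhysNormal(uint64_t GCPhys)
    {
        return GCPhys < 0x10000000u;   /* pretend RAM ends at 256 MB */
    }

    /* The strengthened check: 4K alignment plus every page of the bitmap. */
    static bool IsPermBitmapValid(uint64_t GCPhysBitmap, unsigned cPages)
    {
        if (GCPhysBitmap & PAGE_4K_OFFSET_MASK)
            return false;
        for (unsigned i = 0; i < cPages; i++)
            if (!IsGCPhysNormal(GCPhysBitmap + i * PAGE_4K_SIZE))
                return false;
        return true;
    }

    int main(void)
    {
        /* Last page of the 3-page IOPM falls outside RAM -> reject. */
        printf("IOPM  @ 0x0fffe000: %s\n", IsPermBitmapValid(0x0fffe000u, 3) ? "ok" : "-> #VMEXIT");
        printf("MSRPM @ 0x00200000: %s\n", IsPermBitmapValid(0x00200000u, 2) ? "ok" : "-> #VMEXIT");
        return 0;
    }
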
     
@@ -378,4 +383,28 @@
 
             /** @todo gPAT MSR validation? */
+
+            /*
+             * Copy the IO permission bitmap into the cache.
+             */
+            Assert(pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap));
+            rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap), GCPhysIOBitmap,
+                                         SVM_IOPM_PAGES * X86_PAGE_4K_SIZE);
+            if (RT_FAILURE(rc))
+            {
+                Log(("HMSvmVmRun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc));
+                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+            }
+
+            /*
+             * Copy the MSR permission bitmap into the cache.
+             */
+            Assert(pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap));
+            rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap), GCPhysMsrBitmap,
+                                         SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);
+            if (RT_FAILURE(rc))
+            {
+                Log(("HMSvmVmRun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
+                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+            }
 
             /*
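
The newly added block snapshots both permission bitmaps from guest-physical memory into per-VCPU caches, so the intercept checks later in this file can run without touching guest memory again (and the guest cannot flip bits mid-execution). A minimal sketch of the caching step; PhysSimpleRead is a toy stand-in for PGMPhysSimpleReadGCPhys:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_4K 0x1000u

    /* Toy guest-physical RAM and a stand-in for PGMPhysSimpleReadGCPhys(). */
    static uint8_t g_abGuestMem[4 * PAGE_4K];

    static int PhysSimpleRead(void *pvDst, uint64_t GCPhys, size_t cb)
    {
        if (GCPhys + cb > sizeof(g_abGuestMem))
            return -1;                          /* maps to RT_FAILURE(rc) */
        memcpy(pvDst, &g_abGuestMem[GCPhys], cb);
        return 0;
    }

    /* Per-VCPU cache, mirroring pCtx->hwvirt.svm.pvIoBitmap: snapshot once
       at VMRUN, then all intercept checks are cheap local reads. */
    static uint8_t g_abIoBitmapCache[3 * PAGE_4K];

    int main(void)
    {
        uint64_t const GCPhysIOBitmap = 0x1000;
        if (PhysSimpleRead(g_abIoBitmapCache, GCPhysIOBitmap, sizeof(g_abIoBitmapCache)) != 0)
        {
            printf("reading IOPM at %#llx failed -> #VMEXIT(SVM_EXIT_INVALID)\n",
                   (unsigned long long)GCPhysIOBitmap);
            return 1;
        }
        printf("IOPM cached (%u bytes)\n", (unsigned)sizeof(g_abIoBitmapCache));
        return 0;
    }
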
     
@@ -468,10 +497,13 @@
             /*
              * TLB flush control.
-             */
+             * Currently disabled since it's redundant as we unconditionally flush the TLB below.
+             */
+#if 0
             /** @todo @bugref{7243}: ASID based PGM TLB flushes. */
             if (   pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE
                 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
                 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
                 PGMFlushTLB(pVCpu, VmcbNstGst.u64CR3, true /* fGlobal */);
+#endif
 
             /** @todo @bugref{7243}: SVM TSC offset, see tmCpuTickGetInternal. */
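
The per-VMCB TLB flush is compiled out because the VMRUN path (next hunk) now flushes unconditionally. For reference, a sketch of what the disabled block decides; the TLB_CONTROL encodings are taken from the AMD APM, not from this changeset:

    #include <stdint.h>
    #include <stdio.h>

    /* VMCB TLB_CONTROL encodings (numeric values per the AMD APM). */
    enum
    {
        TLB_FLUSH_NOTHING                       = 0x00,
        TLB_FLUSH_ENTIRE                        = 0x01,
        TLB_FLUSH_SINGLE_CONTEXT                = 0x03,
        TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS = 0x07
    };

    /* What the #if 0'd block decides: any of the three flush kinds triggers
       a full global flush -- a safe over-approximation, and redundant here
       because the VMRUN emulation now flushes unconditionally a bit later. */
    static int NeedsTlbFlush(uint8_t u8TLBFlush)
    {
        return u8TLBFlush == TLB_FLUSH_ENTIRE
            || u8TLBFlush == TLB_FLUSH_SINGLE_CONTEXT
            || u8TLBFlush == TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS;
    }

    int main(void)
    {
        printf("TLB_CONTROL=0x03 -> flush=%d\n", NeedsTlbFlush(TLB_FLUSH_SINGLE_CONTEXT));
        return 0;
    }
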
     
@@ -501,4 +533,12 @@
             pCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
             pCtx->dr[7] |= X86_DR7_RA1_MASK;
+
+            /*
+             * Ask PGM to flush the TLB as if we continue to interpret the nested-guest
+             * instructions from guest memory we'd be in trouble otherwise.
+             */
+            PGMFlushTLB(pVCpu, pCtx->cr3, true);
+            PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
+            CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
 
             /*
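
Once the VMCB-supplied CR0/CR3/CR4/EFER are in effect, PGM must recompute the paging mode and drop cached translations, otherwise the interpreter would fetch nested-guest code through stale mappings (the #VMEXIT path a few hunks down gains the same resync). A standalone sketch of the mode decision PGMChangeMode has to redo, per standard x86 rules:

    #include <stdint.h>
    #include <stdio.h>

    #define CR0_PG   (1u << 31)
    #define CR4_PAE  (1u << 5)
    #define EFER_LME (1u << 8)

    typedef enum { PGMODE_NONE, PGMODE_32BIT, PGMODE_PAE, PGMODE_LONG } PGMODE;

    /* Recompute the paging mode from the control registers the VMCB just
       loaded -- the decision PGM must redo after VMRUN. */
    static PGMODE CalcPagingMode(uint64_t cr0, uint64_t cr4, uint64_t efer)
    {
        if (!(cr0 & CR0_PG))
            return PGMODE_NONE;
        if (efer & EFER_LME)
            return PGMODE_LONG;     /* LME + PG (PAE required) => long mode */
        if (cr4 & CR4_PAE)
            return PGMODE_PAE;
        return PGMODE_32BIT;
    }

    int main(void)
    {
        printf("mode=%d\n", CalcPagingMode(CR0_PG, CR4_PAE, EFER_LME)); /* 3 = long */
        return 0;
    }
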
     
@@ -558,4 +598,6 @@
              *        NRIP for the nested-guest to calculate the instruction length
              *        below. */
+                Log3(("HMSvmVmRun: InjectingEvent: uVector=%u enmType=%d uErrorCode=%u cr2=%#RX64\n", uVector, enmType,
+                      uErrorCode, pCtx->cr2));
                 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, uVector, enmType, uErrorCode, pCtx->cr2, 0 /* cbInstr */);
                 if (   rcStrict == VINF_SVM_VMEXIT
@@ -564,4 +606,5 @@
             }
 
+            Log3(("HMSvmVmRun: Entered nested-guest at CS:RIP=%04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));
             return VINF_SUCCESS;
         }
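
The new Log3 statements print the event being injected from the VMCB EVENTINJ field. For context, a sketch that unpacks that field per the AMD APM layout (vector in bits 7:0, type in 10:8, error-code-valid bit 11, valid bit 31, error code in 63:32):

    #include <stdint.h>
    #include <stdio.h>

    /* Decoded EVENTINJ, layout per the AMD APM. */
    typedef struct
    {
        uint8_t  uVector;
        uint8_t  enmType;          /* 0=INTR, 2=NMI, 3=exception, 4=soft int */
        int      fErrorCodeValid;
        int      fValid;
        uint32_t uErrorCode;
    } EVENTINJ;

    static EVENTINJ DecodeEventInj(uint64_t u64)
    {
        EVENTINJ Event;
        Event.uVector         = (uint8_t)(u64 & 0xff);
        Event.enmType         = (uint8_t)((u64 >> 8) & 0x7);
        Event.fErrorCodeValid = (int)((u64 >> 11) & 1);
        Event.fValid          = (int)((u64 >> 31) & 1);
        Event.uErrorCode      = (uint32_t)(u64 >> 32);
        return Event;
    }

    int main(void)
    {
        /* Valid #PF (vector 14), type exception, error code 0x2. */
        EVENTINJ Event = DecodeEventInj(UINT64_C(0x0000000280000B0E));
        printf("uVector=%u enmType=%u uErrorCode=%#x (ev=%d v=%d)\n",
               Event.uVector, Event.enmType, Event.uErrorCode,
               Event.fErrorCodeValid, Event.fValid);
        return 0;
    }
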
     
@@ -597,7 +640,9 @@
                                              uint64_t uExitInfo2)
 {
-    if (   CPUMIsGuestInNestedHwVirtMode(pCtx)
+    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
         || uExitCode == SVM_EXIT_INVALID)
     {
+        Log3(("HMSvmNstGstVmExit: uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode, uExitInfo1, uExitInfo2));
+
         /*
          * Disable the global interrupt flag to prevent interrupts during the 'atomic' world switch.
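
Besides the rename (the predicate is now SVM-specific, leaving room for a VMX counterpart), note the guard: a nested #VMEXIT is performed either while actually in nested mode or when the exit code is SVM_EXIT_INVALID, which the VMRUN consistency checks above use to fail an entry. A tiny sketch of that predicate, with toy stand-ins rather than the VirtualBox definitions:

    #include <stdbool.h>
    #include <stdint.h>

    #define SVM_EXIT_INVALID UINT64_MAX   /* stand-in; not the VBox constant */

    static bool g_fInSvmNestedMode;       /* stand-in for CPUMIsGuestInSvmNestedHwVirtMode() */

    /* A nested #VMEXIT is honoured while the nested guest runs, or when a
       failed VMRUN reports its consistency-check result as SVM_EXIT_INVALID. */
    static bool CanDoNestedVmExit(uint64_t uExitCode)
    {
        return g_fInSvmNestedMode || uExitCode == SVM_EXIT_INVALID;
    }

    int main(void)
    {
        g_fInSvmNestedMode = false;
        return CanDoNestedVmExit(SVM_EXIT_INVALID) ? 0 : 1;   /* 0: let it through */
    }
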
     
@@ -734,4 +779,8 @@
             pCtx->dr[7]     |= X86_DR7_RA1_MASK;
 
+            PGMFlushTLB(pVCpu, pCtx->cr3, true);
+            PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
+            CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
+
             /** @todo if RIP is not canonical or outside the CS segment limit, we need to
              *        raise \#GP(0) in the guest. */
     
@@ -745,8 +794,9 @@
         {
             Log(("HMNstGstSvmVmExit: Writing VMCB at %#RGp failed\n", pCtx->hwvirt.svm.GCPhysVmcb));
-            Assert(!CPUMIsGuestInNestedHwVirtMode(pCtx));
+            Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
             rc = VERR_SVM_VMEXIT_FAILED;
         }
 
+        Log3(("HMSvmNstGstVmExit: returns %Rrc\n", rc));
         return rc;
     }
     
@@ -771,5 +821,5 @@
 {
     PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl;
-    Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
+    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
 
     X86RFLAGS RFlags;
     
@@ -836,5 +886,5 @@
     } while (0)
 
-    if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
+    if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
         return VINF_HM_INTERCEPT_NOT_ACTIVE;
 
     
@@ -989,6 +1039,7 @@
      * Check if any IO accesses are being intercepted.
      */
-    Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
+    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
     Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT));
+    Log(("HMSvmNstGstHandleIOIntercept: u16Port=%u\n", pIoExitInfo->n.u16Port));
 
     /*
     
@@ -1005,4 +1056,5 @@
     static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
     uint8_t const *pbIopm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);
+    Assert(pbIopm);
 
     uint16_t const u16Port   = pIoExitInfo->n.u16Port;
     
@@ -1015,6 +1067,12 @@
     uint16_t const u16Iopm = *(uint16_t *)pbIopm;
     if (u16Iopm & fIopmMask)
+    {
+        Log(("HMSvmNstGstHandleIOIntercept: u16Port=%u offIoPm=%u fSizeMask=%#x cShift=%u fIopmMask=%#x\n", u16Port, offIopm,
+             fSizeMask, cShift, fIopmMask));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_IOIO, pIoExitInfo->u, uNextRip);
-
+    }
+
+    Log(("HMSvmNstGstHandleIOIntercept: huh!?\n"));
+    AssertMsgFailed(("We expect an IO intercept here!\n"));
     return VINF_HM_INTERCEPT_NOT_ACTIVE;
 }
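
The braces added here wrap the lookup that the surrounding hunks set up: one permission bit per port, cb consecutive bits for a multi-byte access, read as a 16-bit quantity because the mask may straddle a byte boundary. A standalone, runnable version of the same arithmetic (little-endian host assumed, as on x86):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* 12K IOPM: one permission bit per port 0..65535 (plus spill-over). */
    static uint8_t g_abIoBitmap[3 * 4096];

    static int IsIoIntercepted(uint16_t u16Port, unsigned cbAccess)
    {
        static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
        uint16_t const offIopm   = u16Port >> 3;          /* byte offset     */
        uint16_t const fSizeMask = s_auSizeMasks[cbAccess & 7];
        uint8_t  const cShift    = u16Port & 7;           /* bit within byte */
        uint16_t const fIopmMask = fSizeMask << cShift;

        uint16_t u16Iopm;
        memcpy(&u16Iopm, &g_abIoBitmap[offIopm], sizeof(u16Iopm));
        return (u16Iopm & fIopmMask) != 0;                /* != 0 -> #VMEXIT */
    }

    int main(void)
    {
        g_abIoBitmap[0x60 >> 3] |= 1 << (0x60 & 7);       /* intercept port 0x60 */
        printf("IN AL,0x60 -> %s\n", IsIoIntercepted(0x60, 1) ? "#VMEXIT" : "pass");
        printf("IN AL,0x61 -> %s\n", IsIoIntercepted(0x61, 1) ? "#VMEXIT" : "pass");
        printf("word @0x5F -> %s\n", IsIoIntercepted(0x5f, 2) ? "#VMEXIT" : "pass");
        return 0;
    }
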
     
@@ -1045,5 +1103,5 @@
      */
     Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_MSR_PROT));
-    Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
+    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
 
     uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r67080)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r67156)
@@ -1804,10 +1804,4 @@
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
 
-#ifdef VBOX_WITH_NESTED_HWVIRT
-    /* Nested Hw. virt through SVM R0 execution is not yet implemented, IEM only, we shouldn't get here. */
-    if (CPUMIsGuestInNestedHwVirtMode(pCtx))
-        return VERR_NOT_IMPLEMENTED;
-#endif
-
     int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcb, pCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
     
@@ -2972,4 +2966,10 @@
 {
     HMSVM_ASSERT_PREEMPT_SAFE();
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    /* Nested Hw. virt through SVM R0 execution is not yet implemented, IEM only, we shouldn't get here. */
+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+        return VINF_EM_RESCHEDULE_REM;
+#endif
 
     /* Check force flag actions that might require us to go back to ring-3. */
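
Taken together with the previous hunk, the policy changes from failing hard (VERR_NOT_IMPLEMENTED while loading guest state) to gracefully handing the VCPU to the interpreter, where nested SVM is actually implemented. A toy sketch of the before/after behaviour; the status constants are stand-ins for the VBox definitions:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy status codes standing in for the VBox VINF_/VERR_ definitions. */
    enum { VINF_SUCCESS = 0, VINF_EM_RESCHEDULE_REM = 1, VERR_NOT_IMPLEMENTED = -12 };

    static bool g_fGuestInSvmNestedMode;

    /* Before (r67080): nested mode killed the VM while loading guest state. */
    static int LoadGuestStateOld(void)
    {
        return g_fGuestInSvmNestedMode ? VERR_NOT_IMPLEMENTED : VINF_SUCCESS;
    }

    /* After (r67156): the pre-run check hands the VCPU to the recompiler/IEM,
       where nested SVM is actually implemented. */
    static int PreRunGuestNew(void)
    {
        return g_fGuestInSvmNestedMode ? VINF_EM_RESCHEDULE_REM : VINF_SUCCESS;
    }

    int main(void)
    {
        g_fGuestInSvmNestedMode = true;
        printf("old: %d (fatal)  new: %d (reschedule)\n", LoadGuestStateOld(), PreRunGuestNew());
        return 0;
    }
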