VirtualBox

Changeset 74389 in vbox


Ignore:
Timestamp:
Sep 20, 2018 4:25:26 PM (6 years ago)
Author:
vboxsync
Message:

VMM/CPUM, IEM, HM: Nested VMX: bugref:9180 VM-exit bits.

Location:
trunk
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/err.h

    r74043 r74389  
    21022102/** Generic VM-entry failure. */
    21032103#define VERR_VMX_VMENTRY_FAILED                     (-4033)
     2104/** Generic VM-exit failure. */
     2105#define VERR_VMX_VMEXIT_FAILED                      (-4034)
    21042106/** @} */
    21052107
  • trunk/include/VBox/vmm/cpumctx.h

    r74337 r74389  
    548548                /** 0x2f8 - Last emulated VMX instruction/VM-exit diagnostic. */
    549549                VMXVDIAG                enmDiag;
    550                 /** 0x2fc - Whether the guest is in VMX root mode. */
     550                /** 0x2fc - VMX abort reason. */
     551                VMXABORT                enmAbort;
     552                /** 0x300 - VMX abort auxiliary information field. */
     553                uint32_t                uAbortAux;
     554                /** 0x304 - Whether the guest is in VMX root mode. */
    551555                bool                    fInVmxRootMode;
    552                 /** 0x2fd - Whether the guest is in VMX non-root mode. */
     556                /** 0x305 - Whether the guest is in VMX non-root mode. */
    553557                bool                    fInVmxNonRootMode;
    554                 /** 0x2fe - Whether the injected events are subjected to event intercepts.  */
     558                /** 0x306 - Whether the injected events are subjected to event intercepts.  */
    555559                bool                    fInterceptEvents;
    556                 bool                    fPadding0;
    557                 /** 0x300 - Cache of the nested-guest current VMCS - R0 ptr. */
     560                bool                    afPadding0[1];
     561                /** 0x308 - Cache of the nested-guest current VMCS - R0 ptr. */
    558562                R0PTRTYPE(PVMXVVMCS)    pVmcsR0;
    559563#if HC_ARCH_BITS == 32
    560564                uint32_t                uVmcsR0Padding;
    561565#endif
    562                 /** 0x308 - Cache of the nested-guest current VMCS - R3 ptr. */
     566                /** 0x310 - Cache of the nested-guest current VMCS - R3 ptr. */
    563567                R3PTRTYPE(PVMXVVMCS)    pVmcsR3;
    564568#if HC_ARCH_BITS == 32
    565569                uint32_t                uVmcsR3Padding;
    566570#endif
    567                 /** 0X310 - Cache of the nested-guest shadow VMCS - R0 ptr. */
     571                /** 0X318 - Cache of the nested-guest shadow VMCS - R0 ptr. */
    568572                R0PTRTYPE(PVMXVVMCS)    pShadowVmcsR0;
    569573#if HC_ARCH_BITS == 32
    570574                uint32_t                uShadowVmcsR0Padding;
    571575#endif
    572                 /** 0x318 - Cache of the nested-guest shadow VMCS - R3 ptr. */
     576                /** 0x320 - Cache of the nested-guest shadow VMCS - R3 ptr. */
    573577                R3PTRTYPE(PVMXVVMCS)    pShadowVmcsR3;
    574578#if HC_ARCH_BITS == 32
    575579                uint32_t                uShadowVmcsR3Padding;
    576580#endif
    577                 /** 0x320 - Cache of the nested-guest Virtual-APIC page - R0 ptr. */
     581                /** 0x328 - Cache of the nested-guest Virtual-APIC page - R0 ptr. */
    578582                R0PTRTYPE(void *)       pvVirtApicPageR0;
    579583#if HC_ARCH_BITS == 32
    580584                uint32_t                uVirtApicPageR0Padding;
    581585#endif
    582                 /** 0x328 - Cache of the nested-guest Virtual-APIC page - R3 ptr. */
     586                /** 0x330 - Cache of the nested-guest Virtual-APIC page - R3 ptr. */
    583587                R3PTRTYPE(void *)       pvVirtApicPageR3;
    584588#if HC_ARCH_BITS == 32
    585589                uint32_t                uVirtApicPageR3Padding;
    586590#endif
    587                 /** 0x330 - Cache of the nested-guest VMREAD-bitmap - R0 ptr. */
     591                /** 0x338 - Cache of the nested-guest VMREAD-bitmap - R0 ptr. */
    588592                R0PTRTYPE(void *)       pvVmreadBitmapR0;
    589593#if HC_ARCH_BITS == 32
    590594                uint32_t                uVmreadBitmapR0Padding;
    591595#endif
    592                 /** 0x338 - Cache of the nested-guest VMREAD-bitmap - R3 ptr. */
     596                /** 0x340 - Cache of the nested-guest VMREAD-bitmap - R3 ptr. */
    593597                R3PTRTYPE(void *)       pvVmreadBitmapR3;
    594598#if HC_ARCH_BITS == 32
    595599                uint32_t                uVmreadBitmapR3Padding;
    596600#endif
    597                 /** 0x340 - Cache of the nested-guest VMWRITE-bitmap - R0 ptr. */
     601                /** 0x348 - Cache of the nested-guest VMWRITE-bitmap - R0 ptr. */
    598602                R0PTRTYPE(void *)       pvVmwriteBitmapR0;
    599603#if HC_ARCH_BITS == 32
    600604                uint32_t                uVmwriteBitmapR0Padding;
    601605#endif
    602                 /** 0x348 - Cache of the nested-guest VMWRITE-bitmap - R3 ptr. */
     606                /** 0x350 - Cache of the nested-guest VMWRITE-bitmap - R3 ptr. */
    603607                R3PTRTYPE(void *)       pvVmwriteBitmapR3;
    604608#if HC_ARCH_BITS == 32
    605609                uint32_t                uVmwriteBitmapR3Padding;
    606610#endif
    607                 /** 0x350 - The MSR auto-load/store area - R0 ptr. */
     611                /** 0x358 - The MSR auto-load/store area - R0 ptr. */
    608612                R0PTRTYPE(PVMXAUTOMSR)  pAutoMsrAreaR0;
    609613#if HC_ARCH_BITS == 32
    610614                uint32_t                uAutoMsrAreaR0;
    611615#endif
    612                 /** 0x358 - The MSR auto-load/store area - R3 ptr. */
     616                /** 0x360 - The MSR auto-load/store area - R3 ptr. */
    613617                R3PTRTYPE(PVMXAUTOMSR)  pAutoMsrAreaR3;
    614618#if HC_ARCH_BITS == 32
    615619                uint32_t                uAutoMsrAreaR3;
    616620#endif
    617                 /** 0x360 - Padding. */
    618                 uint8_t             abPadding[0x3f0 - 0x360];
     621                /** 0x368 - Padding. */
     622                uint8_t             abPadding[0x3f0 - 0x368];
    619623            } vmx;
    620624        } CPUM_UNION_NM(s);
     
    699703AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.GCPhysShadowVmcs,       0x2f0);
    700704AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.enmDiag,                0x2f8);
    701 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInVmxRootMode,         0x2fc);
    702 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInVmxNonRootMode,      0x2fd);
    703 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInterceptEvents,       0x2fe);
    704 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR0,                0x300);
    705 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR3,                0x308);
    706 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR0,          0x310);
    707 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR3,          0x318);
    708 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVirtApicPageR0,       0x320);
    709 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVirtApicPageR3,       0x328);
    710 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR0,       0x330);
    711 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR3,       0x338);
    712 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR0,      0x340);
    713 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR3,      0x348);
    714 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pAutoMsrAreaR0,         0x350);
    715 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pAutoMsrAreaR3,         0x358);
     705AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.enmAbort,               0x2fc);
     706AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.uAbortAux,              0x300);
     707AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInVmxRootMode,         0x304);
     708AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInVmxNonRootMode,      0x305);
     709AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInterceptEvents,       0x306);
     710AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR0,                0x308);
     711AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR3,                0x310);
     712AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR0,          0x318);
     713AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR3,          0x320);
     714AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVirtApicPageR0,       0x328);
     715AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVirtApicPageR3,       0x330);
     716AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR0,       0x338);
     717AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR3,       0x340);
     718AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR0,      0x348);
     719AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR3,      0x350);
     720AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pAutoMsrAreaR0,         0x358);
     721AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pAutoMsrAreaR3,         0x360);
    716722AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR0,           8);
    717723AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR0,     8);
  • trunk/include/VBox/vmm/hm.h

    r74287 r74389  
    136136VMM_INT_DECL(bool)              HMIsVmxSupported(PVM pVM);
    137137VMM_INT_DECL(const char *)      HMVmxGetDiagDesc(VMXVDIAG enmDiag);
     138VMM_INT_DECL(const char *)      HMVmxGetAbortDesc(VMXABORT enmAbort);
    138139VMM_INT_DECL(void)              HMHCPagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode);
    139140/** @} */
  • trunk/include/VBox/vmm/hm_vmx.h

    r74381 r74389  
    13531353/** @name VMX abort reasons.
    13541354 * See Intel spec. "27.7 VMX Aborts".
     1355 * Update HMVmxGetAbortDesc() if new reasons are added.
    13551356 * @{
    13561357 */
    13571358typedef enum
    13581359{
     1360    /** None - don't use this / uninitialized value. */
     1361    VMXABORT_NONE                 = 0,
     1362    /** VMX abort caused during saving of guest MSRs. */
    13591363    VMXABORT_SAVE_GUEST_MSRS      = 1,
     1364    /** VMX abort caused during host PDPTE checks. */
    13601365    VMXBOART_HOST_PDPTE           = 2,
     1366    /** VMX abort caused due to current VMCS being corrupted. */
    13611367    VMXABORT_CURRENT_VMCS_CORRUPT = 3,
     1368    /** VMX abort caused during loading of host MSRs. */
    13621369    VMXABORT_LOAD_HOST_MSR        = 4,
     1370    /** VMX abort caused due to a machine-check exception during VM-exit. */
    13631371    VMXABORT_MACHINE_CHECK_XCPT   = 5,
    1364     VMXABORT_HOST_LONG_MODE       = 6
     1372    /** VMX abort caused due to invalid return to long mode. */
     1373    VMXABORT_HOST_LONG_MODE       = 6,
     1374    /* Type size hack. */
     1375    VMXABORT_32BIT_HACK           = 0x7fffffff
    13651376} VMXABORT;
     1377AssertCompileSize(VMXABORT, 4);
    13661378/** @} */
    13671379
     
    38223834    kVmxVDiag_Vmentry_VmxRoot,
    38233835    kVmxVDiag_Vmentry_Vpid,
     3836    kVmxVDiag_Vmexit_MsrStore,
     3837    kVmxVDiag_Vmexit_MsrStoreCount,
     3838    kVmxVDiag_Vmexit_MsrStorePtrReadPhys,
     3839    kVmxVDiag_Vmexit_MsrStoreRing3,
     3840    kVmxVDiag_Vmexit_MsrStoreRsvd,
    38243841    /* Last member for determining array index limit. */
    38253842    kVmxVDiag_End
  • trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp

    r74310 r74389  
    336336    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys , "VmwriteBitmapPtrReadPhys"  ),
    337337    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmxRoot                  , "VmxRoot"                   ),
    338     VMXV_DIAG_DESC(kVmxVDiag_Vmentry_Vpid                     , "Vpid"                      )
     338    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_Vpid                     , "Vpid"                      ),
     339    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStore                  , "MsrStore"                  ),
     340    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStoreCount             , "MsrStoreCount"             ),
     341    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStorePtrReadPhys       , "MsrStorePtrReadPhys"       ),
     342    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStoreRing3             , "MsrStoreRing3"             ),
     343    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStoreRsvd              , "MsrStoreRsvd"              )
    339344    /* kVmxVDiag_End */
    340345};
     
    432437    if (RT_LIKELY((unsigned)enmDiag < RT_ELEMENTS(g_apszVmxVDiagDesc)))
    433438        return g_apszVmxVDiagDesc[enmDiag];
     439    return "Unknown/invalid";
     440}
     441
     442
     443/**
     444 * Gets the description for a VMX abort reason.
     445 *
     446 * @returns The descriptive string.
     447 * @param   enmAbort    The VMX abort reason.
     448 */
     449VMM_INT_DECL(const char *) HMVmxGetAbortDesc(VMXABORT enmAbort)
     450{
     451    switch (enmAbort)
     452    {
     453        case VMXABORT_NONE:                 return "VMXABORT_NONE";
     454        case VMXABORT_SAVE_GUEST_MSRS:      return "VMXABORT_SAVE_GUEST_MSRS";
     455        case VMXBOART_HOST_PDPTE:           return "VMXBOART_HOST_PDPTE";
     456        case VMXABORT_CURRENT_VMCS_CORRUPT: return "VMXABORT_CURRENT_VMCS_CORRUPT";
     457        case VMXABORT_LOAD_HOST_MSR:        return "VMXABORT_LOAD_HOST_MSR";
     458        case VMXABORT_MACHINE_CHECK_XCPT:   return "VMXABORT_MACHINE_CHECK_XCPT";
     459        case VMXABORT_HOST_LONG_MODE:       return "VMXABORT_HOST_LONG_MODE";
     460        default:
     461            break;
     462    }
    434463    return "Unknown/invalid";
    435464}
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r74376 r74389  
    407407
    408408/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
    409 #define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_InsDiag) \
     409#define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
    410410    do \
    411411    { \
    412         Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_InsDiag), \
    413                 HMVmxGetDiagDesc(a_InsDiag), (a_pszFailure))); \
    414         (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_InsDiag); \
     412        Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
     413            HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
     414        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
    415415        return VERR_VMX_VMENTRY_FAILED; \
    416416    } while (0)
     417
     418/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
     419#define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
     420    do \
     421    { \
     422        Log(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
     423            HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
     424        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
     425        return VERR_VMX_VMEXIT_FAILED; \
     426    } while (0)
     427
    417428
    418429
     
    40764087    }
    40774088
     4089    /*
     4090     * The VM-entry MSR-load area address need not be a valid guest-physical address if the
     4091     * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
     4092     * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
     4093     */
     4094    if (cMsrs == 0)
     4095        return VINF_SUCCESS;
     4096
    40784097    RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;
    40794098    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
     
    40814100    if (RT_SUCCESS(rc))
    40824101    {
    4083         PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea); NOREF(pMsr);
     4102        PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
     4103        Assert(pMsr);
    40844104        for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
    40854105        {
     
    40974117                 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-entry.
    40984118                 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure
    4099                  * indicated further with a different diagnostic code. Later, we can try implement handling of
    4100                  * the MSR in ring-0 if possible, or come up with a better, generic solution.
     4119                 * recording the MSR index in the VM-exit qualification (as per the Intel spec.) and indicated
     4120                 * further by our own, specific diagnostic code. Later, we can try to implement handling of
     4121                 * the MSR in ring-0 if possible, or come up with a better, generic solution.
    41014122                 */
    41024123                pVmcs->u64ExitQual.u = idxMsr;
     
    41154136    else
    41164137    {
    4117         Log(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
    4118         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_MsrLoadPtrReadPhys;
    4119         return rc;
     4138        AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
     4139        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys);
    41204140    }
    41214141
     
    42444264{
    42454265    /*
    4246      * Load control, debug, segment, descriptor-table registers and some MSRs.
     4266     * Load guest control, debug, segment, descriptor-table registers and some MSRs.
    42474267     */
    42484268    iemVmxVmentryLoadGuestControlRegsMsr(pVCpu);
     
    45414561     */
    45424562    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4543     Assert(pVmcs);
    45444563
    45454564    /* Save control registers. */
     
    46764695
    46774696/**
     4697 * Saves guest non-register state as part of VM-exit.
     4698 *
     4699 * @param   pVCpu           The cross context virtual CPU structure.
     4700 * @param   uExitReason     The VM-exit reason.
     4701 */
     4702IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPU pVCpu, uint32_t uExitReason)
     4703{
     4704    /*
     4705     * Save guest non-register state.
     4706     * See Intel spec. 27.3.4 "Saving Non-Register State".
     4707     */
     4708    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     4709
     4710    /*
     4711     * Activity-state: VM-exits occur before changing the activity state
     4712     * of the processor and hence we shouldn't need to change it.
     4713     */
     4714
     4715    /* Interruptibility-state. */
     4716    pVmcs->u32GuestIntrState = 0;
     4717    if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
     4718    { /** @todo NSTVMX: Virtual-NMI blocking. */ }
     4719    else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
     4720        pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
     4721
     4722    if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     4723        && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
     4724    {
     4725        /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
     4726         *        currently. */
     4727        pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
     4728        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     4729    }
     4730    /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
     4731
     4732    /* Pending debug exceptions. */
     4733    if (    uExitReason != VMX_EXIT_INIT_SIGNAL
     4734        &&  uExitReason != VMX_EXIT_SMI
     4735        &&  uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
     4736        && !HMVmxIsTrapLikeVmexit(uExitReason))
     4737    {
     4738        /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
     4739         *        block-by-MovSS is in effect. */
     4740        pVmcs->u64GuestPendingDbgXcpt.u = 0;
     4741    }
     4742
     4743    /** @todo NSTVMX: Save VMX preemption timer value. */
     4744
     4745    /* PDPTEs. */
     4746    Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));    /* We don't support EPT yet. */
     4747    pVmcs->u64GuestPdpte0.u = 0;
     4748    pVmcs->u64GuestPdpte1.u = 0;
     4749    pVmcs->u64GuestPdpte2.u = 0;
     4750    pVmcs->u64GuestPdpte3.u = 0;
     4751}
     4752
     4753
     4754/**
    46784755 * Saves the guest-state as part of VM-exit.
    46794756 *
    46804757 * @returns VBox status code.
    4681  * @param   pVCpu       The cross context virtual CPU structure.
    4682  */
    4683 IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu)
    4684 {
     4758 * @param   pVCpu           The cross context virtual CPU structure.
     4759 * @param   uExitReason     The VM-exit reason.
     4760 */
     4761IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu, uint32_t uExitReason)
     4762{
     4763    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     4764    Assert(pVmcs);
     4765
    46854766    /*
    4686      * Save control, debug, segment, descriptor-table registers and some MSRs.
     4767     * Save guest control, debug, segment, descriptor-table registers and some MSRs.
    46874768     */
    46884769    iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
    46894770    iemVmxVmexitSaveGuestSegRegs(pVCpu);
    46904771
    4691     /** @todo NSTVMX: rest of state. */
     4772    /*
     4773     * Save guest RIP, RSP and RFLAGS.
     4774     */
     4775    /* We don't support enclave mode yet. */
     4776    pVmcs->u64GuestRip.u    = pVCpu->cpum.GstCtx.rip;
     4777    pVmcs->u64GuestRsp.u    = pVCpu->cpum.GstCtx.rsp;
     4778    pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u;  /** @todo NSTVMX: Check RFLAGS.RF handling. */
     4779
     4780    /* Save guest non-register state. */
     4781    iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
     4782}
     4783
     4784
     4785/**
     4786 * Saves the guest MSRs into the VM-exit auto-store MSRs area as part of VM-exit.
     4787 *
     4788 * @returns VBox status code.
     4789 * @param   pVCpu           The cross context virtual CPU structure.
     4790 * @param   uExitReason     The VM-exit reason (for diagnostic purposes).
     4791 */
     4792IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
     4793{
     4794    /*
     4795     * Save guest MSRs.
     4796     * See Intel spec. 27.4 "Saving MSRs".
     4797     */
     4798    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     4799    const char *const pszFailure = "VMX-abort";
     4800
     4801    /*
     4802     * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the count
     4803     * is exceeded including possibly raising #MC exceptions during VMX transition. Our
     4804     * implementation causes a VMX-abort followed by a triple-fault.
     4805     */
     4806    uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
     4807    uint32_t const cMaxSupportedMsrs  = VMX_MISC_MAX_MSRS(u64GuestVmxMiscMsr);
     4808    uint32_t const cMsrs              = pVmcs->u32ExitMsrStoreCount;
     4809    Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
     4810    if (cMsrs <= cMaxSupportedMsrs)
     4811    { /* likely */ }
     4812    else
     4813        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
     4814
     4815    /*
     4816     * The VM-exit MSR-store area address need not be a valid guest-physical address if the
     4817     * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
     4818     * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
     4819     */
     4820    if (cMsrs == 0)
     4821        return VINF_SUCCESS;
     4822
     4823    PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
     4824    Assert(pMsr);
     4825    for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
     4826    {
     4827        if (   !pMsr->u32Reserved
     4828            &&  pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
     4829            &&  pMsr->u32Msr != MSR_IA32_SMBASE)
     4830        {
     4831            int rc = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
     4832            if (rc == VINF_SUCCESS)
     4833                continue;
     4834
     4835            /*
     4836             * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-exit.
     4837             * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VMX-abort
     4838             * recording the MSR index in a VirtualBox specific VMCS field and indicated further by our
     4839             * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
     4840             * if possible, or come up with a better, generic solution.
     4841             */
     4842            pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
     4843            VMXVDIAG const enmDiag = rc == VINF_CPUM_R3_MSR_READ
     4844                                   ? kVmxVDiag_Vmexit_MsrStoreRing3
     4845                                   : kVmxVDiag_Vmexit_MsrStore;
     4846            IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
     4847        }
     4848        else
     4849        {
     4850            pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
     4851            IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
     4852        }
     4853    }
     4854
     4855    RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u;
     4856    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea,
     4857                                      pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), VMX_V_AUTOMSR_AREA_SIZE);
     4858    if (RT_SUCCESS(rc))
     4859    { /* likely */ }
     4860    else
     4861    {
     4862        AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
     4863        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrReadPhys);
     4864    }
     4865
     4866    NOREF(uExitReason);
     4867    NOREF(pszFailure);
     4868    return VINF_SUCCESS;
     4869}
     4870
     4871
     4872/**
     4873 * Performs a VMX abort (due to a fatal error during VM-exit).
     4874 *
     4875 * @returns VBox status code.
     4876 * @param   pVCpu       The cross context virtual CPU structure.
     4877 * @param   enmAbort    The VMX abort reason.
     4878 */
     4879IEM_STATIC int iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
     4880{
     4881    /*
     4882     * Perform the VMX abort.
     4883     * See Intel spec. 27.7 "VMX Aborts".
     4884     */
     4885    LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMVmxGetAbortDesc(enmAbort)));
     4886
     4887    /* We don't support SMX yet. */
     4888    pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
     4889    ASMWriteFence();
     4890    if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
     4891    {
     4892        RTGCPHYS const GCPhysVmcs  = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
     4893        uint32_t const offVmxAbort = RT_OFFSETOF(VMXVVMCS, u32VmxAbortId);
     4894        PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
     4895    }
     4896
     4897    return VINF_EM_TRIPLE_FAULT;
    46924898}
    46934899
     
    46974903 *
    46984904 * @param   pVCpu           The cross context virtual CPU structure.
     4905 * @param   uExitReason     The VM-exit reason.
    46994906 * @param   cbInstr         The instruction length.
    47004907 */
     
    47184925        && uExitReason != VMX_EXIT_ERR_MSR_LOAD
    47194926        && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK)
    4720         iemVmxVmexitSaveGuestState(pVCpu);
    4721 
     4927    {
     4928        iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
     4929        int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
     4930        if (RT_SUCCESS(rc))
     4931        { /* likely */ }
     4932        else
     4933        {
     4934            LogFunc(("iemVmxVmexitSaveGuestAutoMsrs failed (rc=%Rrc) -> VMX-Abort\n", rc));
     4935            return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
     4936        }
     4937    }
     4938
     4939    /** @todo NSTVMX: rest of VM-exit (loading host state etc). */
    47224940    return VINF_SUCCESS;
    47234941}
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r74258 r74389  
    30393039    if (fDumpState & CPUMHWVIRTDUMP_VMX)
    30403040    {
     3041        pHlp->pfnPrintf(pHlp, "  GCPhysVmxon                = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysVmxon);
     3042        pHlp->pfnPrintf(pHlp, "  GCPhysVmcs                 = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysVmcs);
     3043        pHlp->pfnPrintf(pHlp, "  GCPhysShadowVmcs           = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysShadowVmcs);
     3044        pHlp->pfnPrintf(pHlp, "  enmDiag                    = %u (%s)\n",   pCtx->hwvirt.vmx.enmDiag, HMVmxGetDiagDesc(pCtx->hwvirt.vmx.enmDiag));
     3045        pHlp->pfnPrintf(pHlp, "  enmAbort                   = %u (%s)\n",   pCtx->hwvirt.vmx.enmAbort, HMVmxGetAbortDesc(pCtx->hwvirt.vmx.enmAbort));
     3046        pHlp->pfnPrintf(pHlp, "  uAbortAux                  = %u (%#x)\n",  pCtx->hwvirt.vmx.uAbortAux, pCtx->hwvirt.vmx.uAbortAux);
    30413047        pHlp->pfnPrintf(pHlp, "  fInVmxRootMode             = %RTbool\n",   pCtx->hwvirt.vmx.fInVmxRootMode);
    30423048        pHlp->pfnPrintf(pHlp, "  fInVmxNonRootMode          = %RTbool\n",   pCtx->hwvirt.vmx.fInVmxNonRootMode);
    3043         pHlp->pfnPrintf(pHlp, "  GCPhysVmxon                = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysVmxon);
    3044         pHlp->pfnPrintf(pHlp, "  GCPhysVmcs                 = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysVmcs);
    3045         pHlp->pfnPrintf(pHlp, "  enmDiag                    = %u (%s)\n",   pCtx->hwvirt.vmx.enmDiag,
    3046                         HMVmxGetDiagDesc(pCtx->hwvirt.vmx.enmDiag));
     3049        pHlp->pfnPrintf(pHlp, "  fInterceptEvents           = %RTbool\n",   pCtx->hwvirt.vmx.fInterceptEvents);
     3050
    30473051        /** @todo NSTVMX: Dump remaining/new fields. */
    30483052    }
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette