VirtualBox

Changeset 72872 in vbox


Ignore:
Timestamp:
Jul 4, 2018 1:14:32 PM (6 years ago)
Author:
vboxsync
Message:

VMM/HMVMXR0: bugref:9193 Fix a bug with our exception bitmap cache going out of sync with the VMCS value.
Added additional consistency checks for VMCS cache values (exception bitmap, TSC offset) - now reports specific
errors via u32HMError on VERR_VMX_INVALID_GUEST_STATE gurus.

Location:
trunk
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/err.h

    r72580 r72872  
    20962096/** VT-x features disabled by the BIOS. */
    20972097#define VERR_VMX_MSR_VMX_DISABLED                   (-4029)
    2098 /** VM-Entry Controls internal cache invalid. */
    2099 #define VERR_VMX_ENTRY_CTLS_CACHE_INVALID           (-4030)
    2100 /** VM-Exit Controls internal cache invalid. */
    2101 #define VERR_VMX_EXIT_CTLS_CACHE_INVALID            (-4031)
    2102 /** VM-Execution Pin-based Controls internal cache invalid. */
    2103 #define VERR_VMX_PIN_EXEC_CTLS_CACHE_INVALID        (-4032)
    2104 /** VM-Execution Primary Processor-based Controls internal cache
    2105  *  invalid. */
    2106 #define VERR_VMX_PROC_EXEC_CTLS_CACHE_INVALID       (-4033)
    2107 /** VM-Execution Secondary Processor-based Controls internal
    2108  *  cache invalid. */
    2109 #define VERR_VMX_PROC_EXEC2_CTLS_CACHE_INVALID      (-4034)
     2098/** VT-x VMCS field cache invalid. */
     2099#define VERR_VMX_VMCS_FIELD_CACHE_INVALID           (-4030)
    21102100/** Failed to set VMXON enable bit while enabling VT-x through the MSR. */
    2111 #define VERR_VMX_MSR_VMX_ENABLE_FAILED              (-4035)
     2101#define VERR_VMX_MSR_VMX_ENABLE_FAILED              (-4031)
    21122102/** Failed to enable VMXON-in-SMX bit while enabling VT-x through the MSR. */
    2113 #define VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED          (-4036)
     2103#define VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED          (-4032)
    21142104/** @} */
    21152105
  • trunk/include/VBox/vmm/hm_vmx.h

    r72855 r72872  
    149149/** EPT requires page-walk length of 4. */
    150150#define VMX_UFC_EPT_PAGE_WALK_LENGTH_UNSUPPORTED                14
     151/** @} */
     152
     153/** @name VMX HM-error codes for VERR_VMX_VMCS_FIELD_CACHE_INVALID.
     154 *  VCI = VMCS-field Cache Invalid.
     155 * @{
     156 */
     157/** Cache of VM-entry controls invalid. */
     158#define VMX_VCI_CTRL_ENTRY                                      300
     159/** Cache of VM-exit controls invalid. */
     160#define VMX_VCI_CTRL_EXIT                                       301
     161/** Cache of pin-based VM-execution controls invalid. */
     162#define VMX_VCI_CTRL_PIN_EXEC                                   302
     163/** Cache of processor-based VM-execution controls invalid. */
     164#define VMX_VCI_CTRL_PROC_EXEC                                  303
     165/** Cache of secondary processor-based VM-execution controls invalid. */
     166#define VMX_VCI_CTRL_PROC_EXEC2                                 304
     167/** Cache of exception bitmap invalid. */
     168#define VMX_VCI_CTRL_XCPT_BITMAP                                305
     169/** Cache of TSC offset invalid. */
     170#define VMX_VCI_CTRL_TSC_OFFSET                                 306
    151171/** @} */
    152172
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r72855 r72872  
    16211621
    16221622/**
    1623  * Verifies that our cached values of the VMCS controls are all
    1624  * consistent with what's actually present in the VMCS.
     1623 * Verifies that our cached values of the VMCS fields are all consistent with
     1624 * what's actually present in the VMCS.
    16251625 *
    16261626 * @returns VBox status code.
     1627 * @retval  VINF_SUCCESS if all our caches match their respective VMCS fields.
     1628 * @retval  VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
     1629 *                                            VMCS content. HMCPU error-field is
     1630 *                                            updated, see VMX_VCI_XXX.
    16271631 * @param   pVCpu   The cross context virtual CPU structure.
    16281632 */
     
    16321636    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
    16331637    AssertRCReturn(rc, rc);
    1634     AssertMsgReturn(pVCpu->hm.s.vmx.u32EntryCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
    1635                     VERR_VMX_ENTRY_CTLS_CACHE_INVALID);
     1638    AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32EntryCtls == u32Val,
     1639                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
     1640                        pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_ENTRY,
     1641                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    16361642
    16371643    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
    16381644    AssertRCReturn(rc, rc);
    1639     AssertMsgReturn(pVCpu->hm.s.vmx.u32ExitCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
    1640                     VERR_VMX_EXIT_CTLS_CACHE_INVALID);
     1645    AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32ExitCtls == u32Val,
     1646                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
     1647                        pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_EXIT,
     1648                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    16411649
    16421650    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
    16431651    AssertRCReturn(rc, rc);
    1644     AssertMsgReturn(pVCpu->hm.s.vmx.u32PinCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
    1645                     VERR_VMX_PIN_EXEC_CTLS_CACHE_INVALID);
     1652    AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32PinCtls == u32Val,
     1653                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
     1654                        pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PIN_EXEC,
     1655                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    16461656
    16471657    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
    16481658    AssertRCReturn(rc, rc);
    1649     AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
    1650                     VERR_VMX_PROC_EXEC_CTLS_CACHE_INVALID);
     1659    AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32ProcCtls == u32Val,
     1660                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
     1661                        pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC,
     1662                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    16511663
    16521664    if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     
    16541666        rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
    16551667        AssertRCReturn(rc, rc);
    1656         AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val,
    1657                         ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
    1658                         VERR_VMX_PROC_EXEC2_CTLS_CACHE_INVALID);
    1659     }
     1668        AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val,
     1669                            ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
     1670                            pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
     1671                            VERR_VMX_VMCS_FIELD_CACHE_INVALID);
     1672    }
     1673
     1674    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
     1675    AssertRCReturn(rc, rc);
     1676    AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32XcptBitmap == u32Val,
     1677                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32XcptBitmap, u32Val),
     1678                        pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
     1679                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
     1680
     1681    uint64_t u64Val;
     1682    rc = VMXReadVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
     1683    AssertRCReturn(rc, rc);
     1684    AssertMsgReturnStmt(pVCpu->hm.s.vmx.u64TscOffset == u64Val,
     1685                        ("Cache=%#RX64 VMCS=%#RX64\n", pVCpu->hm.s.vmx.u64TscOffset, u64Val),
     1686                        pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
     1687                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    16601688
    16611689    return VINF_SUCCESS;
     
    26402668    AssertPtr(pVCpu);
    26412669
    2642     uint32_t u32XcptBitmap;
     2670    uint32_t uXcptBitmap;
    26432671
    26442672    /* Must always intercept #AC to prevent the guest from hanging the CPU. */
    2645     u32XcptBitmap = RT_BIT_32(X86_XCPT_AC);
     2673    uXcptBitmap = RT_BIT_32(X86_XCPT_AC);
    26462674
    26472675    /* Because we need to maintain the DR6 state even when intercepting DRx reads
    26482676       and writes, and because recursive #DBs can cause the CPU hang, we must always
    26492677       intercept #DB. */
    2650     u32XcptBitmap |= RT_BIT_32(X86_XCPT_DB);
     2678    uXcptBitmap |= RT_BIT_32(X86_XCPT_DB);
    26512679
    26522680    /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
    26532681    if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
    2654         u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
     2682        uXcptBitmap |= RT_BIT(X86_XCPT_PF);
    26552683
    26562684    /* Commit it to the VMCS. */
    2657     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
     2685    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
    26582686    AssertRCReturn(rc, rc);
    26592687
    26602688    /* Update our cache of the exception bitmap. */
    2661     pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
     2689    pVCpu->hm.s.vmx.u32XcptBitmap = uXcptBitmap;
    26622690    return VINF_SUCCESS;
    26632691}
     
    37603788        uXcptBitmap |= RT_BIT(X86_XCPT_PF);
    37613789#endif
    3762         if (uXcptBitmap != pVCpu->hm.s.vmx.u32XcptBitmap)
    3763         {
    3764             pVCpu->hm.s.vmx.u32XcptBitmap = uXcptBitmap;
    3765             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    3766         }
    3767         Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
     3790        Assert(pVM->hm.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
    37683791
    37693792        /*
     
    38023825#endif
    38033826        /*
    3804          * Finally, update VMCS fields with the CR0 values.
     3827         * Finally, update VMCS fields with the CR0 values and the exception bitmap.
    38053828         */
    38063829        int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCr0);
     
    38103833        if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls)
    38113834            rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
     3835        if (uXcptBitmap != pVCpu->hm.s.vmx.u32XcptBitmap)
     3836            rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
    38123837        AssertRCReturn(rc, rc);
    38133838
    38143839        /* Update our caches. */
    3815         pVCpu->hm.s.vmx.u32Cr0Mask  = u32Cr0Mask;
    3816         pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
     3840        pVCpu->hm.s.vmx.u32Cr0Mask    = u32Cr0Mask;
     3841        pVCpu->hm.s.vmx.u32ProcCtls   = uProcCtls;
     3842        pVCpu->hm.s.vmx.u32XcptBitmap = uXcptBitmap;
    38173843
    38183844        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);
     
    81418167    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    81428168
    8143     /* This needs to be done after hmR0VmxExportGuestCR0() as it may alter intercepted exceptions. */
    81448169    rc = hmR0VmxExportGuestXcptIntercepts(pVCpu);
    81458170    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     
    1169311718{
    1169411719    int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    11695     rc    |= hmR0VmxCheckVmcsCtls(pVCpu);
    1169611720    AssertRCReturn(rc, rc);
     11721    rc = hmR0VmxCheckVmcsCtls(pVCpu);
     11722    if (RT_FAILURE(rc))
     11723        return rc;
    1169711724
    1169811725    uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pMixedCtx);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette