VirtualBox

Changeset 72849 in vbox


Ignore:
Timestamp: Jul 4, 2018 5:27:56 AM (6 years ago)
Author: vboxsync
Message:

VMM/HMVMXR0: bugref:9193 Add assertions where possible to verify we don't write stale values from CPUMCTX into the VMCS.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r72848 r72849  
    153153
    154154/** Assert that preemption is disabled or covered by thread-context hooks. */
    155 #define HMVMX_ASSERT_PREEMPT_SAFE()       Assert(   VMMR0ThreadCtxHookIsEnabled(pVCpu)   \
    156                                                  || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     155#define HMVMX_ASSERT_PREEMPT_SAFE()             Assert(   VMMR0ThreadCtxHookIsEnabled(pVCpu)   \
     156                                                       || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    157157
    158158/** Assert that we haven't migrated CPUs when thread-context hooks are not
    159159 *  used. */
    160 #define HMVMX_ASSERT_CPU_SAFE()           AssertMsg(   VMMR0ThreadCtxHookIsEnabled(pVCpu) \
    161                                                     || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
    162                                                     ("Illegal migration! Entered on CPU %u Current %u\n", \
    163                                                     pVCpu->hm.s.idEnteredCpu, RTMpCpuId())); \
     160#define HMVMX_ASSERT_CPU_SAFE()                 AssertMsg(   VMMR0ThreadCtxHookIsEnabled(pVCpu) \
     161                                                          || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
     162                                                          ("Illegal migration! Entered on CPU %u Current %u\n", \
     163                                                          pVCpu->hm.s.idEnteredCpu, RTMpCpuId()))
     164
     165/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
     166 *  context. */
     167#define HMVMX_CPUMCTX_ASSERT(pVCpu, fExtrnMbz)  AssertMsg(!((pVCpu)->cpum.GstCtx.fExtrn & (fExtrnMbz)), \
     168                                                          ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", (pVCpu)->cpum.GstCtx.fExtrn, \
     169                                                          (fExtrnMbz)))
    164170
    165171/** Helper macro for VM-exit handlers called unexpectedly. */
     
    33913397    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
    33923398    {
     3399        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
     3400
    33933401        if (   PDMHasApic(pVCpu->CTX_SUFF(pVM))
    33943402            && APICIsEnabled(pVCpu))
     
    35353543        Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
    35363544
     3545        /** @todo Optimize by checking cache before writing to VMCS. */
    35373546        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
    35383547        AssertRCReturn(rc, rc);
     
    35613570    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
    35623571    {
     3572        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
     3573
    35633574        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
    35643575        AssertRCReturn(rc, rc);
     
    35923603    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP)
    35933604    {
     3605        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP);
     3606
    35943607        int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
    35953608        AssertRCReturn(rc, rc);
     
    36163629    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
    36173630    {
     3631        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
     3632
    36183633        /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
    36193634           Let us assert it as such and use 32-bit VMWRITE. */
     
    36663681    {
    36673682        PVM pVM = pVCpu->CTX_SUFF(pVM);
     3683        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
    36683684        Assert(!RT_HI_U32(pMixedCtx->cr0));
     3685
    36693686        uint32_t const u32ShadowCr0 = pMixedCtx->cr0;
    36703687        uint32_t       u32GuestCr0  = pMixedCtx->cr0;
     
    38463863    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR3)
    38473864    {
     3865        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
     3866
    38483867        RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
    38493868        if (pVM->hm.s.fNestedPaging)
     
    39423961    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4)
    39433962    {
     3963        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
    39443964        Assert(!RT_HI_U32(pMixedCtx->cr4));
     3965
    39453966        uint32_t       u32GuestCr4  = pMixedCtx->cr4;
    39463967        uint32_t const u32ShadowCr4 = pMixedCtx->cr4;
     
    44784499        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS)
    44794500        {
     4501            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
    44804502            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    44814503                pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
     
    44874509        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SS)
    44884510        {
     4511            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
    44894512            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    44904513                pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
     
    44964519        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_DS)
    44974520        {
     4521            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
    44984522            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    44994523                pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
     
    45054529        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_ES)
    45064530        {
     4531            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
    45074532            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    45084533                pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
     
    45144539        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_FS)
    45154540        {
     4541            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
    45164542            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    45174543                pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
     
    45234549        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GS)
    45244550        {
     4551            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
    45254552            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    45264553                pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
     
    45474574    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_TR)
    45484575    {
     4576        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
     4577
    45494578        /*
    45504579         * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
     
    46144643    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR)
    46154644    {
     4645        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
     4646
    46164647        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
    46174648        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  pMixedCtx->gdtr.pGdt);
     
    46304661    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_LDTR)
    46314662    {
     4663        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
     4664
    46324665        /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
    46334666        uint32_t u32Access = 0;
     
    46674700    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_IDTR)
    46684701    {
     4702        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
     4703
    46694704        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
    46704705        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE,  pMixedCtx->idtr.pIdt);
     
    47164751        {
    47174752#if HC_ARCH_BITS == 32
     4753            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_KERNEL_GS_BASE);
     4754
    47184755            int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR,          pMixedCtx->msrLSTAR,        false, NULL);
    47194756            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR,           pMixedCtx->msrSTAR,         false, NULL);
     
    47384775    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
    47394776    {
     4777        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
     4778
    47404779        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
    47414780        {
     
    47624801    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
    47634802    {
     4803        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
     4804
    47644805        if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
    47654806        {
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette