VirtualBox

Changeset 64663 in vbox


Ignore:
Timestamp:
Nov 14, 2016 3:46:35 PM (8 years ago)
Author:
vboxsync
Message:

VMM/APIC: Get rid of the msrApicBase cache from CPUMCTX, and make the APIC work when its mode is configured as disabled as well.

Location:
trunk
Files:
13 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/apic.h

    r64655 r64663  
    176176
    177177/* These functions are VMM internal. */
     178VMM_INT_DECL(bool)          APICIsEnabled(PVMCPU pVCpu);
    178179VMM_INT_DECL(bool)          APICGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr);
    179180VMM_INT_DECL(bool)          APICQueueInterruptToService(PVMCPU pVCpu, uint8_t u8PendingIntr);
  • trunk/include/VBox/vmm/cpum.mac

    r62476 r64663  
    245245    .msrSFMASK          resb    8
    246246    .msrKERNELGSBASE    resb    8
    247     .msrApicBase        resb    8
     247    .uMsrPadding0       resb    8
    248248    alignb 8
    249249    .aXcr               resq    2
  • trunk/include/VBox/vmm/cpumctx.h

    r62476 r64663  
    413413    uint64_t        msrSFMASK;          /**< syscall flag mask. */
    414414    uint64_t        msrKERNELGSBASE;    /**< swapgs exchange value. */
    415     uint64_t        msrApicBase;        /**< The local APIC base (IA32_APIC_BASE MSR). */
     415    uint64_t        uMsrPadding0;       /**< no longer used (used to hold a copy of APIC base MSR). */
    416416    /** @} */
    417417
     
    479479AssertCompileMemberOffset(CPUMCTX,                  msrSFMASK, 528);
    480480AssertCompileMemberOffset(CPUMCTX,            msrKERNELGSBASE, 536);
    481 AssertCompileMemberOffset(CPUMCTX,                msrApicBase, 544);
    482481AssertCompileMemberOffset(CPUMCTX,                       aXcr, 552);
    483482AssertCompileMemberOffset(CPUMCTX,                fXStateMask, 568);
  • trunk/include/VBox/vmm/pdmapi.h

    r64655 r64663  
    4848VMMDECL(int)            PDMIsaSetIrq(PVM pVM, uint8_t u8Irq, uint8_t u8Level, uint32_t uTagSrc);
    4949VMM_INT_DECL(bool)      PDMHasIoApic(PVM pVM);
     50VMM_INT_DECL(bool)      PDMHasApic(PVM pVM);
    5051VMM_INT_DECL(int)       PDMIoApicSetIrq(PVM pVM, uint8_t u8Irq, uint8_t u8Level, uint32_t uTagSrc);
    5152VMM_INT_DECL(int)       PDMIoApicBroadcastEoi(PVM pVM, uint8_t uVector);
  • trunk/src/VBox/VMM/VMMAll/APICAll.cpp

    r64662 r64663  
    412412 *
    413413 * @returns true if enabled, false otherwise.
    414  */
    415 DECLINLINE(bool) apicIsEnabled(PVMCPU pVCpu)
     414 * @param   pVCpu           The cross context virtual CPU structure.
     415 */
     416VMM_INT_DECL(bool) APICIsEnabled(PVMCPU pVCpu)
    416417{
    417418    PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
     
    616617            {
    617618                if (   VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
    618                     && apicIsEnabled(&pVM->aCpus[idCpu]))
     619                    && APICIsEnabled(&pVM->aCpus[idCpu]))
    619620                    fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
    620621            }
     
    626627            VMCPUID const idCpu = VMCPUSET_FIND_FIRST_PRESENT(pDestCpuSet);
    627628            if (   idCpu < pVM->cCpus
    628                 && apicIsEnabled(&pVM->aCpus[idCpu]))
     629                && APICIsEnabled(&pVM->aCpus[idCpu]))
    629630                fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
    630631            else
     
    652653            {
    653654                if (   VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
    654                     && apicIsEnabled(&pVM->aCpus[idCpu]))
     655                    && APICIsEnabled(&pVM->aCpus[idCpu]))
    655656                {
    656657                    Log2(("APIC: apicSendIntr: Raising NMI on VCPU%u\n", idCpu));
     
    18791880     */
    18801881    PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    1881     if (apicIsEnabled(pVCpu))
     1882    if (APICIsEnabled(pVCpu))
    18821883    { /* likely */ }
    18831884    else
     
    19971998     */
    19981999    PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    1999     if (apicIsEnabled(pVCpu))
     2000    if (APICIsEnabled(pVCpu))
    20002001    { /* likely */ }
    20012002    else
     
    23472348VMMDECL(int) APICSetTpr(PVMCPU pVCpu, uint8_t u8Tpr)
    23482349{
    2349     if (apicIsEnabled(pVCpu))
     2350    if (APICIsEnabled(pVCpu))
    23502351        return VBOXSTRICTRC_VAL(apicSetTprEx(pVCpu, u8Tpr, false /* fForceX2ApicBehaviour */));
    23512352    return VERR_PDM_NO_APIC_INSTANCE;
     
    23902391{
    23912392    VMCPU_ASSERT_EMT(pVCpu);
    2392     if (apicIsEnabled(pVCpu))
     2393    if (APICIsEnabled(pVCpu))
    23932394    {
    23942395        PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
     
    24272428
    24282429    PVMCPU pVCpu = &pVM->aCpus[0];
    2429     if (apicIsEnabled(pVCpu))
     2430    if (APICIsEnabled(pVCpu))
    24302431    {
    24312432        PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
     
    24592460     * If the APIC isn't enabled, do nothing and pretend success.
    24602461     */
    2461     if (apicIsEnabled(&pVM->aCpus[0]))
     2462    if (APICIsEnabled(&pVM->aCpus[0]))
    24622463    { /* likely */ }
    24632464    else
     
    25082509
    25092510    /* If the APIC is enabled, the interrupt is subject to LVT programming. */
    2510     if (apicIsEnabled(pVCpu))
     2511    if (APICIsEnabled(pVCpu))
    25112512    {
    25122513        PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
     
    26752676
    26762677    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    2677     bool const fApicHwEnabled = apicIsEnabled(pVCpu);
     2678    bool const fApicHwEnabled = APICIsEnabled(pVCpu);
    26782679    if (   fApicHwEnabled
    26792680        && pXApicPage->svr.u.fApicSoftwareEnable)
  • trunk/src/VBox/VMM/VMMAll/PDMAll.cpp

    r64655 r64663  
    248248 * Returns the presence of an IO-APIC.
    249249 *
    250  * @returns VBox true if an IO-APIC is present.
     250 * @returns true if an IO-APIC is present.
    251251 * @param   pVM         The cross context VM structure.
    252252 */
     
    254254{
    255255    return pVM->pdm.s.IoApic.CTX_SUFF(pDevIns) != NULL;
     256}
     257
     258
     259/**
     260 * Returns the presence of an APIC.
     261 *
     262 * @returns true if an APIC is present.
     263 * @param   pVM         The cross context VM structure.
     264 */
     265VMM_INT_DECL(bool) PDMHasApic(PVM pVM)
     266{
     267    return pVM->pdm.s.Apic.CTX_SUFF(pDevIns) != NULL;
    256268}
    257269
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r64655 r64663  
    16081608        return VINF_SUCCESS;
    16091609
    1610     bool    fPendingIntr;
    1611     uint8_t u8Tpr;
    1612     int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
    1613     AssertRCReturn(rc, rc);
    1614 
    1615     /* Assume that we need to trap all TPR accesses and thus need not check on
    1616        every #VMEXIT if we should update the TPR. */
    1617     Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking);
    1618     pVCpu->hm.s.svm.fSyncVTpr = false;
    1619 
    1620     /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */
    1621     if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
    1622     {
    1623         pCtx->msrLSTAR = u8Tpr;
    1624 
    1625         /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
    1626         if (fPendingIntr)
    1627             hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
     1610    int rc = VINF_SUCCESS;
     1611    PVM pVM = pVCpu->CTX_SUFF(pVM);
     1612    if (   PDMHasApic(pVM)
     1613        && APICIsEnabled(pVCpu))
     1614    {
     1615        bool    fPendingIntr;
     1616        uint8_t u8Tpr;
     1617        rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
     1618        AssertRCReturn(rc, rc);
     1619
     1620        /* Assume that we need to trap all TPR accesses and thus need not check on
     1621           every #VMEXIT if we should update the TPR. */
     1622        Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking);
     1623        pVCpu->hm.s.svm.fSyncVTpr = false;
     1624
     1625        /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */
     1626        if (pVM->hm.s.fTPRPatchingActive)
     1627        {
     1628            pCtx->msrLSTAR = u8Tpr;
     1629
     1630            /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
     1631            if (fPendingIntr)
     1632                hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
     1633            else
     1634            {
     1635                hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1636                pVCpu->hm.s.svm.fSyncVTpr = true;
     1637            }
     1638        }
    16281639        else
    16291640        {
    1630             hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1631             pVCpu->hm.s.svm.fSyncVTpr = true;
    1632         }
    1633     }
    1634     else
    1635     {
    1636         /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
    1637         pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
    1638 
    1639         /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
    1640         if (fPendingIntr)
    1641             pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
    1642         else
    1643         {
    1644             pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
    1645             pVCpu->hm.s.svm.fSyncVTpr = true;
    1646         }
    1647 
    1648         pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
     1641            /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
     1642            pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
     1643
     1644            /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
     1645            if (fPendingIntr)
     1646                pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
     1647            else
     1648            {
     1649                pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
     1650                pVCpu->hm.s.svm.fSyncVTpr = true;
     1651            }
     1652
     1653            pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
     1654        }
    16491655    }
    16501656
     
    50895095        && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
    50905096    {
    5091         RTGCPHYS GCPhysApicBase = pCtx->msrApicBase;
     5097        RTGCPHYS GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
    50925098        GCPhysApicBase &= PAGE_BASE_GC_MASK;
    50935099
     
    53535359    {
    53545360        RTGCPHYS GCPhysApicBase;
    5355         GCPhysApicBase  = pCtx->msrApicBase;
     5361        GCPhysApicBase  = APICGetBaseMsrNoCheck(pVCpu);
    53565362        GCPhysApicBase &= PAGE_BASE_GC_MASK;
    53575363
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r64655 r64663  
    34093409    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
    34103410    {
    3411         /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
    3412         if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    3413         {
    3414             Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
    3415 
    3416             bool    fPendingIntr  = false;
    3417             uint8_t u8Tpr         = 0;
    3418             uint8_t u8PendingIntr = 0;
    3419             rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
    3420             AssertRCReturn(rc, rc);
    3421 
    3422             /*
    3423              * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
    3424              * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
    3425              * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
    3426              * the interrupt when we VM-exit for other reasons.
    3427              */
    3428             pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr;            /* Offset 0x80 is TPR in the APIC MMIO range. */
    3429             uint32_t u32TprThreshold = 0;
    3430             if (fPendingIntr)
     3411        if (   PDMHasApic(pVCpu->CTX_SUFF(pVM))
     3412            && APICIsEnabled(pVCpu))
     3413        {
     3414            /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
     3415            if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    34313416            {
    3432                 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
    3433                 const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
    3434                 const uint8_t u8TprPriority     = (u8Tpr >> 4) & 0xf;
    3435                 if (u8PendingPriority <= u8TprPriority)
    3436                     u32TprThreshold = u8PendingPriority;
    3437                 else
    3438                     u32TprThreshold = u8TprPriority;             /* Required for Vista 64-bit guest, see @bugref{6398}. */
     3417                Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
     3418
     3419                bool    fPendingIntr  = false;
     3420                uint8_t u8Tpr         = 0;
     3421                uint8_t u8PendingIntr = 0;
     3422                rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
     3423                AssertRCReturn(rc, rc);
     3424
     3425                /*
     3426                 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
     3427                 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
     3428                 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
     3429                 * the interrupt when we VM-exit for other reasons.
     3430                 */
     3431                pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr;            /* Offset 0x80 is TPR in the APIC MMIO range. */
     3432                uint32_t u32TprThreshold = 0;
     3433                if (fPendingIntr)
     3434                {
     3435                    /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
     3436                    const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
     3437                    const uint8_t u8TprPriority     = (u8Tpr >> 4) & 0xf;
     3438                    if (u8PendingPriority <= u8TprPriority)
     3439                        u32TprThreshold = u8PendingPriority;
     3440                    else
     3441                        u32TprThreshold = u8TprPriority;             /* Required for Vista 64-bit guest, see @bugref{6398}. */
     3442                }
     3443
     3444                rc = hmR0VmxApicSetTprThreshold(pVCpu, u32TprThreshold);
     3445                AssertRCReturn(rc, rc);
    34393446            }
    34403447
    3441             rc = hmR0VmxApicSetTprThreshold(pVCpu, u32TprThreshold);
    3442             AssertRCReturn(rc, rc);
    3443         }
    3444 
     3448#ifndef IEM_VERIFICATION_MODE_FULL
     3449            /* Setup the Virtualized APIC accesses. */
     3450            if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
     3451            {
     3452                uint64_t u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
     3453                if (u64MsrApicBase != pVCpu->hm.s.vmx.u64MsrApicBase)
     3454                {
     3455                    PVM pVM = pVCpu->CTX_SUFF(pVM);
     3456                    Assert(pVM->hm.s.vmx.HCPhysApicAccess);
     3457                    RTGCPHYS GCPhysApicBase;
     3458                    GCPhysApicBase  = u64MsrApicBase;
     3459                    GCPhysApicBase &= PAGE_BASE_GC_MASK;
     3460
     3461                    /* Unalias any existing mapping. */
     3462                    rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
     3463                    AssertRCReturn(rc, rc);
     3464
     3465                    /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
     3466                    Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGp\n", GCPhysApicBase));
     3467                    rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
     3468                    AssertRCReturn(rc, rc);
     3469
     3470                    /* Update VMX's cache of the APIC base. */
     3471                    pVCpu->hm.s.vmx.u64MsrApicBase = u64MsrApicBase;
     3472                }
     3473            }
     3474#endif /* !IEM_VERIFICATION_MODE_FULL */
     3475        }
    34453476        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
    34463477    }
     3478
    34473479    return rc;
    34483480}
     
    86028634    else
    86038635        return rcStrict;
    8604 
    8605     /** @todo r=ramshankar: Why can't we do this when the APIC base changes
    8606      *        in hmR0VmxLoadGuestApicState()? Also we can stop caching the
    8607      *        APIC base in several places just for HM usage and just take the
    8608      *        function call hit in load-guest state. */
    8609 #ifndef IEM_VERIFICATION_MODE_FULL
    8610     /* Setup the Virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
    8611     if (   pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
    8612         && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
    8613     {
    8614         Assert(pVM->hm.s.vmx.HCPhysApicAccess);
    8615         RTGCPHYS GCPhysApicBase;
    8616         GCPhysApicBase  = pMixedCtx->msrApicBase;
    8617         GCPhysApicBase &= PAGE_BASE_GC_MASK;
    8618 
    8619         /* Unalias any existing mapping. */
    8620         int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
    8621         AssertRCReturn(rc, rc);
    8622 
    8623         /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
    8624         Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGp\n", GCPhysApicBase));
    8625         rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
    8626         AssertRCReturn(rc, rc);
    8627 
    8628         pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
    8629     }
    8630 #endif /* !IEM_VERIFICATION_MODE_FULL */
    86318636
    86328637    if (TRPMHasTrap(pVCpu))
     
    1209312098
    1209412099        /* If this is an X2APIC WRMSR access, update the APIC state as well. */
    12095         if (   pMixedCtx->ecx >= MSR_IA32_X2APIC_START
    12096             && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
     12100        if (    pMixedCtx->ecx == MSR_IA32_APICBASE
     12101            || (   pMixedCtx->ecx >= MSR_IA32_X2APIC_START
     12102                && pMixedCtx->ecx <= MSR_IA32_X2APIC_END))
    1209712103        {
    1209812104            /* We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
     
    1269812704                      ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
    1269912705
    12700             RTGCPHYS GCPhys = pMixedCtx->msrApicBase;   /* Always up-to-date, msrApicBase is not part of the VMCS. */
     12706            RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase;   /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */
    1270112707            GCPhys &= PAGE_BASE_GC_MASK;
    1270212708            GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
  • trunk/src/VBox/VMM/VMMR3/APIC.cpp

    r64655 r64663  
    16661666    {
    16671667        case PDMAPICMODE_NONE:
    1668             /** @todo permanently disabling the APIC won't really work (needs
    1669              *        fixing in HM, CPUM, PDM and possibly other places). See
    1670              *        @bugref{8353}. */
    1671             return VMR3SetError(pVM->pUVM, VERR_INVALID_PARAMETER, RT_SRC_POS, "APIC mode 'none' is not supported yet.");
     1668            LogRel(("APIC: APIC maximum mode configured as 'None', effectively disabled/not-present!\n"));
    16721669        case PDMAPICMODE_APIC:
    16731670        case PDMAPICMODE_X2APIC:
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r64655 r64663  
    262262    SSMFIELD_ENTRY(         CPUMCTX, msrSFMASK),
    263263    SSMFIELD_ENTRY(         CPUMCTX, msrKERNELGSBASE),
    264     /* msrApicBase is not included here, it resides in the APIC device state. */
    265264    SSMFIELD_ENTRY(         CPUMCTX, ldtr.Sel),
    266265    SSMFIELD_ENTRY(         CPUMCTX, ldtr.ValidSel),
     
    11521151    /* C-state control. Guesses. */
    11531152    pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = 1 /*C1*/ | RT_BIT_32(25) | RT_BIT_32(26) | RT_BIT_32(27) | RT_BIT_32(28);
    1154 
    1155 
    1156     /*
    1157      * Get the APIC base MSR from the APIC device. For historical reasons (saved state), the APIC base
    1158      * continues to reside in the APIC device and we cache it here in the VCPU for all further accesses.
    1159      */
    1160     pCtx->msrApicBase = APICGetBaseMsrNoCheck(pVCpu);
    1161     LogRel(("CPUM%u: Cached APIC base MSR = %#RX64\n", pVCpu->idCpu, pVCpu->cpum.s.Guest.msrApicBase));
    11621153}
    11631154
     
    16061597        /* Notify PGM of the NXE states in case they've changed. */
    16071598        PGMNotifyNxeChanged(pVCpu, RT_BOOL(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE));
    1608 
    1609         /* Cache the local APIC base from the APIC device. During init. this is done in CPUMR3ResetCpu(). */
    1610         pVCpu->cpum.s.Guest.msrApicBase = APICGetBaseMsrNoCheck(pVCpu);
    1611         LogRel(("CPUM%u: Cached APIC base MSR = %#RX64\n", idCpu, pVCpu->cpum.s.Guest.msrApicBase));
    16121599
    16131600        /* During init. this is done in CPUMR3InitCompleted(). */
     
    25242511        }
    25252512
    2526         case VMINITCOMPLETED_RING0:
    2527         {
    2528             /* Cache the APIC base (from the APIC device) once it has been initialized. */
    2529             for (VMCPUID i = 0; i < pVM->cCpus; i++)
    2530             {
    2531                 PVMCPU pVCpu = &pVM->aCpus[i];
    2532                 pVCpu->cpum.s.Guest.msrApicBase = APICGetBaseMsrNoCheck(pVCpu);
    2533                 LogRel(("CPUM%u: Cached APIC base MSR = %#RX64\n", i, pVCpu->cpum.s.Guest.msrApicBase));
    2534             }
    2535             break;
    2536         }
    2537 
    25382513        default:
    25392514            break;
  • trunk/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp

    r62478 r64663  
    5858#include <VBox/vmm/cpum.h>
    5959#include <VBox/vmm/pgm.h>
     60#include <VBox/vmm/apic.h>
    6061#include <VBox/vmm/dbgf.h>
    6162#include <VBox/vmm/dbgfcorefmt.h>
     
    314315 * Gets the guest-CPU context suitable for dumping into the core file.
    315316 *
    316  * @param   pVM         The cross context VM structure.
    317  * @param   pCtx        Pointer to the guest-CPU context.
     317 * @param   pVCpu       The cross context virtual CPU structure.
    318318 * @param   pDbgfCpu    Where to dump the guest-CPU data.
    319319 */
    320 static void dbgfR3GetCoreCpu(PVM pVM, PCPUMCTX pCtx, PDBGFCORECPU pDbgfCpu)
     320static void dbgfR3GetCoreCpu(PVMCPU pVCpu, PDBGFCORECPU pDbgfCpu)
    321321{
    322322#define DBGFCOPYSEL(a_dbgfsel, a_cpumselreg) \
     
    328328    } while (0)
    329329
     330    PVM       pVM  = pVCpu->CTX_SUFF(pVM);
     331    PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    330332    pDbgfCpu->rax             = pCtx->rax;
    331333    pDbgfCpu->rbx             = pCtx->rbx;
     
    375377    pDbgfCpu->msrSFMASK       = pCtx->msrSFMASK;
    376378    pDbgfCpu->msrKernelGSBase = pCtx->msrKERNELGSBASE;
    377     pDbgfCpu->msrApicBase     = pCtx->msrApicBase;
     379    pDbgfCpu->msrApicBase     = APICGetBaseMsrNoCheck(pVCpu);
    378380    pDbgfCpu->aXcr[0]         = pCtx->aXcr[0];
    379381    pDbgfCpu->aXcr[1]         = pCtx->aXcr[1];
     
    506508    /*
    507509     * Write the CPU context note headers and data.
     510     * We allocate the DBGFCORECPU struct. rather than using the stack as it can be pretty large due to X86XSAVEAREA.
    508511     */
    509512    Assert(RTFileTell(hFile) == offCpuDumps);
     
    517520    for (uint32_t iCpu = 0; iCpu < pVM->cCpus; iCpu++)
    518521    {
    519         PVMCPU      pVCpu = &pVM->aCpus[iCpu];
    520         PCPUMCTX    pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
    521         if (RT_UNLIKELY(!pCtx))
    522         {
    523             LogRel((DBGFLOG_NAME ": CPUMQueryGuestCtxPtr failed for vCPU[%u]\n", iCpu));
    524             RTMemFree(pDbgfCoreCpu);
    525             return VERR_INVALID_POINTER;
    526         }
    527 
     522        PVMCPU pVCpu = &pVM->aCpus[iCpu];
    528523        RT_BZERO(pDbgfCoreCpu, sizeof(*pDbgfCoreCpu));
    529         dbgfR3GetCoreCpu(pVM, pCtx, pDbgfCoreCpu);
     524        dbgfR3GetCoreCpu(pVCpu, pDbgfCoreCpu);
     525
    530526        rc = Elf64WriteNoteHdr(hFile, NT_VBOXCPU, g_pcszCoreVBoxCpu, pDbgfCoreCpu, sizeof(*pDbgfCoreCpu));
    531527        if (RT_FAILURE(rc))
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r62478 r64663  
    219219    .Guest.msrSFMASK          resb    8
    220220    .Guest.msrKERNELGSBASE    resb    8
    221     .Guest.msrApicBase        resb    8
     221    .Guest.uMsrPadding0       resb    8
    222222    .Guest.aXcr               resq    2
    223223    .Guest.fXStateMask        resq    1
     
    477477    .Hyper.msrSFMASK          resb    8
    478478    .Hyper.msrKERNELGSBASE    resb    8
    479     .Hyper.msrApicBase        resb    8
     479    .Hyper.uMsrPadding0       resb    8
    480480    .Hyper.aXcr               resq    2
    481481    .Hyper.fXStateMask        resq    1
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r64655 r64663  
    183183    GEN_CHECK_OFF(CPUMCTX, msrSFMASK);
    184184    GEN_CHECK_OFF(CPUMCTX, msrKERNELGSBASE);
    185     GEN_CHECK_OFF(CPUMCTX, msrApicBase);
    186185    GEN_CHECK_OFF(CPUMCTX, ldtr);
    187186    GEN_CHECK_OFF(CPUMCTX, tr);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette