Index: /trunk/include/VBox/vmm/apic.h
===================================================================
--- /trunk/include/VBox/vmm/apic.h	(revision 64662)
+++ /trunk/include/VBox/vmm/apic.h	(revision 64663)
@@ -176,4 +176,5 @@
 
 /* These functions are VMM internal. */
+VMM_INT_DECL(bool)          APICIsEnabled(PVMCPU pVCpu);
 VMM_INT_DECL(bool)          APICGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr);
 VMM_INT_DECL(bool)          APICQueueInterruptToService(PVMCPU pVCpu, uint8_t u8PendingIntr);
Index: /trunk/include/VBox/vmm/cpum.mac
===================================================================
--- /trunk/include/VBox/vmm/cpum.mac	(revision 64662)
+++ /trunk/include/VBox/vmm/cpum.mac	(revision 64663)
@@ -245,5 +245,5 @@
     .msrSFMASK          resb    8
     .msrKERNELGSBASE    resb    8
-    .msrApicBase        resb    8
+    .uMsrPadding0       resb    8
     alignb 8
     .aXcr               resq    2
Index: /trunk/include/VBox/vmm/cpumctx.h
===================================================================
--- /trunk/include/VBox/vmm/cpumctx.h	(revision 64662)
+++ /trunk/include/VBox/vmm/cpumctx.h	(revision 64663)
@@ -413,5 +413,5 @@
     uint64_t        msrSFMASK;          /**< syscall flag mask. */
     uint64_t        msrKERNELGSBASE;    /**< swapgs exchange value. */
-    uint64_t        msrApicBase;        /**< The local APIC base (IA32_APIC_BASE MSR). */
+    uint64_t        uMsrPadding0;       /**< no longer used (used to hold a copy of APIC base MSR). */
     /** @} */
 
@@ -479,5 +479,4 @@
 AssertCompileMemberOffset(CPUMCTX,                  msrSFMASK, 528);
 AssertCompileMemberOffset(CPUMCTX,            msrKERNELGSBASE, 536);
-AssertCompileMemberOffset(CPUMCTX,                msrApicBase, 544);
 AssertCompileMemberOffset(CPUMCTX,                       aXcr, 552);
 AssertCompileMemberOffset(CPUMCTX,                fXStateMask, 568);
Index: /trunk/include/VBox/vmm/pdmapi.h
===================================================================
--- /trunk/include/VBox/vmm/pdmapi.h	(revision 64662)
+++ /trunk/include/VBox/vmm/pdmapi.h	(revision 64663)
@@ -48,4 +48,5 @@
 VMMDECL(int)            PDMIsaSetIrq(PVM pVM, uint8_t u8Irq, uint8_t u8Level, uint32_t uTagSrc);
 VMM_INT_DECL(bool)      PDMHasIoApic(PVM pVM);
+VMM_INT_DECL(bool)      PDMHasApic(PVM pVM);
 VMM_INT_DECL(int)       PDMIoApicSetIrq(PVM pVM, uint8_t u8Irq, uint8_t u8Level, uint32_t uTagSrc);
 VMM_INT_DECL(int)       PDMIoApicBroadcastEoi(PVM pVM, uint8_t uVector);
Index: /trunk/src/VBox/VMM/VMMAll/APICAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/APICAll.cpp	(revision 64662)
+++ /trunk/src/VBox/VMM/VMMAll/APICAll.cpp	(revision 64663)
@@ -412,6 +412,7 @@
  *
  * @returns true if enabled, false otherwise.
- */
-DECLINLINE(bool) apicIsEnabled(PVMCPU pVCpu)
+ * @param   pVCpu           The cross context virtual CPU structure.
+ */
+VMM_INT_DECL(bool) APICIsEnabled(PVMCPU pVCpu)
 {
     PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
@@ -616,5 +617,5 @@
             {
                 if (   VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
-                    && apicIsEnabled(&pVM->aCpus[idCpu]))
+                    && APICIsEnabled(&pVM->aCpus[idCpu]))
                     fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
             }
@@ -626,5 +627,5 @@
             VMCPUID const idCpu = VMCPUSET_FIND_FIRST_PRESENT(pDestCpuSet);
             if (   idCpu < pVM->cCpus
-                && apicIsEnabled(&pVM->aCpus[idCpu]))
+                && APICIsEnabled(&pVM->aCpus[idCpu]))
                 fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
             else
@@ -652,5 +653,5 @@
             {
                 if (   VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
-                    && apicIsEnabled(&pVM->aCpus[idCpu]))
+                    && APICIsEnabled(&pVM->aCpus[idCpu]))
                 {
                     Log2(("APIC: apicSendIntr: Raising NMI on VCPU%u\n", idCpu));
@@ -1879,5 +1880,5 @@
      */
     PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
-    if (apicIsEnabled(pVCpu))
+    if (APICIsEnabled(pVCpu))
     { /* likely */ }
     else
@@ -1997,5 +1998,5 @@
      */
     PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
-    if (apicIsEnabled(pVCpu))
+    if (APICIsEnabled(pVCpu))
     { /* likely */ }
     else
@@ -2347,5 +2348,5 @@
 VMMDECL(int) APICSetTpr(PVMCPU pVCpu, uint8_t u8Tpr)
 {
-    if (apicIsEnabled(pVCpu))
+    if (APICIsEnabled(pVCpu))
         return VBOXSTRICTRC_VAL(apicSetTprEx(pVCpu, u8Tpr, false /* fForceX2ApicBehaviour */));
     return VERR_PDM_NO_APIC_INSTANCE;
@@ -2390,5 +2391,5 @@
 {
     VMCPU_ASSERT_EMT(pVCpu);
-    if (apicIsEnabled(pVCpu))
+    if (APICIsEnabled(pVCpu))
     {
         PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
@@ -2427,5 +2428,5 @@
 
     PVMCPU pVCpu = &pVM->aCpus[0];
-    if (apicIsEnabled(pVCpu))
+    if (APICIsEnabled(pVCpu))
     {
         PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
@@ -2459,5 +2460,5 @@
      * If the APIC isn't enabled, do nothing and pretend success.
      */
-    if (apicIsEnabled(&pVM->aCpus[0]))
+    if (APICIsEnabled(&pVM->aCpus[0]))
     { /* likely */ }
     else
@@ -2508,5 +2509,5 @@
 
     /* If the APIC is enabled, the interrupt is subject to LVT programming. */
-    if (apicIsEnabled(pVCpu))
+    if (APICIsEnabled(pVCpu))
     {
         PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
@@ -2675,5 +2676,5 @@
 
     PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
-    bool const fApicHwEnabled = apicIsEnabled(pVCpu);
+    bool const fApicHwEnabled = APICIsEnabled(pVCpu);
     if (   fApicHwEnabled
         && pXApicPage->svr.u.fApicSoftwareEnable)
Index: /trunk/src/VBox/VMM/VMMAll/PDMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PDMAll.cpp	(revision 64662)
+++ /trunk/src/VBox/VMM/VMMAll/PDMAll.cpp	(revision 64663)
@@ -248,5 +248,5 @@
  * Returns the presence of an IO-APIC.
  *
- * @returns VBox true if an IO-APIC is present.
+ * @returns true if an IO-APIC is present.
  * @param   pVM         The cross context VM structure.
  */
@@ -254,4 +254,16 @@
 {
     return pVM->pdm.s.IoApic.CTX_SUFF(pDevIns) != NULL;
+}
+
+
+/**
+ * Returns the presence of an APIC.
+ *
+ * @returns true if an APIC is present.
+ * @param   pVM         The cross context VM structure.
+ */
+VMM_INT_DECL(bool) PDMHasApic(PVM pVM)
+{
+    return pVM->pdm.s.Apic.CTX_SUFF(pDevIns) != NULL;
 }
 
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 64662)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 64663)
@@ -1608,43 +1608,49 @@
         return VINF_SUCCESS;
 
-    bool    fPendingIntr;
-    uint8_t u8Tpr;
-    int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
-    AssertRCReturn(rc, rc);
-
-    /* Assume that we need to trap all TPR accesses and thus need not check on
-       every #VMEXIT if we should update the TPR. */
-    Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking);
-    pVCpu->hm.s.svm.fSyncVTpr = false;
-
-    /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */
-    if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
-    {
-        pCtx->msrLSTAR = u8Tpr;
-
-        /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
-        if (fPendingIntr)
-            hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
+    int rc = VINF_SUCCESS;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    if (   PDMHasApic(pVM)
+        && APICIsEnabled(pVCpu))
+    {
+        bool    fPendingIntr;
+        uint8_t u8Tpr;
+        rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
+        AssertRCReturn(rc, rc);
+
+        /* Assume that we need to trap all TPR accesses and thus need not check on
+           every #VMEXIT if we should update the TPR. */
+        Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking);
+        pVCpu->hm.s.svm.fSyncVTpr = false;
+
+        /* 32-bit guests use LSTAR MSR for patching guest code which touches the TPR. */
+        if (pVM->hm.s.fTPRPatchingActive)
+        {
+            pCtx->msrLSTAR = u8Tpr;
+
+            /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
+            if (fPendingIntr)
+                hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
+            else
+            {
+                hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+                pVCpu->hm.s.svm.fSyncVTpr = true;
+            }
+        }
         else
         {
-            hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-            pVCpu->hm.s.svm.fSyncVTpr = true;
-        }
-    }
-    else
-    {
-        /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
-        pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
-
-        /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
-        if (fPendingIntr)
-            pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
-        else
-        {
-            pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
-            pVCpu->hm.s.svm.fSyncVTpr = true;
-        }
-
-        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+            /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
+            pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
+
+            /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
+            if (fPendingIntr)
+                pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
+            else
+            {
+                pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
+                pVCpu->hm.s.svm.fSyncVTpr = true;
+            }
+
+            pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+        }
     }
 
@@ -5089,5 +5095,5 @@
         && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
     {
-        RTGCPHYS GCPhysApicBase = pCtx->msrApicBase;
+        RTGCPHYS GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
         GCPhysApicBase &= PAGE_BASE_GC_MASK;
 
@@ -5353,5 +5359,5 @@
     {
         RTGCPHYS GCPhysApicBase;
-        GCPhysApicBase  = pCtx->msrApicBase;
+        GCPhysApicBase  = APICGetBaseMsrNoCheck(pVCpu);
         GCPhysApicBase &= PAGE_BASE_GC_MASK;
 
Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 64662)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 64663)
@@ -3409,40 +3409,72 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
     {
-        /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
-        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
-        {
-            Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
-
-            bool    fPendingIntr  = false;
-            uint8_t u8Tpr         = 0;
-            uint8_t u8PendingIntr = 0;
-            rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
-            AssertRCReturn(rc, rc);
-
-            /*
-             * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
-             * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
-             * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
-             * the interrupt when we VM-exit for other reasons.
-             */
-            pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr;            /* Offset 0x80 is TPR in the APIC MMIO range. */
-            uint32_t u32TprThreshold = 0;
-            if (fPendingIntr)
+        if (   PDMHasApic(pVCpu->CTX_SUFF(pVM))
+            && APICIsEnabled(pVCpu))
+        {
+            /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
+            if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
             {
-                /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
-                const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
-                const uint8_t u8TprPriority     = (u8Tpr >> 4) & 0xf;
-                if (u8PendingPriority <= u8TprPriority)
-                    u32TprThreshold = u8PendingPriority;
-                else
-                    u32TprThreshold = u8TprPriority;             /* Required for Vista 64-bit guest, see @bugref{6398}. */
+                Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
+
+                bool    fPendingIntr  = false;
+                uint8_t u8Tpr         = 0;
+                uint8_t u8PendingIntr = 0;
+                rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
+                AssertRCReturn(rc, rc);
+
+                /*
+                 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
+                 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
+                 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
+                 * the interrupt when we VM-exit for other reasons.
+                 */
+                pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr;            /* Offset 0x80 is TPR in the APIC MMIO range. */
+                uint32_t u32TprThreshold = 0;
+                if (fPendingIntr)
+                {
+                    /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
+                    const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
+                    const uint8_t u8TprPriority     = (u8Tpr >> 4) & 0xf;
+                    if (u8PendingPriority <= u8TprPriority)
+                        u32TprThreshold = u8PendingPriority;
+                    else
+                        u32TprThreshold = u8TprPriority;             /* Required for Vista 64-bit guest, see @bugref{6398}. */
+                }
+
+                rc = hmR0VmxApicSetTprThreshold(pVCpu, u32TprThreshold);
+                AssertRCReturn(rc, rc);
             }
 
-            rc = hmR0VmxApicSetTprThreshold(pVCpu, u32TprThreshold);
-            AssertRCReturn(rc, rc);
-        }
-
+#ifndef IEM_VERIFICATION_MODE_FULL
+            /* Setup the Virtualized APIC accesses. */
+            if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
+            {
+                uint64_t u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
+                if (u64MsrApicBase != pVCpu->hm.s.vmx.u64MsrApicBase)
+                {
+                    PVM pVM = pVCpu->CTX_SUFF(pVM);
+                    Assert(pVM->hm.s.vmx.HCPhysApicAccess);
+                    RTGCPHYS GCPhysApicBase;
+                    GCPhysApicBase  = u64MsrApicBase;
+                    GCPhysApicBase &= PAGE_BASE_GC_MASK;
+
+                    /* Unalias any existing mapping. */
+                    rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
+                    AssertRCReturn(rc, rc);
+
+                    /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
+                    Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGp\n", GCPhysApicBase));
+                    rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
+                    AssertRCReturn(rc, rc);
+
+                    /* Update VMX's cache of the APIC base. */
+                    pVCpu->hm.s.vmx.u64MsrApicBase = u64MsrApicBase;
+                }
+            }
+#endif /* !IEM_VERIFICATION_MODE_FULL */
+        }
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
     }
+
     return rc;
 }
@@ -8602,31 +8634,4 @@
     else
         return rcStrict;
-
-    /** @todo r=ramshankar: Why can't we do this when the APIC base changes
-     *        in hmR0VmxLoadGuestApicState()? Also we can stop caching the
-     *        APIC base in several places just for HM usage and just take the
-     *        function call hit in load-guest state. */
-#ifndef IEM_VERIFICATION_MODE_FULL
-    /* Setup the Virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
-    if (   pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
-        && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
-    {
-        Assert(pVM->hm.s.vmx.HCPhysApicAccess);
-        RTGCPHYS GCPhysApicBase;
-        GCPhysApicBase  = pMixedCtx->msrApicBase;
-        GCPhysApicBase &= PAGE_BASE_GC_MASK;
-
-        /* Unalias any existing mapping. */
-        int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
-        AssertRCReturn(rc, rc);
-
-        /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
-        Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGp\n", GCPhysApicBase));
-        rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
-        AssertRCReturn(rc, rc);
-
-        pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
-    }
-#endif /* !IEM_VERIFICATION_MODE_FULL */
 
     if (TRPMHasTrap(pVCpu))
@@ -12093,6 +12098,7 @@
 
         /* If this is an X2APIC WRMSR access, update the APIC state as well. */
-        if (   pMixedCtx->ecx >= MSR_IA32_X2APIC_START
-            && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
+        if (    pMixedCtx->ecx == MSR_IA32_APICBASE
+            || (   pMixedCtx->ecx >= MSR_IA32_X2APIC_START
+                && pMixedCtx->ecx <= MSR_IA32_X2APIC_END))
         {
             /* We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
@@ -12698,5 +12704,5 @@
                       ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
 
-            RTGCPHYS GCPhys = pMixedCtx->msrApicBase;   /* Always up-to-date, msrApicBase is not part of the VMCS. */
+            RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase;   /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */
             GCPhys &= PAGE_BASE_GC_MASK;
             GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
Index: /trunk/src/VBox/VMM/VMMR3/APIC.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/APIC.cpp	(revision 64662)
+++ /trunk/src/VBox/VMM/VMMR3/APIC.cpp	(revision 64663)
@@ -1666,8 +1666,5 @@
     {
         case PDMAPICMODE_NONE:
-            /** @todo permanently disabling the APIC won't really work (needs
-             *        fixing in HM, CPUM, PDM and possibly other places). See
-             *        @bugref{8353}. */
-            return VMR3SetError(pVM->pUVM, VERR_INVALID_PARAMETER, RT_SRC_POS, "APIC mode 'none' is not supported yet.");
+            LogRel(("APIC: APIC maximum mode configured as 'None', effectively disabled/not-present!\n")); /* fall thru */
         case PDMAPICMODE_APIC:
         case PDMAPICMODE_X2APIC:
Index: /trunk/src/VBox/VMM/VMMR3/CPUM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 64662)
+++ /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 64663)
@@ -262,5 +262,4 @@
     SSMFIELD_ENTRY(         CPUMCTX, msrSFMASK),
     SSMFIELD_ENTRY(         CPUMCTX, msrKERNELGSBASE),
-    /* msrApicBase is not included here, it resides in the APIC device state. */
     SSMFIELD_ENTRY(         CPUMCTX, ldtr.Sel),
     SSMFIELD_ENTRY(         CPUMCTX, ldtr.ValidSel),
@@ -1152,12 +1151,4 @@
     /* C-state control. Guesses. */
     pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = 1 /*C1*/ | RT_BIT_32(25) | RT_BIT_32(26) | RT_BIT_32(27) | RT_BIT_32(28);
-
-
-    /*
-     * Get the APIC base MSR from the APIC device. For historical reasons (saved state), the APIC base
-     * continues to reside in the APIC device and we cache it here in the VCPU for all further accesses.
-     */
-    pCtx->msrApicBase = APICGetBaseMsrNoCheck(pVCpu);
-    LogRel(("CPUM%u: Cached APIC base MSR = %#RX64\n", pVCpu->idCpu, pVCpu->cpum.s.Guest.msrApicBase));
 }
 
@@ -1606,8 +1597,4 @@
         /* Notify PGM of the NXE states in case they've changed. */
         PGMNotifyNxeChanged(pVCpu, RT_BOOL(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE));
-
-        /* Cache the local APIC base from the APIC device. During init. this is done in CPUMR3ResetCpu(). */
-        pVCpu->cpum.s.Guest.msrApicBase = APICGetBaseMsrNoCheck(pVCpu);
-        LogRel(("CPUM%u: Cached APIC base MSR = %#RX64\n", idCpu, pVCpu->cpum.s.Guest.msrApicBase));
 
         /* During init. this is done in CPUMR3InitCompleted(). */
@@ -2524,16 +2511,4 @@
         }
 
-        case VMINITCOMPLETED_RING0:
-        {
-            /* Cache the APIC base (from the APIC device) once it has been initialized. */
-            for (VMCPUID i = 0; i < pVM->cCpus; i++)
-            {
-                PVMCPU pVCpu = &pVM->aCpus[i];
-                pVCpu->cpum.s.Guest.msrApicBase = APICGetBaseMsrNoCheck(pVCpu);
-                LogRel(("CPUM%u: Cached APIC base MSR = %#RX64\n", i, pVCpu->cpum.s.Guest.msrApicBase));
-            }
-            break;
-        }
-
         default:
             break;
Index: /trunk/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp	(revision 64662)
+++ /trunk/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp	(revision 64663)
@@ -58,4 +58,5 @@
 #include <VBox/vmm/cpum.h>
 #include <VBox/vmm/pgm.h>
+#include <VBox/vmm/apic.h>
 #include <VBox/vmm/dbgf.h>
 #include <VBox/vmm/dbgfcorefmt.h>
@@ -314,9 +315,8 @@
  * Gets the guest-CPU context suitable for dumping into the core file.
  *
- * @param   pVM         The cross context VM structure.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pDbgfCpu    Where to dump the guest-CPU data.
  */
-static void dbgfR3GetCoreCpu(PVM pVM, PCPUMCTX pCtx, PDBGFCORECPU pDbgfCpu)
+static void dbgfR3GetCoreCpu(PVMCPU pVCpu, PDBGFCORECPU pDbgfCpu)
 {
 #define DBGFCOPYSEL(a_dbgfsel, a_cpumselreg) \
@@ -328,4 +328,6 @@
     } while (0)
 
+    PVM       pVM  = pVCpu->CTX_SUFF(pVM);
+    PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
     pDbgfCpu->rax             = pCtx->rax;
     pDbgfCpu->rbx             = pCtx->rbx;
@@ -375,5 +377,5 @@
     pDbgfCpu->msrSFMASK       = pCtx->msrSFMASK;
     pDbgfCpu->msrKernelGSBase = pCtx->msrKERNELGSBASE;
-    pDbgfCpu->msrApicBase     = pCtx->msrApicBase;
+    pDbgfCpu->msrApicBase     = APICGetBaseMsrNoCheck(pVCpu);
     pDbgfCpu->aXcr[0]         = pCtx->aXcr[0];
     pDbgfCpu->aXcr[1]         = pCtx->aXcr[1];
@@ -506,4 +508,5 @@
     /*
      * Write the CPU context note headers and data.
+     * We allocate the DBGFCORECPU struct rather than using the stack as it can be pretty large due to X86XSAVEAREA.
      */
     Assert(RTFileTell(hFile) == offCpuDumps);
@@ -517,15 +520,8 @@
     for (uint32_t iCpu = 0; iCpu < pVM->cCpus; iCpu++)
     {
-        PVMCPU      pVCpu = &pVM->aCpus[iCpu];
-        PCPUMCTX    pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
-        if (RT_UNLIKELY(!pCtx))
-        {
-            LogRel((DBGFLOG_NAME ": CPUMQueryGuestCtxPtr failed for vCPU[%u]\n", iCpu));
-            RTMemFree(pDbgfCoreCpu);
-            return VERR_INVALID_POINTER;
-        }
-
+        PVMCPU pVCpu = &pVM->aCpus[iCpu];
         RT_BZERO(pDbgfCoreCpu, sizeof(*pDbgfCoreCpu));
-        dbgfR3GetCoreCpu(pVM, pCtx, pDbgfCoreCpu);
+        dbgfR3GetCoreCpu(pVCpu, pDbgfCoreCpu);
+
         rc = Elf64WriteNoteHdr(hFile, NT_VBOXCPU, g_pcszCoreVBoxCpu, pDbgfCoreCpu, sizeof(*pDbgfCoreCpu));
         if (RT_FAILURE(rc))
Index: /trunk/src/VBox/VMM/include/CPUMInternal.mac
===================================================================
--- /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 64662)
+++ /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 64663)
@@ -219,5 +219,5 @@
     .Guest.msrSFMASK          resb    8
     .Guest.msrKERNELGSBASE    resb    8
-    .Guest.msrApicBase        resb    8
+    .Guest.uMsrPadding0       resb    8
     .Guest.aXcr               resq    2
     .Guest.fXStateMask        resq    1
@@ -477,5 +477,5 @@
     .Hyper.msrSFMASK          resb    8
     .Hyper.msrKERNELGSBASE    resb    8
-    .Hyper.msrApicBase        resb    8
+    .Hyper.uMsrPadding0       resb    8
     .Hyper.aXcr               resq    2
     .Hyper.fXStateMask        resq    1
Index: /trunk/src/VBox/VMM/testcase/tstVMStruct.h
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 64662)
+++ /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 64663)
@@ -183,5 +183,4 @@
     GEN_CHECK_OFF(CPUMCTX, msrSFMASK);
     GEN_CHECK_OFF(CPUMCTX, msrKERNELGSBASE);
-    GEN_CHECK_OFF(CPUMCTX, msrApicBase);
     GEN_CHECK_OFF(CPUMCTX, ldtr);
     GEN_CHECK_OFF(CPUMCTX, tr);
