Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 65221)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 65222)
@@ -3403,4 +3403,6 @@
  *                      out-of-sync. Make sure to update the required fields
  *                      before using them.
+ *
+ * @remarks Can cause longjumps!!!
  */
 DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
@@ -3414,4 +3416,7 @@
             && APICIsEnabled(pVCpu))
         {
+            /*
+             * Setup TPR shadowing.
+             */
             if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
             {
@@ -3445,5 +3450,10 @@
 
 #ifndef IEM_VERIFICATION_MODE_FULL
-            /* Setup the Virtualized APIC accesses. */
+            /*
+             * Setup the virtualized-APIC accesses.
+             *
+             * Note! This can cause longjumps to R3 due to the acquisition of the PGM lock
+             * in both PGMHandlerPhysicalReset() and IOMMMIOMapMMIOHCPage(), see @bugref{8721}.
+             */
             if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
             {
@@ -3451,4 +3461,5 @@
                 if (u64MsrApicBase != pVCpu->hm.s.vmx.u64MsrApicBase)
                 {
+                    /* We only care about the APIC base MSR address and not the other bits. */
                     PVM pVM = pVCpu->CTX_SUFF(pVM);
                     Assert(pVM->hm.s.vmx.HCPhysApicAccess);
@@ -3457,14 +3468,27 @@
                     GCPhysApicBase &= PAGE_BASE_GC_MASK;
 
-                    /* Unalias any existing mapping. */
-                    rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
-                    AssertRCReturn(rc, rc);
-
-                    /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
-                    Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGp\n", GCPhysApicBase));
-                    rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
-                    AssertRCReturn(rc, rc);
-
-                    /* Update VMX's cache of the APIC base. */
+                    /*
+                     * We only need a single HC page as the APIC-access page for all VCPUs as it's used
+                     * purely for causing VM-exits and not for data access within the actual page.
+                     *
+                     * The following check ensures we do the mapping on a per-VM basis as our APIC code
+                     * does not allow different APICs to be mapped at different addresses on different VCPUs.
+                     *
+                     * In fact, we do not support remapping of the APIC base at all (see APICSetBaseMsr()),
+                     * so we just map this once per VM.
+                     */
+                    if (ASMAtomicCmpXchgU64(&pVM->hm.s.vmx.GCPhysApicBase, GCPhysApicBase, 0 /* u64Old */))
+                    {
+                        /* Unalias any existing mapping. */
+                        rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
+                        AssertRCReturn(rc, rc);
+
+                        /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */
+                        Log4(("HM: VCPU%u: Mapped HC APIC-access page GCPhysApicBase=%#RGp\n", pVCpu->idCpu, GCPhysApicBase));
+                        rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
+                        AssertRCReturn(rc, rc);
+                    }
+
+                    /* Update the per-VCPU cache of the APIC base MSR. */
                     pVCpu->hm.s.vmx.u64MsrApicBase = u64MsrApicBase;
                 }
@@ -8386,7 +8410,4 @@
  *                      out-of-sync. Make sure to update the required fields
  *                      before using them.
- *
- * @remarks No-long-jump zone!!!  (Disables and enables long jmps for itself,
- *          caller disables then again on successfull return.  Confusing.)
  */
 static VBOXSTRICTRC hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
@@ -8396,7 +8417,4 @@
     AssertPtr(pMixedCtx);
     HMVMX_ASSERT_PREEMPT_SAFE();
-
-    VMMRZCallRing3Disable(pVCpu);
-    Assert(VMMR0IsLogFlushDisabled(pVCpu));
 
     LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
@@ -8465,6 +8483,4 @@
     /* Clear any unused and reserved bits. */
     HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
-
-    VMMRZCallRing3Enable(pVCpu);
 
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
Index: /trunk/src/VBox/VMM/include/HMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/HMInternal.h	(revision 65221)
+++ /trunk/src/VBox/VMM/include/HMInternal.h	(revision 65222)
@@ -461,4 +461,7 @@
         /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
         R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;
+
+        /** The guest's APIC base MSR address at which the APIC-access page is mapped. */
+        RTGCPHYS volatile           GCPhysApicBase;
 
         /** Physical address of the APIC-access page. */
