Index: /trunk/include/VBox/iom.h
===================================================================
--- /trunk/include/VBox/iom.h	(revision 19991)
+++ /trunk/include/VBox/iom.h	(revision 19992)
@@ -215,4 +215,5 @@
 VMMDECL(int)  IOMInterpretCheckPortIOAccess(PVM pVM, PCPUMCTXCORE pCtxCore, RTIOPORT Port, unsigned cb);
 VMMDECL(int)  IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags);
+VMMDECL(int)  IOMMMIOMapMMIOHCPage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags);
 VMMDECL(int)  IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys);
 VMMDECL(bool) IOMIsLockOwner(PVM pVM);
Index: /trunk/include/VBox/pgm.h
===================================================================
--- /trunk/include/VBox/pgm.h	(revision 19991)
+++ /trunk/include/VBox/pgm.h	(revision 19992)
@@ -345,4 +345,5 @@
 VMMDECL(int)        PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage);
 VMMDECL(int)        PGMHandlerPhysicalPageAlias(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTGCPHYS GCPhysPageRemap);
+VMMDECL(int)        PGMHandlerPhysicalPageAliasHC(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap);
 VMMDECL(int)        PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys);
 VMMDECL(bool)       PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys);
Index: /trunk/src/VBox/VMM/HWACCMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/HWACCMInternal.h	(revision 19991)
+++ /trunk/src/VBox/VMM/HWACCMInternal.h	(revision 19992)
@@ -259,9 +259,9 @@
         R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;
 
-        /** R0 memory object for the virtual APIC mmio cache. */
+        /** R0 memory object for the APIC physical page (serves for filtering accesses). */
         RTR0MEMOBJ                  pMemObjAPIC;
-        /** Physical address of the virtual APIC mmio cache. */
+        /** Physical address of the APIC physical page (serves for filtering accesses). */
         RTHCPHYS                    pAPICPhys;
-        /** Virtual address of the virtual APIC mmio cache. */
+        /** Virtual address of the APIC physical page (serves for filtering accesses). */
         R0PTRTYPE(uint8_t *)        pAPIC;
 
@@ -489,4 +489,14 @@
         /** Current VMX_VMCS_CTRL_PROC_EXEC_CONTROLS. */
         uint64_t                    proc_ctls;
+
+        /** Current VMX_VMCS_CTRL_PROC_EXEC2_CONTROLS. */
+        uint64_t                    proc_ctls2;
+
+        /** R0 memory object for the virtual APIC page for TPR caching. */
+        RTR0MEMOBJ                  pMemObjVAPIC;
+        /** Physical address of the virtual APIC page for TPR caching. */
+        RTHCPHYS                    pVAPICPhys;
+        /** Virtual address of the virtual APIC page for TPR caching. */
+        R0PTRTYPE(uint8_t *)        pVAPIC;
 
         /** Current CR0 mask. */
Index: /trunk/src/VBox/VMM/VMMAll/EMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/EMAll.cpp	(revision 19991)
+++ /trunk/src/VBox/VMM/VMMAll/EMAll.cpp	(revision 19992)
@@ -2903,5 +2903,5 @@
             rc = PDMApicReadMSR(pVM, pVCpu->idCpu, pRegFrame->ecx, &val);
         else
-            /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
+            /* We should actually trigger a #GP here, but don't as that will cause more trouble. */
             val = 0;
         break;
Index: /trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp	(revision 19991)
+++ /trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp	(revision 19992)
@@ -1884,4 +1884,55 @@
 }
 
+/**
+ * Maps an HC page in place of an MMIO page for direct access.
+ *
+ * (This is a special optimization used by the APIC in the VT-x case.)
+ *
+ * @returns VBox status code.
+ *
+ * @param   pVM             The virtual machine.
+ * @param   GCPhys          The address of the MMIO page to be changed.
+ * @param   HCPhys          The address of the host physical page.
+ * @param   fPageFlags      Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
+ *                          for the time being.
+ */
+VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
+{
+    Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
+
+    AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
+    Assert(HWACCMIsEnabled(pVM));
+
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+
+    /*
+     * Lookup the context range node the page belongs to.
+     */
+    PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
+    AssertMsgReturn(pRange,
+                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys),
+                    VERR_IOM_MMIO_RANGE_NOT_FOUND);
+    Assert((pRange->GCPhys       & PAGE_OFFSET_MASK) == 0);
+    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
+
+    /*
+     * Do the aliasing; page align the addresses since PGM is picky.
+     */
+    GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
+    HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
+
+    int rc = PGMHandlerPhysicalPageAliasHC(pVM, pRange->GCPhys, GCPhys, HCPhys);
+    AssertRCReturn(rc, rc);
+
+    /*
+     * Modify the shadow page table. Since it's an MMIO page it won't be present and we
+     * can simply prefetch it.
+     *
+     * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
+     */
+    rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
+    Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
+    return VINF_SUCCESS;
+}
 
 /**
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp	(revision 19991)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp	(revision 19992)
@@ -1010,5 +1010,5 @@
                                 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
                                 VERR_PGM_PHYS_NOT_MMIO2);
-                if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPage))
+                if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
                     return VINF_PGM_HANDLER_ALREADY_ALIASED;
 
@@ -1035,4 +1035,99 @@
             PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
             LogFlow(("PGMHandlerPhysicalPageAlias: => %R[pgmpage]\n", pPage));
+
+#ifndef IN_RC
+            HWACCMInvalidatePhysPage(pVM, GCPhysPage);
+#endif
+            return VINF_SUCCESS;
+        }
+
+        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
+                         GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
+        return VERR_INVALID_PARAMETER;
+    }
+
+    AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
+    return VERR_PGM_HANDLER_NOT_FOUND;
+}
+
+/**
+ * Replaces an MMIO page with an arbitrary HC page.
+ *
+ * This is a worker for IOMMMIOMapMMIOHCPage that works in a similar way to
+ * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
+ * backing, the caller must provide a replacement page; unlike the MMIO2 variant,
+ * the replacement here is an arbitrary host (HC) physical page.
+ *
+ * The caller must do required page table modifications. You can get away
+ * without making any modifations since it's an MMIO page, the cost is an extra
+ * \#PF which will then resync the page.
+ *
+ * Call PGMHandlerPhysicalReset() to restore the MMIO page.
+ *
+ * The caller may still get handler callback even after this call and must be
+ * able to deal correctly with such calls. The reason for these callbacks are
+ * either that we're executing in the recompiler (which doesn't know about this
+ * arrangement) or that we've been restored from saved state (where we won't
+ * save the change).
+ *
+ * @returns VBox status code.
+ * @param   pVM                 The VM handle
+ * @param   GCPhys              The start address of the access handler. This
+ *                              must be a fully page aligned range or we risk
+ *                              messing up other handlers installed for the
+ *                              start and end pages.
+ * @param   GCPhysPage          The physical address of the page to turn off
+ *                              access monitoring for.
+ * @param   HCPhysPageRemap     The physical address of the HC page that
+ *                              serves as backing memory.
+ *
+ * @remark  May cause a page pool flush if used on a page that is already
+ *          aliased.
+ */
+VMMDECL(int)  PGMHandlerPhysicalPageAliasHC(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
+{
+///    Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
+
+    /*
+     * Lookup and validate the range.
+     */
+    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
+    if (RT_LIKELY(pCur))
+    {
+        if (RT_LIKELY(    GCPhysPage >= pCur->Core.Key
+                      &&  GCPhysPage <= pCur->Core.KeyLast))
+        {
+            AssertReturn(pCur->enmType == PGMPHYSHANDLERTYPE_MMIO, VERR_ACCESS_DENIED);
+            AssertReturn(!(pCur->Core.Key & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+            AssertReturn((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, VERR_INVALID_PARAMETER);
+
+            /*
+             * Get and validate the pages.
+             */
+            PPGMPAGE pPage;
+            int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
+            AssertRCReturn(rc, rc);
+            if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
+            {
+                AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
+                                ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
+                                VERR_PGM_PHYS_NOT_MMIO2);
+                return VINF_PGM_HANDLER_ALREADY_ALIASED;
+            }
+            Assert(PGM_PAGE_IS_ZERO(pPage));
+
+            /*
+             * Do the actual remapping here.
+             * This page now serves as an alias for the backing memory specified.
+             */
+            LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %HGp\n",
+                     GCPhysPage, pPage, HCPhysPageRemap));
+            PGM_PAGE_SET_HCPHYS(pPage, HCPhysPageRemap);
+            PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
+            PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
+            /** @todo hack alert */
+            PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
+            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
+            LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
 
 #ifndef IN_RC
Index: /trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp	(revision 19991)
+++ /trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp	(revision 19992)
@@ -769,5 +769,5 @@
 
     /* 64 bits guest mode? */
-    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
+    if (CPUMIsGuestInLongModeEx(pCtx))
     {
 #if !defined(VBOX_ENABLE_64_BITS_GUESTS)
@@ -942,5 +942,5 @@
     /* Note the 32 bits exception for AMD (X86_CPUID_AMD_FEATURE_ECX_CR8L), but that appears missing in Intel CPUs */
     /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!!!!! */
-    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
+    if (CPUMIsGuestInLongModeEx(pCtx))
     {
         bool fPending;
Index: /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp	(revision 19991)
+++ /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp	(revision 19992)
@@ -180,5 +180,5 @@
     if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     {
-        /* Allocate one page for the virtual APIC mmio cache. */
+        /* Allocate one page for the APIC physical page (serves for filtering accesses). */
         rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjAPIC, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
         AssertRC(rc);
@@ -245,4 +245,14 @@
         pVCpu->hwaccm.s.vmx.cr0_mask = 0;
         pVCpu->hwaccm.s.vmx.cr4_mask = 0;
+
+        /* Allocate one page for the virtual APIC page for TPR caching. */
+        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjVAPIC, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+        AssertRC(rc);
+        if (RT_FAILURE(rc))
+            return rc;
+
+        pVCpu->hwaccm.s.vmx.pVAPIC     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjVAPIC);
+        pVCpu->hwaccm.s.vmx.pVAPICPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjVAPIC, 0);
+        ASMMemZero32(pVCpu->hwaccm.s.vmx.pVAPIC, PAGE_SIZE);
 
         /* Current guest paging mode. */
@@ -267,10 +277,19 @@
     for (unsigned i=0;i<pVM->cCPUs;i++)
     {
-        if (pVM->aCpus[i].hwaccm.s.vmx.pMemObjVMCS != NIL_RTR0MEMOBJ)
-        {
-            RTR0MemObjFree(pVM->aCpus[i].hwaccm.s.vmx.pMemObjVMCS, false);
-            pVM->aCpus[i].hwaccm.s.vmx.pMemObjVMCS = NIL_RTR0MEMOBJ;
-            pVM->aCpus[i].hwaccm.s.vmx.pVMCS       = 0;
-            pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys   = 0;
+        PVMCPU pVCpu = &pVM->aCpus[i];
+
+        if (pVCpu->hwaccm.s.vmx.pMemObjVMCS != NIL_RTR0MEMOBJ)
+        {
+            RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjVMCS, false);
+            pVCpu->hwaccm.s.vmx.pMemObjVMCS = NIL_RTR0MEMOBJ;
+            pVCpu->hwaccm.s.vmx.pVMCS       = 0;
+            pVCpu->hwaccm.s.vmx.pVMCSPhys   = 0;
+        }
+        if (pVCpu->hwaccm.s.vmx.pMemObjVAPIC != NIL_RTR0MEMOBJ)
+        {
+            RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjVAPIC, false);
+            pVCpu->hwaccm.s.vmx.pMemObjVAPIC = NIL_RTR0MEMOBJ;
+            pVCpu->hwaccm.s.vmx.pVAPIC       = 0;
+            pVCpu->hwaccm.s.vmx.pVAPICPhys   = 0;
         }
     }
@@ -415,5 +434,5 @@
             /** @todo make sure they don't conflict with the above requirements. */
             val &= pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
-
+            pVCpu->hwaccm.s.vmx.proc_ctls2 = val;
             rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2, val);
             AssertRC(rc);
@@ -475,5 +494,9 @@
             /* Optional */
             rc  = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_THRESHOLD, 0);
-            rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVM->hwaccm.s.vmx.pAPICPhys);
+            rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hwaccm.s.vmx.pVAPICPhys);
+
+            if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
+                rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL, pVM->hwaccm.s.vmx.pAPICPhys);
+
             AssertRC(rc);
         }
@@ -1142,5 +1165,5 @@
 #endif
     /* 64 bits guest mode? */
-    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
+    if (CPUMIsGuestInLongModeEx(pCtx))
         val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE;
     /* else Must be zero when AMD64 is not available. */
@@ -1168,5 +1191,5 @@
     /* else: Must be zero when AMD64 is not available. */
 #elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
-    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
+    if (CPUMIsGuestInLongModeEx(pCtx))
         val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64;      /* our switcher goes to long mode */
     else
@@ -1623,5 +1646,5 @@
 
     /* 64 bits guest mode? */
-    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
+    if (CPUMIsGuestInLongModeEx(pCtx))
     {
 #if !defined(VBOX_ENABLE_64_BITS_GUESTS)
@@ -2140,5 +2163,5 @@
      * @todo reduce overhead
      */
-    if (   (pCtx->msrEFER & MSR_K6_EFER_LMA)
+    if (    CPUMIsGuestInLongModeEx(pCtx)
         &&  pVM->hwaccm.s.vmx.pAPIC)
     {
@@ -2904,6 +2927,25 @@
         /* If the page is present, then it's a page level protection fault. */
         if (exitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
+        {
             errCode |= X86_TRAP_PF_P;
 
+#if 0
+            /* Shortcut for APIC TPR reads and writes; 32 bits guests only */
+            if (    (GCPhys & 0xfff) == 0x080
+                &&  GCPhys > 0x1000000  /* to skip VGA frame buffer accesses */
+                &&  !CPUMIsGuestInLongModeEx(pCtx)
+                &&  (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
+            {
+                RTGCPHYS GCPhysApicBase;
+                PDMApicGetBase(pVM, &GCPhysApicBase);   /* @todo cache this */
+                if (GCPhys == GCPhysApicBase + 0x80)
+                {
+                    pVCpu->hwaccm.s.vmx.proc_ctls2 |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;
+                    rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2, val);
+                    AssertRC(rc);
+                }
+            }
+#endif
+        }
         LogFlow(("EPT Page fault %x at %RGp error code %x\n", (uint32_t)exitQualification, GCPhys, errCode));
 
@@ -3376,4 +3418,6 @@
             PDMApicGetBase(pVM, &GCPhys);
             GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(exitQualification);
+
+            Log(("Apic access at %RGp\n", GCPhys));
             rc = VINF_EM_RAW_EMULATE_INSTR;
             break;
@@ -4114,4 +4158,5 @@
     case VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL:
     case VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL:
+    case VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL:
     case VMX_VMCS_GUEST_LINK_PTR_FULL:
     case VMX_VMCS_GUEST_PDPTR0_FULL:
