Index: /trunk/src/VBox/VMM/PGMInline.h
===================================================================
--- /trunk/src/VBox/VMM/PGMInline.h	(revision 31177)
+++ /trunk/src/VBox/VMM/PGMInline.h	(revision 31178)
@@ -329,12 +329,83 @@
  *
  * @returns See PGMDynMapGCPage.
+ * @param   pVM         The VM handle.
  * @param   pVCpu       The current CPU.
  * @param   GCPhys      The guest physical address of the page.
  * @param   ppv         Where to store the mapping address.
  */
-DECLINLINE(int) pgmR0DynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
+DECLINLINE(int) pgmR0DynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
 {
     STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInl, a);
     AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
+
+    /*
+     * Get the ram range.
+     */
+    PPGMRAMRANGE    pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
+    RTGCPHYS        off  = GCPhys - pRam->GCPhys;
+    if (RT_UNLIKELY(off >= pRam->cb
+        /** @todo   || page state stuff */))
+    {
+        /* This case is not counted into StatR0DynMapGCPageInl. */
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamMisses);
+        return PGMDynMapGCPage(pVM, GCPhys, ppv);
+    }
+
+    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
+    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamHits);
+
+    /*
+     * pgmR0DynMapHCPageInlined without stats.
+     */
+    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
+    Assert(!(HCPhys & PAGE_OFFSET_MASK));
+    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
+
+    unsigned    iHash   = PGMMAPSET_HASH(HCPhys);
+    unsigned    iEntry  = pSet->aiHashTable[iHash];
+    if (    iEntry < pSet->cEntries
+        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
+    {
+        *ppv = pSet->aEntries[iEntry].pvPage;
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlHits);
+    }
+    else
+    {
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlMisses);
+        pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);
+    }
+
+    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInl, a);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes
+ * access to pages already in the set.
+ *
+ * @returns See PGMDynMapGCPage.
+ * @param   pVCpu       The current CPU.
+ * @param   GCPhys      The guest physical address of the page.
+ * @param   ppv         Where to store the mapping address.
+ */
+DECLINLINE(int) pgmR0DynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
+{
+    return pgmR0DynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv);
+}
+
+
+/**
+ * Inlined version of the ring-0 version of PGMDynMapGCPageOff that optimizes
+ * access to pages already in the set.
+ *
+ * @returns See PGMDynMapGCPage.
+ * @param   pVCpu       The current CPU.
+ * @param   GCPhys      The guest physical address of the page.
+ * @param   ppv         Where to store the mapping address.
+ */
+DECLINLINE(int) pgmR0DynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
+{
+    STAM_PROFILE_START(&pVCpu->pgm.s.StatR0DynMapGCPageInl, a);
 
     /*
@@ -349,60 +420,4 @@
         /* This case is not counted into StatR0DynMapGCPageInl. */
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamMisses);
-        return PGMDynMapGCPage(pVM, GCPhys, ppv);
-    }
-
-    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
-    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamHits);
-
-    /*
-     * pgmR0DynMapHCPageInlined with out stats.
-     */
-    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
-    Assert(!(HCPhys & PAGE_OFFSET_MASK));
-    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
-
-    unsigned    iHash   = PGMMAPSET_HASH(HCPhys);
-    unsigned    iEntry  = pSet->aiHashTable[iHash];
-    if (    iEntry < pSet->cEntries
-        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
-    {
-        *ppv = pSet->aEntries[iEntry].pvPage;
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlHits);
-    }
-    else
-    {
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlMisses);
-        pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);
-    }
-
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInl, a);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Inlined version of the ring-0 version of PGMDynMapGCPageOff that optimizes
- * access to pages already in the set.
- *
- * @returns See PGMDynMapGCPage.
- * @param   pVCpu       The current CPU.
- * @param   HCPhys      The physical address of the page.
- * @param   ppv         Where to store the mapping address.
- */
-DECLINLINE(int) pgmR0DynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
-{
-    STAM_PROFILE_START(&pVCpu->pgm.s.StatR0DynMapGCPageInl, a);
-
-    /*
-     * Get the ram range.
-     */
-    PVM             pVM  = pVCpu->CTX_SUFF(pVM);
-    PPGMRAMRANGE    pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
-    RTGCPHYS        off  = GCPhys - pRam->GCPhys;
-    if (RT_UNLIKELY(off >= pRam->cb
-        /** @todo   || page state stuff */))
-    {
-        /* This case is not counted into StatR0DynMapGCPageInl. */
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamMisses);
         return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
     }
@@ -461,4 +476,29 @@
     }
     AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
+}
+
+/**
+ * Maps the page into current context (RC and maybe R0).
+ *
+ * @returns pointer to the mapping.
+ * @param   pVM         The VM handle.
+ * @param   pVCpu       The current CPU.
+ * @param   pPage       The page.
+ */
+DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage)
+{
+    if (pPage->idx >= PGMPOOL_IDX_FIRST)
+    {
+        Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
+        void *pv;
+# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+        Assert(pVCpu == VMMGetCpu(pVM));
+        pgmR0DynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv);
+# else
+        PGMDynMapHCPage(pVM, pPage->Core.Key, &pv);
+# endif
+        return pv;
+    }
+    AssertFatalMsgFailed(("pgmPoolMapPageV2Inlined invalid page index %x\n", pPage->idx));
 }
 
Index: /trunk/src/VBox/VMM/PGMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/PGMInternal.h	(revision 31177)
+++ /trunk/src/VBox/VMM/PGMInternal.h	(revision 31178)
@@ -228,21 +228,47 @@
  *
  * @returns VBox status code.
+ * @param   pVM         The VM handle.
+ * @param   pVCpu       The current CPU.
+ * @param   HCPhys      The HC physical address to map to a virtual one.
+ * @param   ppv         Where to store the virtual address. No need to cast
+ *                      this.
+ *
+ * @remark  In RC this uses PGMDynMapHCPage(), so it will consume part of the small
+ *          page window employed by that function. Be careful.
+ * @remark  There is no need to assert on the result.
+ */
+#ifdef IN_RC
+# define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
+     PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
+#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+# define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
+     pgmR0DynMapHCPageInlined(pVCpu, HCPhys, (void **)(ppv))
+#else
+# define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
+     MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
+#endif
+
+/** @def PGM_GCPHYS_2_PTR_V2
+ * Maps a GC physical page address to a virtual address.
+ *
+ * @returns VBox status code.
  * @param   pVM     The VM handle.
- * @param   HCPhys  The HC physical address to map to a virtual one.
+ * @param   pVCpu   The current CPU.
+ * @param   GCPhys  The GC physical address to map to a virtual one.
  * @param   ppv     Where to store the virtual address. No need to cast this.
  *
- * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume of the
+ * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume part of the
  *          small page window employeed by that function. Be careful.
  * @remark  There is no need to assert on the result.
  */
 #ifdef IN_RC
-# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
-     PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
+# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
+     PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
-     pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, (void **)(ppv))
+# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
+     pgmR0DynMapGCPageV2Inlined(pVM, pVCpu, GCPhys, (void **)(ppv))
 #else
-# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
-     MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
+# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
+     PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
 #endif
 
@@ -259,14 +285,5 @@
  * @remark  There is no need to assert on the result.
  */
-#ifdef IN_RC
-# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
-     PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
-#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
-     pgmR0DynMapGCPageInlined(VMMGetCpu(pVM), GCPhys, (void **)(ppv))
-#else
-# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
-     PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
-#endif
+#define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGM_GCPHYS_2_PTR_V2(pVM, VMMGetCpu(pVM), GCPhys, ppv)
 
 /** @def PGM_GCPHYS_2_PTR_BY_VMCPU
@@ -282,11 +299,5 @@
  * @remark  There is no need to assert on the result.
  */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-# define PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhys, ppv) \
-     pgmR0DynMapGCPageInlined(pVCpu, GCPhys, (void **)(ppv))
-#else
-# define PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhys, ppv) \
-     PGM_GCPHYS_2_PTR((pVCpu)->CTX_SUFF(pVM), GCPhys, ppv)
-#endif
+#define PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhys, ppv) PGM_GCPHYS_2_PTR_V2((pVCpu)->CTX_SUFF(pVM), pVCpu, GCPhys, ppv)
 
 /** @def PGM_GCPHYS_2_PTR_EX
@@ -2168,7 +2179,7 @@
  */
 #if defined(IN_RC)
-# define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageInlined((pVM), (pPage))
+# define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageV2Inlined((pVM), (pVCpu), (pPage))
 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-# define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageInlined((pVM), (pPage))
+# define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageV2Inlined((pVM), (pVCpu), (pPage))
 #else
 # define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   PGMPOOL_PAGE_2_PTR((pVM), (pPage))
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 31177)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 31178)
@@ -1827,5 +1827,5 @@
                  */
                 PGSTPT pPTSrc;
-                int rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
+                int rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
                 if (RT_SUCCESS(rc))
                 {
@@ -2332,5 +2332,5 @@
      */
     PGSTPT pPTSrc;
-    int rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
+    int rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
     if (RT_FAILURE(rc))
     {
@@ -3903,5 +3903,5 @@
                         */
                         const GSTPT *pPTSrc;
-                        rc = PGM_GCPHYS_2_PTR(pVM, GCPhysGst & ~(RTGCPHYS)(PAGE_SIZE - 1), &pPTSrc);
+                        rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhysGst & ~(RTGCPHYS)(PAGE_SIZE - 1), &pPTSrc);
                         if (RT_FAILURE(rc))
                         {
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllGst.h	(revision 31177)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllGst.h	(revision 31178)
@@ -501,5 +501,5 @@
                  */
                 PGSTPT pPT;
-                int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
+                int rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, Pde.u & GST_PDE_PG_MASK, &pPT);
                 if (RT_SUCCESS(rc))
                 {
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 31177)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 31178)
@@ -3083,5 +3083,5 @@
             /** @todo we should check reserved bits ... */
             void *pvSrc;
-            rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
+            rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, &pvSrc);
             switch (rc)
             {
@@ -3125,5 +3125,5 @@
             AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
             void *pvSrc1;
-            rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
+            rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys1, &pvSrc1);
             switch (rc)
             {
@@ -3139,5 +3139,5 @@
 
             void *pvSrc2;
-            rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
+            rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys2, &pvSrc2);
             switch (rc)
             {
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllShw.h	(revision 31177)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllShw.h	(revision 31178)
@@ -157,5 +157,5 @@
     /* PDPT */
     PX86PDPT        pPDPT;
-    int rc = PGM_HCPHYS_2_PTR(pVM, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
+    int rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
     if (RT_FAILURE(rc))
         return rc;
@@ -167,5 +167,5 @@
     /* PD */
     PX86PDPAE       pPd;
-    rc = PGM_HCPHYS_2_PTR(pVM, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
+    rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
     if (RT_FAILURE(rc))
         return rc;
@@ -231,5 +231,5 @@
     if (!(Pde.u & PGM_PDFLAGS_MAPPING))
     {
-        int rc2 = PGM_HCPHYS_2_PTR(pVM, Pde.u & SHW_PDE_PG_MASK, &pPT);
+        int rc2 = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
         if (RT_FAILURE(rc2))
             return rc2;
@@ -323,5 +323,5 @@
         /* PDPT */
         PX86PDPT        pPDPT;
-        rc = PGM_HCPHYS_2_PTR(pVM, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
+        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
         if (RT_FAILURE(rc))
             return rc;
@@ -333,5 +333,5 @@
         /* PD */
         PX86PDPAE       pPd;
-        rc = PGM_HCPHYS_2_PTR(pVM, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
+        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
         if (RT_FAILURE(rc))
             return rc;
@@ -368,5 +368,5 @@
          */
         PSHWPT          pPT;
-        rc = PGM_HCPHYS_2_PTR(pVM, Pde.u & SHW_PDE_PG_MASK, &pPT);
+        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
         if (RT_FAILURE(rc))
             return rc;
