Index: /trunk/include/VBox/pgm.h
===================================================================
--- /trunk/include/VBox/pgm.h	(revision 31401)
+++ /trunk/include/VBox/pgm.h	(revision 31402)
@@ -386,23 +386,10 @@
 
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
-VMMDECL(int)        PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv);
-VMMDECL(int)        PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv);
-# ifdef IN_RC
-VMMDECL(int)        PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv);
-VMMDECL(void)       PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage);
-VMMDECL(void)       PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage);
-#  ifdef VBOX_STRICT
-VMMDECL(void)       PGMDynCheckLocks(PVM pVM);
-#  endif
-# endif
-VMMDECL(void)       PGMDynMapStartAutoSet(PVMCPU pVCpu);
-VMMDECL(bool)       PGMDynMapStartOrMigrateAutoSet(PVMCPU pVCpu);
-VMMDECL(void)       PGMDynMapReleaseAutoSet(PVMCPU pVCpu);
-VMMDECL(void)       PGMDynMapFlushAutoSet(PVMCPU pVCpu);
-VMMDECL(void)       PGMDynMapMigrateAutoSet(PVMCPU pVCpu);
-VMMDECL(uint32_t)   PGMDynMapPushAutoSubset(PVMCPU pVCpu);
-VMMDECL(void)       PGMDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset);
+VMMDECL(void)       PGMRZDynMapStartAutoSet(PVMCPU pVCpu);
+VMMDECL(void)       PGMRZDynMapReleaseAutoSet(PVMCPU pVCpu);
+VMMDECL(void)       PGMRZDynMapFlushAutoSet(PVMCPU pVCpu);
+VMMDECL(uint32_t)   PGMRZDynMapPushAutoSubset(PVMCPU pVCpu);
+VMMDECL(void)       PGMRZDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset);
 #endif
-
 
 VMMDECL(void) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages);
@@ -422,4 +409,5 @@
  * @{
  */
+VMMRCDECL(int)      PGMRCDynMapInit(PVM pVM);
 /** @} */
 #endif /* IN_RC */
@@ -441,4 +429,6 @@
 VMMR0DECL(void)     PGMR0DynMapTermVM(PVM pVM);
 VMMR0DECL(int)      PGMR0DynMapAssertIntegrity(void);
+VMMR0DECL(bool)     PGMR0DynMapStartOrMigrateAutoSet(PVMCPU pVCpu);
+VMMR0DECL(void)     PGMR0DynMapMigrateAutoSet(PVMCPU pVCpu);
 # endif
 /** @} */
Index: /trunk/src/VBox/VMM/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/Makefile.kmk	(revision 31401)
+++ /trunk/src/VBox/VMM/Makefile.kmk	(revision 31402)
@@ -362,4 +362,5 @@
  	VMMGC/HWACCMGCA.asm \
  	VMMRZ/DBGFRZ.cpp \
+ 	VMMRZ/PGMRZDynMap.cpp \
  	VMMRZ/VMMRZ.cpp \
  	VMMAll/CPUMAllRegs.cpp \
@@ -494,5 +495,5 @@
 	VMMR0/VMMR0JmpA-x86.asm
 VMMR0_SOURCES.darwin.x86 = \
-	VMMR0/PGMR0DynMap.cpp
+	VMMRZ/PGMRZDynMap.cpp
 
 # disable annoying warnings about array subscript above array bounds in aPages[]
Index: /trunk/src/VBox/VMM/PATM/VMMGC/PATMGC.cpp
===================================================================
--- /trunk/src/VBox/VMM/PATM/VMMGC/PATMGC.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/PATM/VMMGC/PATMGC.cpp	(revision 31402)
@@ -292,4 +292,5 @@
                     /* We are no longer executing PATM code; set PIF again. */
                     pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
+                    PGMRZDynMapReleaseAutoSet(VMMGetCpu0(pVM));
                     CPUMGCCallV86Code(pRegFrame);
                     /* does not return */
Index: /trunk/src/VBox/VMM/PGM.cpp
===================================================================
--- /trunk/src/VBox/VMM/PGM.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/PGM.cpp	(revision 31402)
@@ -481,7 +481,9 @@
  * In order to be able to map in and out memory and to be able to support
  * guest with more RAM than we've got virtual address space, we'll employing
- * a mapping cache. There is already a tiny one for GC (see PGMGCDynMapGCPageEx)
- * and we'll create a similar one for ring-0 unless we decide to setup a dedicate
- * memory context for the HWACCM execution.
+ * a mapping cache.  Normally ring-0 and ring-3 can share the same cache,
+ * however on 32-bit darwin the ring-0 code is running in a different memory
+ * context and therefore needs a separate cache.  In raw-mode context we also
+ * need a separate cache.  The 32-bit darwin mapping cache and the one for
+ * raw-mode context share a lot of code, see PGMRZDYNMAP.
  *
  *
@@ -1720,6 +1722,4 @@
 
     /* GC only: */
-    PGM_REG_COUNTER(&pStats->StatRCDynMapCacheHits,             "/PGM/RC/DynMapCache/Hits" ,          "Number of dynamic page mapping cache hits.");
-    PGM_REG_COUNTER(&pStats->StatRCDynMapCacheMisses,           "/PGM/RC/DynMapCache/Misses" ,        "Number of dynamic page mapping cache misses.");
     PGM_REG_COUNTER(&pStats->StatRCInvlPgConflict,              "/PGM/RC/InvlPgConflict",             "Number of times PGMInvalidatePage() detected a mapping conflict.");
     PGM_REG_COUNTER(&pStats->StatRCInvlPgSyncMonCR3,            "/PGM/RC/InvlPgSyncMonitorCR3",       "Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3.");
@@ -1778,40 +1778,4 @@
 # endif
         /* R0 only: */
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapMigrateInvlPg,         "/PGM/CPU%u/R0/DynMapMigrateInvlPg",        "invlpg count in PGMDynMapMigrateAutoSet.");
-        PGM_REG_PROFILE(&pCpuStats->StatR0DynMapGCPageInl,             "/PGM/CPU%u/R0/DynMapPageGCPageInl",        "Calls to pgmR0DynMapGCPageInlined.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapGCPageInlHits,         "/PGM/CPU%u/R0/DynMapPageGCPageInl/Hits",   "Hash table lookup hits.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapGCPageInlMisses,       "/PGM/CPU%u/R0/DynMapPageGCPageInl/Misses", "Misses that falls back to code common with PGMDynMapHCPage.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapGCPageInlRamHits,      "/PGM/CPU%u/R0/DynMapPageGCPageInl/RamHits",   "1st ram range hits.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapGCPageInlRamMisses,    "/PGM/CPU%u/R0/DynMapPageGCPageInl/RamMisses", "1st ram range misses, takes slow path.");
-        PGM_REG_PROFILE(&pCpuStats->StatR0DynMapHCPageInl,             "/PGM/CPU%u/R0/DynMapPageHCPageInl",        "Calls to pgmR0DynMapHCPageInlined.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapHCPageInlHits,         "/PGM/CPU%u/R0/DynMapPageHCPageInl/Hits",   "Hash table lookup hits.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapHCPageInlMisses,       "/PGM/CPU%u/R0/DynMapPageHCPageInl/Misses", "Misses that falls back to code common with PGMDynMapHCPage.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPage,                  "/PGM/CPU%u/R0/DynMapPage",                 "Calls to pgmR0DynMapPage");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSetOptimize,           "/PGM/CPU%u/R0/DynMapPage/SetOptimize",     "Calls to pgmDynMapOptimizeAutoSet.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSetSearchFlushes,      "/PGM/CPU%u/R0/DynMapPage/SetSearchFlushes","Set search restorting to subset flushes.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSetSearchHits,         "/PGM/CPU%u/R0/DynMapPage/SetSearchHits",   "Set search hits.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSetSearchMisses,       "/PGM/CPU%u/R0/DynMapPage/SetSearchMisses", "Set search misses.");
-        PGM_REG_PROFILE(&pCpuStats->StatR0DynMapHCPage,                "/PGM/CPU%u/R0/DynMapPage/HCPage",          "Calls to PGMDynMapHCPage (ring-0).");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageHits0,             "/PGM/CPU%u/R0/DynMapPage/Hits0",           "Hits at iPage+0");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageHits1,             "/PGM/CPU%u/R0/DynMapPage/Hits1",           "Hits at iPage+1");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageHits2,             "/PGM/CPU%u/R0/DynMapPage/Hits2",           "Hits at iPage+2");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageInvlPg,            "/PGM/CPU%u/R0/DynMapPage/InvlPg",          "invlpg count in pgmR0DynMapPageSlow.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageSlow,              "/PGM/CPU%u/R0/DynMapPage/Slow",            "Calls to pgmR0DynMapPageSlow - subtract this from pgmR0DynMapPage to get 1st level hits.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageSlowLoopHits,      "/PGM/CPU%u/R0/DynMapPage/SlowLoopHits" ,   "Hits in the loop path.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageSlowLoopMisses,    "/PGM/CPU%u/R0/DynMapPage/SlowLoopMisses",  "Misses in the loop path. NonLoopMisses = Slow - SlowLoopHit - SlowLoopMisses");
-        //PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageSlowLostHits,      "/PGM/CPU%u/R0/DynMapPage/SlowLostHits",    "Lost hits.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSubsets,               "/PGM/CPU%u/R0/Subsets",                    "Times PGMDynMapPushAutoSubset was called.");
-        PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPopFlushes,            "/PGM/CPU%u/R0/SubsetPopFlushes",           "Times PGMDynMapPopAutoSubset flushes the subset.");
-        PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[0],           "/PGM/CPU%u/R0/SetSize000..09",              "00-09% filled");
-        PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[1],           "/PGM/CPU%u/R0/SetSize010..19",              "10-19% filled");
-        PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[2],           "/PGM/CPU%u/R0/SetSize020..29",              "20-29% filled");
-        PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[3],           "/PGM/CPU%u/R0/SetSize030..39",              "30-39% filled");
-        PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[4],           "/PGM/CPU%u/R0/SetSize040..49",              "40-49% filled");
-        PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[5],           "/PGM/CPU%u/R0/SetSize050..59",              "50-59% filled");
-        PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[6],           "/PGM/CPU%u/R0/SetSize060..69",              "60-69% filled");
-        PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[7],           "/PGM/CPU%u/R0/SetSize070..79",              "70-79% filled");
-        PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[8],           "/PGM/CPU%u/R0/SetSize080..89",              "80-89% filled");
-        PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[9],           "/PGM/CPU%u/R0/SetSize090..99",              "90-99% filled");
-        PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[10],          "/PGM/CPU%u/R0/SetSize100",                 "100% filled");
 
         /* RZ only: */
@@ -1869,4 +1833,41 @@
         PGM_REG_COUNTER(&pCpuStats->StatRZGuestROMWriteUnhandled,      "/PGM/CPU%u/RZ/ROMWriteUnhandled",              "The number of times the Guest ROM change was passed back to the recompiler.");
 
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapMigrateInvlPg,         "/PGM/CPU%u/RZ/DynMap/MigrateInvlPg",            "invlpg count in PGMR0DynMapMigrateAutoSet.");
+        PGM_REG_PROFILE(&pCpuStats->StatRZDynMapGCPageInl,             "/PGM/CPU%u/RZ/DynMap/PageGCPageInl",            "Calls to pgmR0DynMapGCPageInlined.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlHits,         "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/Hits",       "Hash table lookup hits.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlMisses,       "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/Misses",     "Misses that falls back to the code common.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlRamHits,      "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/RamHits",    "1st ram range hits.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlRamMisses,    "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/RamMisses",  "1st ram range misses, takes slow path.");
+        PGM_REG_PROFILE(&pCpuStats->StatRZDynMapHCPageInl,             "/PGM/CPU%u/RZ/DynMap/PageHCPageInl",            "Calls to pgmRZDynMapHCPageInlined.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapHCPageInlHits,         "/PGM/CPU%u/RZ/DynMap/PageHCPageInl/Hits",       "Hash table lookup hits.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapHCPageInlMisses,       "/PGM/CPU%u/RZ/DynMap/PageHCPageInl/Misses",     "Misses that falls back to the code common.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPage,                  "/PGM/CPU%u/RZ/DynMap/Page",                     "Calls to pgmR0DynMapPage");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetOptimize,           "/PGM/CPU%u/RZ/DynMap/Page/SetOptimize",         "Calls to pgmRZDynMapOptimizeAutoSet.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchFlushes,      "/PGM/CPU%u/RZ/DynMap/Page/SetSearchFlushes",    "Set search restorting to subset flushes.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchHits,         "/PGM/CPU%u/RZ/DynMap/Page/SetSearchHits",       "Set search hits.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchMisses,       "/PGM/CPU%u/RZ/DynMap/Page/SetSearchMisses",     "Set search misses.");
+        PGM_REG_PROFILE(&pCpuStats->StatRZDynMapHCPage,                "/PGM/CPU%u/RZ/DynMap/Page/HCPage",              "Calls to pgmRZDynMapHCPageCommon (ring-0).");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits0,             "/PGM/CPU%u/RZ/DynMap/Page/Hits0",               "Hits at iPage+0");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits1,             "/PGM/CPU%u/RZ/DynMap/Page/Hits1",               "Hits at iPage+1");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits2,             "/PGM/CPU%u/RZ/DynMap/Page/Hits2",               "Hits at iPage+2");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageInvlPg,            "/PGM/CPU%u/RZ/DynMap/Page/InvlPg",              "invlpg count in pgmR0DynMapPageSlow.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlow,              "/PGM/CPU%u/RZ/DynMap/Page/Slow",                "Calls to pgmR0DynMapPageSlow - subtract this from pgmR0DynMapPage to get 1st level hits.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLoopHits,      "/PGM/CPU%u/RZ/DynMap/Page/SlowLoopHits" ,       "Hits in the loop path.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLoopMisses,    "/PGM/CPU%u/RZ/DynMap/Page/SlowLoopMisses",      "Misses in the loop path. NonLoopMisses = Slow - SlowLoopHit - SlowLoopMisses");
+        //PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLostHits,      "/PGM/CPU%u/RZ/DynMap/Page/SlowLostHits",        "Lost hits.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSubsets,               "/PGM/CPU%u/RZ/DynMap/Subsets",                  "Times PGMRZDynMapPushAutoSubset was called.");
+        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPopFlushes,            "/PGM/CPU%u/RZ/DynMap/SubsetPopFlushes",         "Times PGMRZDynMapPopAutoSubset flushes the subset.");
+        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[0],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct000..09",      "00-09% filled (RC: min(set-size, dynmap-size))");
+        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[1],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct010..19",      "10-19% filled (RC: min(set-size, dynmap-size))");
+        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[2],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct020..29",      "20-29% filled (RC: min(set-size, dynmap-size))");
+        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[3],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct030..39",      "30-39% filled (RC: min(set-size, dynmap-size))");
+        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[4],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct040..49",      "40-49% filled (RC: min(set-size, dynmap-size))");
+        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[5],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct050..59",      "50-59% filled (RC: min(set-size, dynmap-size))");
+        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[6],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct060..69",      "60-69% filled (RC: min(set-size, dynmap-size))");
+        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[7],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct070..79",      "70-79% filled (RC: min(set-size, dynmap-size))");
+        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[8],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct080..89",      "80-89% filled (RC: min(set-size, dynmap-size))");
+        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[9],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct090..99",      "90-99% filled (RC: min(set-size, dynmap-size))");
+        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[10],     "/PGM/CPU%u/RZ/DynMap/SetFilledPct100",          "100% filled (RC: min(set-size, dynmap-size))");
+
         /* HC only: */
 
@@ -2037,12 +2038,9 @@
     pVM->pgm.s.paDynPageMapPaePTEsGC   = pMapping->aPTs[iPT].paPaePTsRC + iPG * sizeof(pMapping->aPTs[0].paPaePTsR3->a[0]);
 
-    /* init cache */
+    /* init cache area */
     RTHCPHYS HCPhysDummy = MMR3PageDummyHCPhys(pVM);
-    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache); i++)
-        pVM->pgm.s.aHCPhysDynPageMapCache[i] = HCPhysDummy;
-
-    for (unsigned i = 0; i < MM_HYPER_DYNAMIC_SIZE; i += PAGE_SIZE)
-    {
-        rc = PGMMap(pVM, pVM->pgm.s.pbDynPageMapBaseGC + i, HCPhysDummy, PAGE_SIZE, 0);
+    for (uint32_t offDynMap = 0; offDynMap < MM_HYPER_DYNAMIC_SIZE; offDynMap += PAGE_SIZE)
+    {
+        rc = PGMMap(pVM, pVM->pgm.s.pbDynPageMapBaseGC + offDynMap, HCPhysDummy, PAGE_SIZE, 0);
         AssertRCReturn(rc, rc);
     }
@@ -2205,6 +2203,21 @@
      */
     pVM->pgm.s.paDynPageMap32BitPTEsGC += offDelta;
-    pVM->pgm.s.paDynPageMapPaePTEsGC += offDelta;
-    pVM->pgm.s.pbDynPageMapBaseGC += offDelta;
+    pVM->pgm.s.paDynPageMapPaePTEsGC   += offDelta;
+    pVM->pgm.s.pbDynPageMapBaseGC      += offDelta;
+
+    if (pVM->pgm.s.pRCDynMap)
+    {
+        pVM->pgm.s.pRCDynMap += offDelta;
+        PPGMRCDYNMAP pDynMap = (PPGMRCDYNMAP)MMHyperRCToCC(pVM, pVM->pgm.s.pRCDynMap);
+
+        pDynMap->paPages     += offDelta;
+        PPGMRCDYNMAPENTRY paPages = (PPGMRCDYNMAPENTRY)MMHyperRCToCC(pVM, pDynMap->paPages);
+
+        for (uint32_t iPage = 0; iPage < pDynMap->cPages; iPage++)
+        {
+            paPages[iPage].pvPage  += offDelta;
+            paPages[iPage].uPte.pv += offDelta;
+        }
+    }
 
     /*
Index: /trunk/src/VBox/VMM/PGMInline.h
===================================================================
--- /trunk/src/VBox/VMM/PGMInline.h	(revision 31401)
+++ /trunk/src/VBox/VMM/PGMInline.h	(revision 31402)
@@ -286,9 +286,9 @@
 }
 
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-
-/**
- * Inlined version of the ring-0 version of PGMDynMapHCPage that
- * optimizes access to pages already in the set.
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
+
+/**
+ * Inlined version of the ring-0 version of the host page mapping code
+ * that optimizes access to pages already in the set.
  *
  * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
@@ -297,9 +297,9 @@
  * @param   ppv         Where to store the mapping address.
  */
-DECLINLINE(int) pgmR0DynMapHCPageInlined(PVMCPU pVCpu, RTHCPHYS HCPhys, void **ppv)
+DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPU pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
 {
     PPGMMAPSET  pSet    = &pVCpu->pgm.s.AutoSet;
 
-    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapHCPageInl, a);
+    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
     Assert(!(HCPhys & PAGE_OFFSET_MASK));
     Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
@@ -308,16 +308,18 @@
     unsigned    iEntry  = pSet->aiHashTable[iHash];
     if (    iEntry < pSet->cEntries
-        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
-    {
+        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
+        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
+    {
+        pSet->aEntries[iEntry].cInlinedRefs++;
         *ppv = pSet->aEntries[iEntry].pvPage;
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapHCPageInlHits);
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlHits);
     }
     else
     {
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapHCPageInlMisses);
-        pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);
-    }
-
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapHCPageInl, a);
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlMisses);
+        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
+    }
+
+    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
     return VINF_SUCCESS;
 }
@@ -325,8 +327,8 @@
 
 /**
- * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes
- * access to pages already in the set.
- *
- * @returns See PGMDynMapGCPage.
+ * Inlined version of the guest page mapping code that optimizes access to pages
+ * already in the set.
+ *
+ * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
  * @param   pVM         The VM handle.
  * @param   pVCpu       The current CPU.
@@ -334,7 +336,7 @@
  * @param   ppv         Where to store the mapping address.
  */
-DECLINLINE(int) pgmR0DynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
-{
-    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInl, a);
+DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
+{
+    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
     AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
 
@@ -347,14 +349,14 @@
         /** @todo   || page state stuff */))
     {
-        /* This case is not counted into StatR0DynMapGCPageInl. */
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamMisses);
-        return PGMDynMapGCPage(pVM, GCPhys, ppv);
+        /* This case is not counted into StatRZDynMapGCPageInl. */
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
+        return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
     }
 
     RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
-    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamHits);
+    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
 
     /*
-     * pgmR0DynMapHCPageInlined with out stats.
+     * pgmRZDynMapHCPageInlined without stats.
      */
     PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
@@ -365,16 +367,18 @@
     unsigned    iEntry  = pSet->aiHashTable[iHash];
     if (    iEntry < pSet->cEntries
-        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
-    {
+        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
+        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
+    {
+        pSet->aEntries[iEntry].cInlinedRefs++;
         *ppv = pSet->aEntries[iEntry].pvPage;
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlHits);
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
     }
     else
     {
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlMisses);
-        pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);
-    }
-
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInl, a);
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
+        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
+    }
+
+    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
     return VINF_SUCCESS;
 }
@@ -382,30 +386,31 @@
 
 /**
- * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes
+ * Inlined version of the ring-0 version of guest page mapping that optimizes
  * access to pages already in the set.
  *
- * @returns See PGMDynMapGCPage.
+ * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
  * @param   pVCpu       The current CPU.
  * @param   GCPhys      The guest physical address of the page.
  * @param   ppv         Where to store the mapping address.
  */
-DECLINLINE(int) pgmR0DynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
-{
-    return pgmR0DynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv);
-}
-
-
-/**
- * Inlined version of the ring-0 version of PGMDynMapGCPageOff that optimizes
- * access to pages already in the set.
- *
- * @returns See PGMDynMapGCPage.
+DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
+{
+    return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
+}
+
+
+/**
+ * Inlined version of the ring-0 version of the guest byte mapping code
+ * that optimizes access to pages already in the set.
+ *
+ * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
  * @param   pVCpu       The current CPU.
  * @param   HCPhys      The physical address of the page.
- * @param   ppv         Where to store the mapping address.
- */
-DECLINLINE(int) pgmR0DynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
-{
-    STAM_PROFILE_START(&pVCpu->pgm.s.StatR0DynMapGCPageInl, a);
+ * @param   ppv         Where to store the mapping address. The offset is
+ *                      preserved.
+ */
+DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
+{
+    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZDynMapGCPageInl, a);
 
     /*
@@ -418,14 +423,14 @@
         /** @todo   || page state stuff */))
     {
-        /* This case is not counted into StatR0DynMapGCPageInl. */
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamMisses);
-        return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
+        /* This case is not counted into StatRZDynMapGCPageInl. */
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
+        return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
     }
 
     RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
-    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamHits);
+    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
 
     /*
-     * pgmR0DynMapHCPageInlined with out stats.
+     * pgmRZDynMapHCPageInlined without stats.
      */
     PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
@@ -436,17 +441,19 @@
     unsigned    iEntry  = pSet->aiHashTable[iHash];
     if (    iEntry < pSet->cEntries
-        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
-    {
+        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
+        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
+    {
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
+        pSet->aEntries[iEntry].cInlinedRefs++;
         *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlHits);
     }
     else
     {
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlMisses);
-        pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
+        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
         *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
     }
 
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInl, a);
+    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
     return VINF_SUCCESS;
 }
@@ -462,5 +469,5 @@
  * @param   pPage       The page.
  */
-DECLINLINE(void *) pgmPoolMapPageInlined(PVM pVM, PPGMPOOLPAGE pPage)
+DECLINLINE(void *) pgmPoolMapPageInlined(PVM pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
 {
     if (pPage->idx >= PGMPOOL_IDX_FIRST)
@@ -468,9 +475,5 @@
         Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
         void *pv;
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-        pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv);
-# else
-        PGMDynMapHCPage(pVM, pPage->Core.Key, &pv);
-# endif
+        pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
         return pv;
     }
@@ -486,5 +489,5 @@
  * @param   pPage       The page.
  */
-DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage)
+DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
 {
     if (pPage->idx >= PGMPOOL_IDX_FIRST)
@@ -492,10 +495,6 @@
         Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
         void *pv;
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
         Assert(pVCpu == VMMGetCpu(pVM));
-        pgmR0DynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv);
-# else
-        PGMDynMapHCPage(pVM, pPage->Core.Key, &pv);
-# endif
+        pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
         return pv;
     }
@@ -514,12 +513,8 @@
  * @param   HCPhys      HC Physical address of the page.
  */
-DECLINLINE(void *) pgmDynMapHCPageOff(PVM pVM, RTHCPHYS HCPhys)
+DECLINLINE(void *) pgmRZDynMapHCPageOff(PVM pVM, RTHCPHYS HCPhys RTLOG_COMMA_SRC_POS_DECL)
 {
     void *pv;
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
-# else
-    PGMDynMapHCPage(pVM, HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
-# endif
+    pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv RTLOG_COMMA_SRC_POS_ARGS);
     pv = (void *)((uintptr_t)pv | ((uintptr_t)HCPhys & PAGE_OFFSET_MASK));
     return pv;
@@ -651,5 +646,5 @@
 {
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    int rc = pgmR0DynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd);
+    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS);
     if (RT_FAILURE(rc))
     {
@@ -676,5 +671,5 @@
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     PX86PD pGuestPD = NULL;
-    int rc = pgmR0DynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD);
+    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS);
     if (RT_FAILURE(rc))
     {
@@ -705,5 +700,5 @@
 {
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    int rc = pgmR0DynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt);
+    int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS);
     if (RT_FAILURE(rc))
     {
@@ -749,5 +744,5 @@
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     PX86PDPT pGuestPDPT = NULL;
-    int rc = pgmR0DynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT);
+    int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS);
     AssertRCReturn(rc, NULL);
 #else
@@ -785,7 +780,8 @@
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
             PX86PDPAE   pGuestPD = NULL;
-            int rc = pgmR0DynMapGCPageInlined(pVCpu,
+            int rc = pgmRZDynMapGCPageInlined(pVCpu,
                                               pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
-                                              (void **)&pGuestPD);
+                                              (void **)&pGuestPD
+                                              RTLOG_COMMA_SRC_POS);
             if (RT_SUCCESS(rc))
                 return pGuestPD->a[iPD];
@@ -837,7 +833,8 @@
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     PX86PDPAE   pGuestPD = NULL;
-    int rc = pgmR0DynMapGCPageInlined(pVCpu,
+    int rc = pgmRZDynMapGCPageInlined(pVCpu,
                                       pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
-                                      (void **)&pGuestPD);
+                                      (void **)&pGuestPD
+                                      RTLOG_COMMA_SRC_POS);
     if (RT_FAILURE(rc))
     {
@@ -868,5 +865,5 @@
 {
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    int rc = pgmR0DynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4);
+    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS);
     if (RT_FAILURE(rc))
     {
@@ -910,5 +907,5 @@
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     PX86PML4 pGuestPml4;
-    int rc = pgmR0DynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4);
+    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS);
     AssertRCReturn(rc, NULL);
 #else
Index: /trunk/src/VBox/VMM/PGMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/PGMInternal.h	(revision 31401)
+++ /trunk/src/VBox/VMM/PGMInternal.h	(revision 31402)
@@ -234,14 +234,11 @@
  *                      this.
  *
- * @remark  In RC this uses PGMDynMapHCPage(), so it will consume of the small
- *          page window employeed by that function. Be careful.
+ * @remark  Use with care as we don't have so much dynamic mapping space in
+ *          ring-0 on 32-bit darwin and in RC.
  * @remark  There is no need to assert on the result.
  */
-#ifdef IN_RC
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
 # define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
-     PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
-#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-# define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
-     pgmR0DynMapHCPageInlined(pVCpu, HCPhys, (void **)(ppv))
+     pgmRZDynMapHCPageInlined(pVCpu, HCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
 #else
 # define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
@@ -258,14 +255,11 @@
  * @param   ppv     Where to store the virtual address. No need to cast this.
  *
- * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume of the
- *          small page window employeed by that function. Be careful.
+ * @remark  Use with care as we don't have so much dynamic mapping space in
+ *          ring-0 on 32-bit darwin and in RC.
  * @remark  There is no need to assert on the result.
  */
-#ifdef IN_RC
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
 # define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
-     PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
-#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
-     pgmR0DynMapGCPageV2Inlined(pVM, pVCpu, GCPhys, (void **)(ppv))
+     pgmRZDynMapGCPageV2Inlined(pVM, pVCpu, GCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
 #else
 # define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
@@ -281,6 +275,6 @@
  * @param   ppv     Where to store the virtual address. No need to cast this.
  *
- * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume of the
- *          small page window employeed by that function. Be careful.
+ * @remark  Use with care as we don't have so much dynamic mapping space in
+ *          ring-0 on 32-bit darwin and in RC.
  * @remark  There is no need to assert on the result.
  */
@@ -295,6 +289,6 @@
  * @param   ppv     Where to store the virtual address. No need to cast this.
  *
- * @remark  In RC this uses PGMGCDynMapGCPage(), so it will consume of the
- *          small page window employeed by that function. Be careful.
+ * @remark  Use with care as we don't have so much dynamic mapping space in
+ *          ring-0 on 32-bit darwin and in RC.
  * @remark  There is no need to assert on the result.
  */
@@ -309,15 +303,42 @@
  * @param   ppv     Where to store the virtual address. No need to cast this.
  *
- * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume of the
- *          small page window employeed by that function. Be careful.
+ * @remark  Use with care as we don't have so much dynamic mapping space in
+ *          ring-0 on 32-bit darwin and in RC.
  * @remark  There is no need to assert on the result.
  */
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
 # define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
-     PGMDynMapGCPageOff(pVM, GCPhys, (void **)(ppv))
+     pgmRZDynMapGCPageOffInlined(VMMGetCpu(pVM), GCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
 #else
 # define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
      PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
 #endif
+
+/** @def PGM_DYNMAP_UNUSED_HINT
+ * Hints to the dynamic mapping code in RC and R0/darwin that the specified page
+ * is no longer used.
+ *
+ * @param   pVCpu   The current CPU.
+ * @param   pPage   The pool page.
+ */
+#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+# ifdef LOG_ENABLED
+#  define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)  pgmRZDynMapUnusedHint(pVCpu, pvPage, RT_SRC_POS)
+# else
+#  define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)  pgmRZDynMapUnusedHint(pVCpu, pvPage)
+# endif
+#else
+# define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)  do {} while (0)
+#endif
+
+/** @def PGM_DYNMAP_UNUSED_HINT_VM
+ * Hints to the dynamic mapping code in RC and R0/darwin that the specified page
+ * is no longer used.
+ *
+ * @param   pVM     The VM handle.
+ * @param   pPage   The pool page.
+ */
+#define PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvPage)  PGM_DYNMAP_UNUSED_HINT(VMMGetCpu(pVM), pvPage)
+
 
 /** @def PGM_INVL_PG
@@ -1549,13 +1570,93 @@
 
 /**
+ * Raw-mode context dynamic mapping cache entry.
+ *
+ * Because of raw-mode context being relocatable and all relocations are applied
+ * in ring-3, this has to be defined here and be RC specific.
+ *
+ * @sa PGMRZDYNMAPENTRY, PGMR0DYNMAPENTRY.
+ */
+typedef struct PGMRCDYNMAPENTRY
+{
+    /** The physical address of the currently mapped page.
+     * This is duplicate for three reasons: cache locality, cache policy of the PT
+     * mappings and sanity checks.   */
+    RTHCPHYS                    HCPhys;
+    /** Pointer to the page. */
+    RTRCPTR                     pvPage;
+    /** The number of references. */
+    int32_t volatile            cRefs;
+    /** PTE pointer union. */
+    union PGMRCDYNMAPENTRY_PPTE
+    {
+        /** PTE pointer, 32-bit legacy version. */
+        RCPTRTYPE(PX86PTE)      pLegacy;
+        /** PTE pointer, PAE version. */
+        RCPTRTYPE(PX86PTEPAE)   pPae;
+        /** PTE pointer, the void version. */
+        RTRCPTR                 pv;
+    } uPte;
+    /** Alignment padding. */
+    RTRCPTR                     RCPtrAlignment;
+} PGMRCDYNMAPENTRY;
+/** Pointer to a dynamic mapping cache entry for the raw-mode context. */
+typedef PGMRCDYNMAPENTRY *PPGMRCDYNMAPENTRY;
+
+
+/**
+ * Dynamic mapping cache for the raw-mode context.
+ *
+ * This is initialized during VMMRC init based upon the pbDynPageMapBaseGC and
+ * paDynPageMap* PGM members.  However, it has to be defined in PGMInternal.h
+ * so that we can perform relocations from PGMR3Relocate.  This has the
+ * consequence that we must have separate ring-0 and raw-mode context versions
+ * of this struct even if they share the basic elements.
+ *
+ * @sa PPGMRZDYNMAP, PGMR0DYNMAP.
+ */
+typedef struct PGMRCDYNMAP
+{
+    /** The usual magic number / eye catcher (PGMRZDYNMAP_MAGIC). */
+    uint32_t                        u32Magic;
+    /** Array for tracking and managing the pages.  */
+    RCPTRTYPE(PPGMRCDYNMAPENTRY)    paPages;
+    /** The cache size given as a number of pages. */
+    uint32_t                        cPages;
+    /** Whether it's 32-bit legacy or PAE/AMD64 paging mode. */
+    bool                            fLegacyMode;
+    /** The current load.
+     * This does not include guard pages. */
+    uint32_t                        cLoad;
+    /** The max load ever.
+     * This is maintained to trigger adding of more mapping space. */
+    uint32_t                        cMaxLoad;
+    /** The number of guard pages. */
+    uint32_t                        cGuardPages;
+    /** The number of users (protected by hInitLock). */
+    uint32_t                        cUsers;
+} PGMRCDYNMAP;
+/** Pointer to the dynamic cache for the raw-mode context. */
+typedef PGMRCDYNMAP *PPGMRCDYNMAP;
+
+
+/**
  * Mapping cache usage set entry.
  *
  * @remarks 16-bit ints was choosen as the set is not expected to be used beyond
  *          the dynamic ring-0 and (to some extent) raw-mode context mapping
- *          cache. If it's extended to include ring-3, well, then something will
- *          have be changed here...
+ *          cache.  If it's extended to include ring-3, well, then something
+ *          will have to be changed here...
  */
 typedef struct PGMMAPSETENTRY
 {
+    /** Pointer to the page. */
+#ifndef IN_RC
+    RTR0PTR                     pvPage;
+#else
+    RTRCPTR                     pvPage;
+# if HC_ARCH_BITS == 64
+    uint32_t                    u32Alignment2;
+# endif
+#endif
     /** The mapping cache index. */
     uint16_t                    iPage;
@@ -1563,12 +1664,18 @@
      * The max is UINT16_MAX - 1. */
     uint16_t                    cRefs;
-#if HC_ARCH_BITS == 64
-    uint32_t                    alignment;
-#endif
-    /** Pointer to the page. */
-    RTR0PTR                     pvPage;
+    /** The number of inlined references.
+     * The max is UINT16_MAX - 1. */
+    uint16_t                    cInlinedRefs;
+    /** Unreferences.  */
+    uint16_t                    cUnrefs;
+
+#if HC_ARCH_BITS == 32
+    uint32_t                    u32Alignment1;
+#endif
     /** The physical address for this entry. */
     RTHCPHYS                    HCPhys;
 } PGMMAPSETENTRY;
+AssertCompileMemberOffset(PGMMAPSETENTRY, iPage, RT_MAX(sizeof(RTR0PTR), sizeof(RTRCPTR)));
+AssertCompileMemberAlignment(PGMMAPSETENTRY, HCPhys, sizeof(RTHCPHYS));
 /** Pointer to a mapping cache usage set entry. */
 typedef PGMMAPSETENTRY *PPGMMAPSETENTRY;
@@ -2150,8 +2257,6 @@
  * @remark  There is no need to assert on the result.
  */
-#if defined(IN_RC)
-# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageInlined((pVM), (pPage))
-#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageInlined((pVM), (pPage))
+#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageInlined((pVM), (pPage) RTLOG_COMMA_SRC_POS)
 #elif defined(VBOX_STRICT)
 # define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageStrict(pPage)
@@ -2178,8 +2283,6 @@
  * @remark  There is no need to assert on the result.
  */
-#if defined(IN_RC)
-# define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageV2Inlined((pVM), (pVCpu), (pPage))
-#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-# define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageV2Inlined((pVM), (pVCpu), (pPage))
+#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+# define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageV2Inlined((pVM), (pVCpu), (pPage) RTLOG_COMMA_SRC_POS)
 #else
 # define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   PGMPOOL_PAGE_2_PTR((pVM), (pPage))
@@ -2623,6 +2726,4 @@
 
     /* RC only: */
-    STAMCOUNTER StatRCDynMapCacheMisses;            /**< RC: The number of dynamic page mapping cache misses */
-    STAMCOUNTER StatRCDynMapCacheHits;              /**< RC: The number of dynamic page mapping cache hits */
     STAMCOUNTER StatRCInvlPgConflict;               /**< RC: Number of times PGMInvalidatePage() detected a mapping conflict. */
     STAMCOUNTER StatRCInvlPgSyncMonCR3;             /**< RC: Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3. */
@@ -2846,14 +2947,12 @@
     /** Base address of the dynamic page mapping area.
      * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
+     *
+     * @todo The plan of keeping PGMRCDYNMAP private to PGMRZDynMap.cpp didn't
+     *       work out.  Some cleaning up of the initialization that would
+     *       remove this memory is yet to be done...
      */
     RCPTRTYPE(uint8_t *)            pbDynPageMapBaseGC;
-    /** The index of the last entry used in the dynamic page mapping area. */
-    RTUINT                          iDynPageMapLast;
-    /** Cache containing the last entries in the dynamic page mapping area.
-     * The cache size is covering half of the mapping area. */
-    RTHCPHYS                        aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
-    /** Keep a lock counter for the full (!) mapping area. */
-    uint32_t                        aLockedDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)];
-
+    /** The address of the raw-mode context mapping cache. */
+    RCPTRTYPE(PPGMRCDYNMAP)         pRCDynMap;
     /** The address of the ring-0 mapping cache if we're making use of it.  */
     RTR0PTR                         pvR0DynMapUsed;
@@ -3052,5 +3151,4 @@
 AssertCompileMemberAlignment(PGM, GCPtrMappingFixed, sizeof(RTGCPTR));
 AssertCompileMemberAlignment(PGM, HCPhysInterPD, 8);
-AssertCompileMemberAlignment(PGM, aHCPhysDynPageMapCache, 8);
 AssertCompileMemberAlignment(PGM, CritSect, 8);
 AssertCompileMemberAlignment(PGM, ChunkR3Map, 8);
@@ -3072,30 +3170,4 @@
 
     /* R0 only: */
-    STAMCOUNTER StatR0DynMapMigrateInvlPg;          /**< R0: invlpg in PGMDynMapMigrateAutoSet. */
-    STAMPROFILE StatR0DynMapGCPageInl;              /**< R0: Calls to pgmR0DynMapGCPageInlined. */
-    STAMCOUNTER StatR0DynMapGCPageInlHits;          /**< R0: Hash table lookup hits. */
-    STAMCOUNTER StatR0DynMapGCPageInlMisses;        /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
-    STAMCOUNTER StatR0DynMapGCPageInlRamHits;       /**< R0: 1st ram range hits. */
-    STAMCOUNTER StatR0DynMapGCPageInlRamMisses;     /**< R0: 1st ram range misses, takes slow path. */
-    STAMPROFILE StatR0DynMapHCPageInl;              /**< R0: Calls to pgmR0DynMapHCPageInlined. */
-    STAMCOUNTER StatR0DynMapHCPageInlHits;          /**< R0: Hash table lookup hits. */
-    STAMCOUNTER StatR0DynMapHCPageInlMisses;        /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
-    STAMPROFILE StatR0DynMapHCPage;                 /**< R0: Calls to PGMDynMapHCPage. */
-    STAMCOUNTER StatR0DynMapSetOptimize;            /**< R0: Calls to pgmDynMapOptimizeAutoSet. */
-    STAMCOUNTER StatR0DynMapSetSearchFlushes;       /**< R0: Set search restorting to subset flushes. */
-    STAMCOUNTER StatR0DynMapSetSearchHits;          /**< R0: Set search hits. */
-    STAMCOUNTER StatR0DynMapSetSearchMisses;        /**< R0: Set search misses. */
-    STAMCOUNTER StatR0DynMapPage;                   /**< R0: Calls to pgmR0DynMapPage. */
-    STAMCOUNTER StatR0DynMapPageHits0;              /**< R0: Hits at iPage+0. */
-    STAMCOUNTER StatR0DynMapPageHits1;              /**< R0: Hits at iPage+1. */
-    STAMCOUNTER StatR0DynMapPageHits2;              /**< R0: Hits at iPage+2. */
-    STAMCOUNTER StatR0DynMapPageInvlPg;             /**< R0: invlpg. */
-    STAMCOUNTER StatR0DynMapPageSlow;               /**< R0: Calls to pgmR0DynMapPageSlow. */
-    STAMCOUNTER StatR0DynMapPageSlowLoopHits;       /**< R0: Hits in the pgmR0DynMapPageSlow search loop. */
-    STAMCOUNTER StatR0DynMapPageSlowLoopMisses;     /**< R0: Misses in the pgmR0DynMapPageSlow search loop. */
-    //STAMCOUNTER StatR0DynMapPageSlowLostHits;       /**< R0: Lost hits. */
-    STAMCOUNTER StatR0DynMapSubsets;                /**< R0: Times PGMDynMapPushAutoSubset was called. */
-    STAMCOUNTER StatR0DynMapPopFlushes;             /**< R0: Times PGMDynMapPopAutoSubset flushes the subset. */
-    STAMCOUNTER aStatR0DynMapSetSize[11];           /**< R0: Set size distribution. */
 
     /* RZ only: */
@@ -3148,4 +3220,30 @@
     STAMCOUNTER StatRZGuestROMWriteHandled;         /**< RC/R0: The number of times pgmPhysRomWriteHandler() was successfully called. */
     STAMCOUNTER StatRZGuestROMWriteUnhandled;       /**< RC/R0: The number of times pgmPhysRomWriteHandler() was called and we had to fall back to the recompiler */
+    STAMCOUNTER StatRZDynMapMigrateInvlPg;          /**< RZ: invlpg in PGMR0DynMapMigrateAutoSet. */
+    STAMPROFILE StatRZDynMapGCPageInl;              /**< RZ: Calls to pgmRZDynMapGCPageInlined. */
+    STAMCOUNTER StatRZDynMapGCPageInlHits;          /**< RZ: Hash table lookup hits. */
+    STAMCOUNTER StatRZDynMapGCPageInlMisses;        /**< RZ: Misses that fall back to the common code. */
+    STAMCOUNTER StatRZDynMapGCPageInlRamHits;       /**< RZ: 1st ram range hits. */
+    STAMCOUNTER StatRZDynMapGCPageInlRamMisses;     /**< RZ: 1st ram range misses, takes slow path. */
+    STAMPROFILE StatRZDynMapHCPageInl;              /**< RZ: Calls to pgmRZDynMapHCPageInlined. */
+    STAMCOUNTER StatRZDynMapHCPageInlHits;          /**< RZ: Hash table lookup hits. */
+    STAMCOUNTER StatRZDynMapHCPageInlMisses;        /**< RZ: Misses that fall back to the common code. */
+    STAMPROFILE StatRZDynMapHCPage;                 /**< RZ: Calls to pgmRZDynMapHCPageCommon. */
+    STAMCOUNTER StatRZDynMapSetOptimize;            /**< RZ: Calls to pgmRZDynMapOptimizeAutoSet. */
+    STAMCOUNTER StatRZDynMapSetSearchFlushes;       /**< RZ: Set search resorting to subset flushes. */
+    STAMCOUNTER StatRZDynMapSetSearchHits;          /**< RZ: Set search hits. */
+    STAMCOUNTER StatRZDynMapSetSearchMisses;        /**< RZ: Set search misses. */
+    STAMCOUNTER StatRZDynMapPage;                   /**< RZ: Calls to pgmR0DynMapPage. */
+    STAMCOUNTER StatRZDynMapPageHits0;              /**< RZ: Hits at iPage+0. */
+    STAMCOUNTER StatRZDynMapPageHits1;              /**< RZ: Hits at iPage+1. */
+    STAMCOUNTER StatRZDynMapPageHits2;              /**< RZ: Hits at iPage+2. */
+    STAMCOUNTER StatRZDynMapPageInvlPg;             /**< RZ: invlpg. */
+    STAMCOUNTER StatRZDynMapPageSlow;               /**< RZ: Calls to pgmR0DynMapPageSlow. */
+    STAMCOUNTER StatRZDynMapPageSlowLoopHits;       /**< RZ: Hits in the pgmR0DynMapPageSlow search loop. */
+    STAMCOUNTER StatRZDynMapPageSlowLoopMisses;     /**< RZ: Misses in the pgmR0DynMapPageSlow search loop. */
+    //STAMCOUNTER StatRZDynMapPageSlowLostHits;       /**< RZ: Lost hits. */
+    STAMCOUNTER StatRZDynMapSubsets;                /**< RZ: Times PGMDynMapPushAutoSubset was called. */
+    STAMCOUNTER StatRZDynMapPopFlushes;             /**< RZ: Times PGMDynMapPopAutoSubset flushes the subset. */
+    STAMCOUNTER aStatRZDynMapSetFilledPct[11];      /**< RZ: Set fill distribution, percent. */
 
     /* HC - R3 and (maybe) R0: */
@@ -3270,12 +3368,12 @@
 {
     /** Offset to the VM structure. */
-    RTINT                           offVM;
+    int32_t                         offVM;
     /** Offset to the VMCPU structure. */
-    RTINT                           offVCpu;
+    int32_t                         offVCpu;
     /** Offset of the PGM structure relative to VMCPU. */
-    RTINT                           offPGM;
-    RTINT                           uPadding0;      /**< structure size alignment. */
-
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+    int32_t                         offPGM;
+    uint32_t                        uPadding0;      /**< structure size alignment. */
+
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAW_MODE)
     /** Automatically tracked physical memory mapping set.
      * Ring-0 and strict raw-mode builds. */
@@ -3593,6 +3691,12 @@
 
 #endif /* IN_RING3 */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-int             pgmR0DynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv);
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
+int             pgmRZDynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL);
+int             pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL);
+# ifdef LOG_ENABLED
+void            pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint, RT_SRC_POS_DECL);
+# else
+void            pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint);
+# endif
 #endif
 int             pgmPoolAllocEx(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false);
Index: /trunk/src/VBox/VMM/VMMAll/MMAllPagePool.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/MMAllPagePool.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/VMMAll/MMAllPagePool.cpp	(revision 31402)
@@ -39,5 +39,5 @@
 
 
-#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+#if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(IN_RC)
 
 /**
Index: /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 31402)
@@ -936,9 +936,4 @@
         PGMPOOLKIND enmKind;
 
-# if defined(IN_RC)
-        /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-        PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
-# endif
-
         if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
         {
@@ -990,6 +985,6 @@
          */
         ASMReloadCR3();
-        PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
 # endif
+        PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
     }
     else
@@ -1524,5 +1519,4 @@
 #endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
 #if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-
 /**
  * Performs the lazy mapping of the 32-bit guest PD.
@@ -1563,5 +1557,4 @@
     return rc;
 }
-
 #endif
 
@@ -2272,11 +2265,20 @@
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
 
-/** Common worker for PGMDynMapGCPage and PGMDynMapGCPageOff. */
-DECLINLINE(int) pgmDynMapGCPageInternal(PVM pVM, RTGCPHYS GCPhys, void **ppv)
+/**
+ * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM handle.
+ * @param   pVCpu       The current CPU.
+ * @param   GCPhys      The guest physical address of the page to map.  The
+ *                      offset bits are not ignored.
+ * @param   ppv         Where to return the address corresponding to @a GCPhys.
+ */
+int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
 {
     pgmLock(pVM);
 
     /*
-     * Convert it to a writable page and it on to PGMDynMapHCPage.
+     * Convert it to a writable page and pass it on to the dynamic mapper.
      */
     int rc;
@@ -2287,10 +2289,8 @@
         if (RT_SUCCESS(rc))
         {
-            //Log(("PGMDynMapGCPage: GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-            rc = pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), PGM_PAGE_GET_HCPHYS(pPage), ppv);
-#else
-            rc = PGMDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), ppv);
-#endif
+            void *pv;
+            rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
+            if (RT_SUCCESS(rc))
+                *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
         }
         else
@@ -2307,199 +2307,5 @@
 }
 
-/**
- * Temporarily maps one guest page specified by GC physical address.
- * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
- *
- * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
- * reused after 8 mappings (or perhaps a few more if you score with the cache).
- *
- * @returns VBox status.
- * @param   pVM         VM handle.
- * @param   GCPhys      GC Physical address of the page.
- * @param   ppv         Where to store the address of the mapping.
- */
-VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
-{
-    AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
-    return pgmDynMapGCPageInternal(pVM, GCPhys, ppv);
-}
-
-
-/**
- * Temporarily maps one guest page specified by unaligned GC physical address.
- * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
- *
- * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
- * reused after 8 mappings (or perhaps a few more if you score with the cache).
- *
- * The caller is aware that only the speicifed page is mapped and that really bad things
- * will happen if writing beyond the page!
- *
- * @returns VBox status.
- * @param   pVM         VM handle.
- * @param   GCPhys      GC Physical address within the page to be mapped.
- * @param   ppv         Where to store the address of the mapping address corresponding to GCPhys.
- */
-VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
-{
-    void *pv;
-    int rc = pgmDynMapGCPageInternal(pVM, GCPhys, &pv);
-    if (RT_SUCCESS(rc))
-    {
-        *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-        return VINF_SUCCESS;
-    }
-    return rc;
-}
-
-# ifdef IN_RC
-
-/**
- * Temporarily maps one host page specified by HC physical address.
- *
- * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
- * reused after 16 mappings (or perhaps a few more if you score with the cache).
- *
- * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
- * @param   pVM         VM handle.
- * @param   HCPhys      HC Physical address of the page.
- * @param   ppv         Where to store the address of the mapping. This is the
- *                      address of the PAGE not the exact address corresponding
- *                      to HCPhys. Use PGMDynMapHCPageOff if you care for the
- *                      page offset.
- */
-VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
-{
-    AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
-
-    /*
-     * Check the cache.
-     */
-    register unsigned iCache;
-    for (iCache = 0;iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache);iCache++)
-    {
-        static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
-        {
-            { 0,  9, 10, 11, 12, 13, 14, 15},
-            { 0,  1, 10, 11, 12, 13, 14, 15},
-            { 0,  1,  2, 11, 12, 13, 14, 15},
-            { 0,  1,  2,  3, 12, 13, 14, 15},
-            { 0,  1,  2,  3,  4, 13, 14, 15},
-            { 0,  1,  2,  3,  4,  5, 14, 15},
-            { 0,  1,  2,  3,  4,  5,  6, 15},
-            { 0,  1,  2,  3,  4,  5,  6,  7},
-            { 8,  1,  2,  3,  4,  5,  6,  7},
-            { 8,  9,  2,  3,  4,  5,  6,  7},
-            { 8,  9, 10,  3,  4,  5,  6,  7},
-            { 8,  9, 10, 11,  4,  5,  6,  7},
-            { 8,  9, 10, 11, 12,  5,  6,  7},
-            { 8,  9, 10, 11, 12, 13,  6,  7},
-            { 8,  9, 10, 11, 12, 13, 14,  7},
-            { 8,  9, 10, 11, 12, 13, 14, 15},
-        };
-        AssertCompile(RT_ELEMENTS(au8Trans) == 16);
-        AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
-
-        if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
-        {
-            int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
-
-            /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
-            if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
-            {
-                void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
-                *ppv = pv;
-                STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCDynMapCacheHits);
-                Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
-                return VINF_SUCCESS;
-            }
-            LogFlow(("Out of sync entry %d\n", iPage));
-        }
-    }
-    AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
-    AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
-    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCDynMapCacheMisses);
-
-    /*
-     * Update the page tables.
-     */
-    unsigned iPage = pVM->pgm.s.iDynPageMapLast;
-    unsigned i;
-    for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)
-    {
-        pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
-        if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
-            break;
-        iPage++;
-    }
-    AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
-
-    pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
-    pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
-    pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u   =           HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
-    pVM->pgm.s.aLockedDynPageMapCache[iPage]    = 0;
-
-    void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
-    *ppv = pv;
-    ASMInvalidatePage(pv);
-    Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Temporarily lock a dynamic page to prevent it from being reused.
- *
- * @param   pVM         VM handle.
- * @param   GCPage      GC address of page
- */
-VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
-{
-    unsigned iPage;
-
-    Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
-    iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
-    ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
-    Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));
-}
-
-
-/**
- * Unlock a dynamic page
- *
- * @param   pVM         VM handle.
- * @param   GCPage      GC address of page
- */
-VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
-{
-    unsigned iPage;
-
-    AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
-    AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
-
-    Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
-    iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
-    Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
-    ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
-    Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));
-}
-
-
-#  ifdef VBOX_STRICT
-/**
- * Check for lock leaks.
- *
- * @param   pVM         VM handle.
- */
-VMMDECL(void) PGMDynCheckLocks(PVM pVM)
-{
-    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
-        Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
-}
-#  endif /* VBOX_STRICT */
-
-# endif /* IN_RC */
 #endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
-
 #if !defined(IN_R0) || defined(LOG_ENABLED)
 
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 31401)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 31402)
@@ -383,8 +383,4 @@
     *pfLockTaken = false;
 
-# if defined(IN_RC) && defined(VBOX_STRICT)
-    PGMDynCheckLocks(pVM);
-# endif
-
 # if  (   PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT \
        || PGM_GST_TYPE == PGM_TYPE_PAE   || PGM_GST_TYPE == PGM_TYPE_AMD64) \
@@ -433,4 +429,5 @@
     if (uErr & X86_TRAP_PF_RSVD)
     {
+/** @todo This is not complete code. take locks */
         Assert(uErr & X86_TRAP_PF_P);
         PPGMPAGE pPage;
@@ -563,17 +560,6 @@
             return VINF_SUCCESS;
         }
-#ifndef IN_RC
         AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u));
         AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u));
-#else
-        /* Ugly hack, proper fix is comming up later. */
-        if (   !(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u)
-            || !(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u) )
-        {
-            rc = PGM_GST_NAME(Walk)(pVCpu, pvFault, &GstWalk);
-            if (RT_FAILURE_NP(rc))
-                return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));
-        }
-#endif
     }
 
@@ -1148,9 +1134,4 @@
     }
 
-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
-
     /*
      * Get the guest PD entry and calc big page.
@@ -1295,8 +1276,5 @@
                     LogFlow(("Skipping flush for big page containing %RGv (PD=%X .u=%RX64)-> nothing has changed!\n", GCPtrPage, iPDSrc, PdeSrc.u));
                     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage4MBPagesSkip));
-# if defined(IN_RC)
-                    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-                    PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+                    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
                     return VINF_SUCCESS;
                 }
@@ -1335,8 +1313,5 @@
         }
     }
-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
     return rc;
 
@@ -1785,9 +1760,4 @@
     PPGMPOOLPAGE    pShwPde  = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
     Assert(pShwPde);
-# endif
-
-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
 # endif
 
@@ -2021,8 +1991,5 @@
                 }
             }
-# if defined(IN_RC)
-            /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-            PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
             return VINF_SUCCESS;
         }
@@ -2050,8 +2017,5 @@
     ASMAtomicWriteSize(pPdeDst, 0);
 
-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
     PGM_INVL_VCPU_TLBS(pVCpu);
     return VINF_PGM_SYNCPAGE_MODIFIED_PDE;
@@ -2564,9 +2528,4 @@
     Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
 
-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
-
     /*
      * Sync page directory entry.
@@ -2646,7 +2605,5 @@
             }
             ASMAtomicWriteSize(pPdeDst, PdeDst.u);
-# if defined(IN_RC)
-            PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
             return VINF_SUCCESS;
         }
@@ -2654,7 +2611,5 @@
         {
             VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
-# if defined(IN_RC)
-            PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
             return VINF_PGM_SYNC_CR3;
         }
@@ -2687,7 +2642,5 @@
                          | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
                 ASMAtomicWriteSize(pPdeDst, PdeDst.u);
-# if defined(IN_RC)
-                PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+                PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
 
                 /*
@@ -2768,5 +2721,6 @@
 
             /**
-             * @todo It might be more efficient to sync only a part of the 4MB page (similar to what we do for 4kb PDs).
+             * @todo It might be more efficient to sync only a part of the 4MB
+             *       page (similar to what we do for 4KB PDs).
              */
 
@@ -2795,7 +2749,5 @@
             }
             ASMAtomicWriteSize(pPdeDst, PdeDst.u);
-# if defined(IN_RC)
-            PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
 
             /*
@@ -3391,9 +3343,4 @@
 # endif
 
-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
-
     if (!pPdeDst->n.u1Present)
     {
@@ -3401,8 +3348,5 @@
         if (rc != VINF_SUCCESS)
         {
-# if defined(IN_RC)
-            /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-            PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
             pgmUnlock(pVM);
             AssertRC(rc);
@@ -3449,8 +3393,5 @@
         }
     }
-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
     pgmUnlock(pVM);
     return rc;
@@ -4359,5 +4300,5 @@
     AssertReturn(pPageCR3, VERR_INTERNAL_ERROR_2);
     HCPhysGuestCR3 = PGM_PAGE_GET_HCPHYS(pPageCR3);
-    /** @todo this needs some reworking wrt. locking.  */
+    /** @todo this needs some reworking wrt. locking?  */
 # if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     HCPtrGuestCR3 = NIL_RTHCPTR;
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp	(revision 31402)
@@ -247,7 +247,5 @@
                 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(pVCpu);
                 AssertFatal(pShw32BitPd);
-#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
-                PGMDynLockHCPage(pVM, (uint8_t *)pShw32BitPd);
-#endif
+
                 /* Free any previous user, unless it's us. */
                 Assert(   (pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
@@ -260,8 +258,5 @@
                 pShw32BitPd->a[iNewPDE].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                           | (uint32_t)pMap->aPTs[i].HCPhysPT;
-#ifdef IN_RC
-                /* Unlock dynamic mappings again. */
-                PGMDynUnlockHCPage(pVM, (uint8_t *)pShw32BitPd);
-#endif
+                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShw32BitPd);
                 break;
             }
@@ -274,7 +269,4 @@
                 PX86PDPT        pShwPdpt  = pgmShwGetPaePDPTPtr(pVCpu);
                 Assert(pShwPdpt);
-#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */
-                PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
-#endif
 
                 /*
@@ -302,7 +294,4 @@
                 }
                 Assert(pShwPaePd);
-#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
-                PGMDynLockHCPage(pVM, (uint8_t *)pShwPaePd);
-#endif
 
                 /*
@@ -357,9 +346,6 @@
                 pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
 
-#ifdef IN_RC
-                /* Unlock dynamic mappings again. */
-                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPaePd);
-                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPdpt);
-#endif
+                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPaePd);
+                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPdpt);
                 break;
             }
@@ -406,11 +392,5 @@
     if (    PGMGetGuestMode(pVCpu) >= PGMMODE_PAE
         &&  pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
-    {
         pCurrentShwPdpt = pgmShwGetPaePDPTPtr(pVCpu);
-#ifdef IN_RC    /* Lock mapping to prevent it from being reused (currently not possible). */
-        if (pCurrentShwPdpt)
-            PGMDynLockHCPage(pVM, (uint8_t *)pCurrentShwPdpt);
-#endif
-    }
 
     unsigned i = pMap->cPTs;
@@ -503,9 +483,6 @@
         }
     }
-#ifdef IN_RC
-    /* Unlock dynamic mappings again. */
-    if (pCurrentShwPdpt)
-        PGMDynUnlockHCPage(pVM, (uint8_t *)pCurrentShwPdpt);
-#endif
+
+    PGM_DYNMAP_UNUSED_HINT_VM(pVM, pCurrentShwPdpt);
 }
 #endif /* !IN_RING0 */
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 31402)
@@ -738,15 +738,9 @@
     AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
 
-#ifdef IN_RC
+#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     /*
      * Map it by HCPhys.
      */
-    return PGMDynMapHCPage(pVM, HCPhys, ppv);
-
-#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-    /*
-     * Map it by HCPhys.
-     */
-    return pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv);
+    return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
 
 #else
@@ -824,9 +818,5 @@
     RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
     Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv);
-# else
-    PGMDynMapHCPage(pVM, HCPhys, ppv);
-# endif
+    pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
     return VINF_SUCCESS;
 
@@ -1138,5 +1128,5 @@
      */
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-    *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
+    *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS);
 #else
     PPGMPAGEMAPTLBE pTlbe;
@@ -1176,5 +1166,5 @@
      */
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-    *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
+    *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
 #else
     PPGMPAGEMAPTLBE pTlbe;
@@ -1234,5 +1224,5 @@
         if (RT_SUCCESS(rc))
         {
-            *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
+            *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
 # if 0
             pLock->pvMap = 0;
@@ -1345,5 +1335,5 @@
         else
         {
-            *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
+            *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
 # if 0
             pLock->pvMap = 0;
@@ -1493,5 +1483,5 @@
     pLock->u32Dummy = 0;
 
-#else   /* IN_RING3 */
+#else
     PPGMPAGEMAP pMap       = (PPGMPAGEMAP)pLock->pvMap;
     PPGMPAGE    pPage      = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 31402)
@@ -88,51 +88,4 @@
 }
 
-/** @def PGMPOOL_PAGE_2_LOCKED_PTR
- * Maps a pool page pool into the current context and lock it (RC only).
- *
- * @returns VBox status code.
- * @param   pVM     The VM handle.
- * @param   pPage   The pool page.
- *
- * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume of the
- *          small page window employeed by that function. Be careful.
- * @remark  There is no need to assert on the result.
- */
-#if defined(IN_RC)
-DECLINLINE(void *) PGMPOOL_PAGE_2_LOCKED_PTR(PVM pVM, PPGMPOOLPAGE pPage)
-{
-    void *pv = pgmPoolMapPageInlined(pVM, pPage);
-
-    /* Make sure the dynamic mapping will not be reused. */
-    if (pv)
-        PGMDynLockHCPage(pVM, (uint8_t *)pv);
-
-    return pv;
-}
-#else
-# define PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage)  PGMPOOL_PAGE_2_PTR(pVM, pPage)
-#endif
-
-/** @def PGMPOOL_UNLOCK_PTR
- * Unlock a previously locked dynamic caching (RC only).
- *
- * @returns VBox status code.
- * @param   pVM     The VM handle.
- * @param   pPage   The pool page.
- *
- * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume of the
- *          small page window employeed by that function. Be careful.
- * @remark  There is no need to assert on the result.
- */
-#if defined(IN_RC)
-DECLINLINE(void) PGMPOOL_UNLOCK_PTR(PVM pVM, void *pvPage)
-{
-    if (pvPage)
-        PGMDynUnlockHCPage(pVM, (uint8_t *)pvPage);
-}
-#else
-# define PGMPOOL_UNLOCK_PTR(pVM, pPage)  do {} while (0)
-#endif
-
 
 /**
@@ -247,5 +200,5 @@
             {
                 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
-                uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 const unsigned iShw = off / sizeof(X86PTE);
                 LogFlow(("PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT iShw=%x\n", iShw));
@@ -270,5 +223,5 @@
             {
                 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
-                uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 if (!((off ^ pPage->GCPhys) & (PAGE_SIZE / 2)))
                 {
@@ -300,5 +253,5 @@
                 unsigned iShwPdpt = iGst / 256;
                 unsigned iShw     = (iGst % 256) * 2;
-                uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
 
                 LogFlow(("pgmPoolMonitorChainChanging PAE for 32 bits: iGst=%x iShw=%x idx = %d page idx=%d\n", iGst, iShw, iShwPdpt, pPage->enmKind - PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD));
@@ -363,5 +316,5 @@
             case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
             {
-                uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 const unsigned iShw = off / sizeof(X86PTEPAE);
                 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
@@ -409,5 +362,5 @@
             case PGMPOOLKIND_32BIT_PD:
             {
-                uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 const unsigned iShw = off / sizeof(X86PTE);         // ASSUMING 32-bit guest paging!
 
@@ -489,5 +442,5 @@
             case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
             {
-                uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 const unsigned iShw = off / sizeof(X86PDEPAE);
                 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
@@ -566,5 +519,5 @@
                 const unsigned offPdpt = GCPhysFault - pPage->GCPhys;
 
-                uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 const unsigned iShw = offPdpt / sizeof(X86PDPE);
                 if (iShw < X86_PG_PAE_PDPE_ENTRIES)          /* don't use RT_ELEMENTS(uShw.pPDPT->a), because that's for long mode only */
@@ -633,5 +586,5 @@
             {
                 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
-                uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 const unsigned iShw = off / sizeof(X86PDEPAE);
                 Assert(!(uShw.pPDPae->a[iShw].u & PGM_PDFLAGS_MAPPING));
@@ -673,5 +626,5 @@
                  * - messing with the bits of pd pointers without changing the physical address
                  */
-                uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 const unsigned iShw = off / sizeof(X86PDPE);
                 if (uShw.pPDPT->a[iShw].n.u1Present)
@@ -703,5 +656,5 @@
                  * - messing with the bits of pd pointers without changing the physical address
                  */
-                uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
                 const unsigned iShw = off / sizeof(X86PDPE);
                 if (uShw.pPML4->a[iShw].n.u1Present)
@@ -730,5 +683,5 @@
                 AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind));
         }
-        PGMPOOL_UNLOCK_PTR(pVM, uShw.pv);
+        PGM_DYNMAP_UNUSED_HINT_VM(pVM, uShw.pv);
 
         /* next */
@@ -960,8 +913,8 @@
     while (pRegFrame->rcx)
     {
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-        uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
+        uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
         pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
-        PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
+        PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
 #else
         pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
@@ -1012,8 +965,8 @@
      * Clear all the pages. ASSUMES that pvFault is readable.
      */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    uint32_t    iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
+    uint32_t    iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
     pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1));
-    PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
+    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
 #else
     pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1));
@@ -1113,5 +1066,5 @@
     if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
     {
-        void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
+        void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
         void *pvGst;
         int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
@@ -1421,5 +1374,5 @@
                     if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
                     {
-                        PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pTempPage);
+                        PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pTempPage);
 
                         for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
@@ -1539,6 +1492,7 @@
     pPage->fDirty = false;
 
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    uint32_t iPrevSubset = PGMDynMapPushAutoSubset(VMMGetCpu(pVM));
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
+    PVMCPU   pVCpu = VMMGetCpu(pVM);
+    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
 #endif
 
@@ -1557,5 +1511,5 @@
     /* Flush those PTEs that have changed. */
     STAM_PROFILE_START(&pPool->StatTrackDeref,a);
-    void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
+    void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
     void *pvGst;
     bool  fFlush;
@@ -1589,6 +1543,6 @@
         Log(("Removed dirty page %RGp cMods=%d cChanges=%d\n", pPage->GCPhys, pPage->cModifications, cChanges));
 
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PGMDynMapPopAutoSubset(VMMGetCpu(pVM), iPrevSubset);
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
+    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
 #endif
 }
@@ -1627,5 +1581,5 @@
      * references to physical pages. (the HCPhys linear lookup is *extremely* expensive!)
      */
-    void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
+    void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
     void *pvGst;
     int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
@@ -3352,8 +3306,8 @@
         else
         {
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+# if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
             /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow and
                pgmPoolTrackFlushGCPhysPTs will/may kill the pool otherwise. */
-            uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
+            uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
 # endif
 
@@ -3370,6 +3324,6 @@
             *pfFlushTLBs = true;
 
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-            PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
+# if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
+            PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
 # endif
         }
@@ -3663,4 +3617,5 @@
             AssertFatalMsgFailed(("enmKind=%d iUser=%#x iUserTable=%#x\n", pUserPage->enmKind, pUser->iUser, pUser->iUserTable));
     }
+    PGM_DYNMAP_UNUSED_HINT_VM(pPool->CTX_SUFF(pVM), u.pau64);
 }
 
@@ -4435,5 +4390,5 @@
      * Map the shadow page and take action according to the page kind.
      */
-    void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
+    void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
     switch (pPage->enmKind)
     {
@@ -4539,5 +4494,5 @@
     STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
     pPage->fZeroed = true;
-    PGMPOOL_UNLOCK_PTR(pPool->CTX_SUFF(pVM), pvShw);
+    PGM_DYNMAP_UNUSED_HINT_VM(pPool->CTX_SUFF(pVM), pvShw);
     Assert(!pPage->cPresent);
 }
@@ -4596,8 +4551,8 @@
     }
 
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     /* Start a subset so we won't run out of mapping space. */
     PVMCPU pVCpu = VMMGetCpu(pVM);
-    uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
+    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
 #endif
 
@@ -4629,7 +4584,7 @@
     pgmPoolCacheFlushPage(pPool, pPage);
 
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     /* Heavy stuff done. */
-    PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
+    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
 #endif
 
Index: /trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp	(revision 31402)
@@ -693,5 +693,7 @@
                         STAM_PROFILE_ADV_STOP(&pVM->trpm.s.aStatGCTraps[iOrgTrap], o);
 
-                    CPUMGCCallGuestTrapHandler(pRegFrame, GuestIdte.Gen.u16SegSel | 1, pVM->trpm.s.aGuestTrapHandler[iGate], eflags.u32, ss_r0, (RTRCPTR)esp_r0);
+                    PGMRZDynMapReleaseAutoSet(pVCpu);
+                    CPUMGCCallGuestTrapHandler(pRegFrame, GuestIdte.Gen.u16SegSel | 1, pVM->trpm.s.aGuestTrapHandler[iGate],
+                                               eflags.u32, ss_r0, (RTRCPTR)esp_r0);
                     /* does not return */
 #else
Index: /trunk/src/VBox/VMM/VMMGC/PGMGC.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMGC/PGMGC.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/VMMGC/PGMGC.cpp	(revision 31402)
@@ -5,5 +5,5 @@
 
 /*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2010 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
Index: /trunk/src/VBox/VMM/VMMGC/TRPMGCHandlers.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMGC/TRPMGCHandlers.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/VMMGC/TRPMGCHandlers.cpp	(revision 31402)
@@ -46,4 +46,5 @@
 #include <iprt/assert.h>
 
+
 /*******************************************************************************
 *   Defined Constants And Macros                                               *
@@ -130,4 +131,6 @@
  * @param   rc          The VBox status code to return.
  * @param   pRegFrame   Pointer to the register frame for the trap.
+ *
+ * @remarks This must not be used for hypervisor traps, only guest traps.
  */
 static int trpmGCExitTrap(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTXCORE pRegFrame)
@@ -231,9 +234,13 @@
          */
         else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
+        {
 #if 1
+            PGMRZDynMapReleaseAutoSet(pVCpu);
+            PGMRZDynMapStartAutoSet(pVCpu);
             rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
 #else
             rc = VINF_PGM_SYNC_CR3;
 #endif
+        }
         /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
         else if (   VM_FF_ISPENDING(pVM, VM_FF_REQUEST)
@@ -246,4 +253,5 @@
                     && ( pRegFrame->eflags.Bits.u2IOPL < (unsigned)(pRegFrame->ss & X86_SEL_RPL) || pRegFrame->eflags.Bits.u1VM))
               , ("rc=%Rrc\neflags=%RX32 ss=%RTsel IOPL=%d\n", rc, pRegFrame->eflags.u32, pRegFrame->ss, pRegFrame->eflags.Bits.u2IOPL));
+    PGMRZDynMapReleaseAutoSet(pVCpu);
     return rc;
 }
@@ -270,14 +278,11 @@
 
     /*
-     * We currently don't make sure of the X86_DR7_GD bit, but
+     * We currently don't make use of the X86_DR7_GD bit, but
      * there might come a time when we do.
      */
-    if ((uDr6 & X86_DR6_BD) == X86_DR6_BD)
-    {
-        AssertReleaseMsgFailed(("X86_DR6_BD isn't used, but it's set! dr7=%RTreg(%RTreg) dr6=%RTreg\n",
-                                ASMGetDR7(), CPUMGetHyperDR7(pVCpu), uDr6));
-        return VERR_NOT_IMPLEMENTED;
-    }
-
+    AssertReleaseMsgReturn((uDr6 & X86_DR6_BD) != X86_DR6_BD,
+                           ("X86_DR6_BD isn't used, but it's set! dr7=%RTreg(%RTreg) dr6=%RTreg\n",
+                            ASMGetDR7(), CPUMGetHyperDR7(pVCpu), uDr6),
+                           VERR_NOT_IMPLEMENTED);
     AssertReleaseMsg(!(uDr6 & X86_DR6_BT), ("X86_DR6_BT is impossible!\n"));
 
@@ -285,4 +290,5 @@
      * Now leave the rest to the DBGF.
      */
+    PGMRZDynMapStartAutoSet(pVCpu);
     int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6);
     if (rc == VINF_EM_RAW_GUEST_TRAP)
@@ -296,4 +302,48 @@
 
 /**
+ * \#DB (Debug event) handler for the hypervisor code.
+ *
+ * This is mostly the same as TRPMGCTrap01Handler, but we skip the PGM auto
+ * mapping set as well as the default trap exit path since they are both really
+ * bad ideas in this context.
+ *
+ * @returns VBox status code.
+ *          VINF_SUCCESS means we completely handled this trap,
+ *          other codes are passed execution to host context.
+ *
+ * @param   pTrpmCpu    Pointer to TRPMCPU data (within VM).
+ * @param   pRegFrame   Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCHyperTrap01Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+    RTGCUINTREG uDr6  = ASMGetAndClearDR6();
+    PVM         pVM   = TRPMCPU_2_VM(pTrpmCpu);
+    PVMCPU      pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+
+    LogFlow(("TRPMGCHyper01: cs:eip=%04x:%08x uDr6=%RTreg\n", pRegFrame->cs, pRegFrame->eip, uDr6));
+
+    /*
+     * We currently don't make use of the X86_DR7_GD bit, but
+     * there might come a time when we do.
+     */
+    AssertReleaseMsgReturn((uDr6 & X86_DR6_BD) != X86_DR6_BD,
+                           ("X86_DR6_BD isn't used, but it's set! dr7=%RTreg(%RTreg) dr6=%RTreg\n",
+                            ASMGetDR7(), CPUMGetHyperDR7(pVCpu), uDr6),
+                           VERR_NOT_IMPLEMENTED);
+    AssertReleaseMsg(!(uDr6 & X86_DR6_BT), ("X86_DR6_BT is impossible!\n"));
+
+    /*
+     * Now leave the rest to the DBGF.
+     */
+    int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6);
+    AssertStmt(rc != VINF_EM_RAW_GUEST_TRAP, rc = VERR_INTERNAL_ERROR_3);
+
+    Log6(("TRPMGCHyper01: %Rrc (%04x:%08x %RTreg)\n", rc, pRegFrame->cs, pRegFrame->eip, uDr6));
+    return rc;
+}
+
+
+/**
  * NMI handler, for when we are using NMIs to debug things.
  *
@@ -311,4 +361,27 @@
     LogFlow(("TRPMGCTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs, pRegFrame->eip));
     RTLogComPrintf("TRPMGCTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs, pRegFrame->eip);
+    return VERR_TRPM_DONT_PANIC;
+}
+
+
+/**
+ * NMI handler, for when we are using NMIs to debug things.
+ *
+ * This is the handler we're most likely to hit when the NMI fires (it is
+ * unlikely that we'll be stuck in guest code).
+ *
+ * @returns VBox status code.
+ *          VINF_SUCCESS means we completely handled this trap,
+ *          other codes are passed execution to host context.
+ *
+ * @param   pTrpmCpu    Pointer to TRPMCPU data (within VM).
+ * @param   pRegFrame   Pointer to the register frame for the trap.
+ * @internal
+ * @remark  This is not hooked up unless you're building with VBOX_WITH_NMI defined.
+ */
+DECLASM(int) TRPMGCHyperTrap02Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+    LogFlow(("TRPMGCHyperTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs, pRegFrame->eip));
+    RTLogComPrintf("TRPMGCHyperTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs, pRegFrame->eip);
     return VERR_TRPM_DONT_PANIC;
 }
@@ -332,7 +405,8 @@
     PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
     int     rc;
-
-    /*
-     * Both PATM are using INT3s, let them have a go first.
+    PGMRZDynMapStartAutoSet(pVCpu);
+
+    /*
+     * PATM is using INT3s, let them have a go first.
      */
     if (    (pRegFrame->ss & X86_SEL_RPL) == 1
@@ -357,4 +431,35 @@
 
 /**
+ * \#BP (Breakpoint) handler.
+ *
+ * This is similar to TRPMGCTrap03Handler but we leave out the bits that are
+ * potentially harmful to us (the common trap exit and the auto mapping set).
+ *
+ * @returns VBox status code.
+ *          VINF_SUCCESS means we completely handled this trap,
+ *          other codes are passed execution to host context.
+ *
+ * @param   pTrpmCpu    Pointer to TRPMCPU data (within VM).
+ * @param   pRegFrame   Pointer to the register frame for the trap.
+ * @internal
+ */
+DECLASM(int) TRPMGCHyperTrap03Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
+{
+    LogFlow(("TRPMGCHyper03: %04x:%08x\n", pRegFrame->cs, pRegFrame->eip));
+    PVM     pVM   = TRPMCPU_2_VM(pTrpmCpu);
+    PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+
+    /*
+     * Hand it over to DBGF.
+     */
+    int rc = DBGFRZTrap03Handler(pVM, pVCpu, pRegFrame);
+    AssertStmt(rc != VINF_EM_RAW_GUEST_TRAP, rc = VERR_INTERNAL_ERROR_3);
+
+    Log6(("TRPMGCHyper03: %Rrc (%04x:%08x)\n", rc, pRegFrame->cs, pRegFrame->eip));
+    return rc;
+}
+
+
+/**
  * Trap handler for illegal opcode fault (\#UD).
  *
@@ -373,4 +478,5 @@
     PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
     int     rc;
+    PGMRZDynMapStartAutoSet(pVCpu);
 
     if (CPUMGetGuestCPL(pVCpu, pRegFrame) == 0)
@@ -402,8 +508,10 @@
         /*
          * UD2 in a patch?
+         * Note! PATMGCHandleIllegalInstrTrap doesn't always return.
          */
         if (    Cpu.pCurInstr->opcode == OP_ILLUD2
             &&  PATMIsPatchGCAddr(pVM, pRegFrame->eip))
         {
+            LogFlow(("TRPMGCTrap06Handler: -> PATMGCHandleIllegalInstrTrap\n"));
             rc = PATMGCHandleIllegalInstrTrap(pVM, pRegFrame);
             /** @todo  These tests are completely unnecessary, should just follow the
@@ -439,4 +547,5 @@
         else if (Cpu.pCurInstr->opcode == OP_MONITOR)
         {
+            LogFlow(("TRPMGCTrap06Handler: -> EMInterpretInstructionCPU\n"));
             uint32_t cbIgnored;
             rc = EMInterpretInstructionCPU(pVM, pVCpu, &Cpu, pRegFrame, PC, &cbIgnored);
@@ -446,8 +555,12 @@
         /* Never generate a raw trap here; it might be an instruction, that requires emulation. */
         else
+        {
+            LogFlow(("TRPMGCTrap06Handler: -> VINF_EM_RAW_EMULATE_INSTR\n"));
             rc = VINF_EM_RAW_EMULATE_INSTR;
+        }
     }
     else
     {
+        LogFlow(("TRPMGCTrap06Handler: -> TRPMForwardTrap\n"));
         rc = TRPMForwardTrap(pVCpu, pRegFrame, 0x6, 0, TRPM_TRAP_NO_ERRORCODE, TRPM_TRAP, 0x6);
         Assert(rc == VINF_EM_RAW_GUEST_TRAP);
@@ -478,4 +591,5 @@
     PVM     pVM   = TRPMCPU_2_VM(pTrpmCpu);
     PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+    PGMRZDynMapStartAutoSet(pVCpu);
 
     int rc = CPUMHandleLazyFPU(pVCpu);
@@ -500,5 +614,7 @@
 {
     LogFlow(("TRPMGC0b: %04x:%08x\n", pRegFrame->cs, pRegFrame->eip));
-    PVM pVM = TRPMCPU_2_VM(pTrpmCpu);
+    PVM     pVM   = TRPMCPU_2_VM(pTrpmCpu);
+    PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
+    PGMRZDynMapStartAutoSet(pVCpu);
 
     /*
@@ -574,4 +690,5 @@
             pTrpmCpu->uActiveVector = ~0;
             Log6(("TRPMGC0b: %Rrc (%04x:%08x) (CG)\n", VINF_EM_RAW_RING_SWITCH, pRegFrame->cs, pRegFrame->eip));
+            PGMRZDynMapReleaseAutoSet(pVCpu);
             return VINF_EM_RAW_RING_SWITCH;
         }
@@ -582,4 +699,5 @@
      */
     Log6(("TRPMGC0b: %Rrc (%04x:%08x)\n", VINF_EM_RAW_GUEST_TRAP, pRegFrame->cs, pRegFrame->eip));
+    PGMRZDynMapReleaseAutoSet(pVCpu);
     return VINF_EM_RAW_GUEST_TRAP;
 }
@@ -933,4 +1051,5 @@
     LogFlow(("TRPMGC0d: %04x:%08x err=%x\n", pRegFrame->cs, pRegFrame->eip, (uint32_t)pVCpu->trpm.s.uActiveErrorCode));
 
+    PGMRZDynMapStartAutoSet(pVCpu);
     int rc = trpmGCTrap0dHandler(pVM, pTrpmCpu, pRegFrame);
     switch (rc)
@@ -994,4 +1113,5 @@
      * This is all PGM stuff.
      */
+    PGMRZDynMapStartAutoSet(pVCpu);
     int rc = PGMTrap0eHandler(pVCpu, pVCpu->trpm.s.uActiveErrorCode, pRegFrame, (RTGCPTR)pVCpu->trpm.s.uActiveCR2);
     switch (rc)
@@ -1009,5 +1129,8 @@
         case VINF_EM_RAW_GUEST_TRAP:
             if (PATMIsPatchGCAddr(pVM, pRegFrame->eip))
+            {
+                PGMRZDynMapReleaseAutoSet(pVCpu);
                 return VINF_PATM_PATCH_TRAP_PF;
+            }
 
             rc = TRPMForwardTrap(pVCpu, pRegFrame, 0xE, 0, TRPM_TRAP_HAS_ERRORCODE, TRPM_TRAP, 0xe);
Index: /trunk/src/VBox/VMM/VMMGC/TRPMGCHandlersA.asm
===================================================================
--- /trunk/src/VBox/VMM/VMMGC/TRPMGCHandlersA.asm	(revision 31401)
+++ /trunk/src/VBox/VMM/VMMGC/TRPMGCHandlersA.asm	(revision 31402)
@@ -4,5 +4,5 @@
 ;
 
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2010 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
@@ -34,22 +34,25 @@
 extern IMPNAME(g_TRPM)                  ; where there is a pointer to the real symbol. PE imports
 extern IMPNAME(g_TRPMCPU)               ; are a bit confusing at first... :-)
-extern IMPNAME(g_VM)                    
+extern IMPNAME(g_VM)
 extern NAME(CPUMGCRestoreInt)
 extern NAME(cpumHandleLazyFPUAsm)
 extern NAME(CPUMHyperSetCtxCore)
 extern NAME(trpmGCTrapInGeneric)
-extern NAME(TRPMGCHyperTrap0bHandler)
-extern NAME(TRPMGCHyperTrap0dHandler)
-extern NAME(TRPMGCHyperTrap0eHandler)
 extern NAME(TRPMGCTrap01Handler)
+extern NAME(TRPMGCHyperTrap01Handler)
 %ifdef VBOX_WITH_NMI
 extern NAME(TRPMGCTrap02Handler)
+extern NAME(TRPMGCHyperTrap02Handler)
 %endif
 extern NAME(TRPMGCTrap03Handler)
+extern NAME(TRPMGCHyperTrap03Handler)
 extern NAME(TRPMGCTrap06Handler)
+extern NAME(TRPMGCTrap07Handler)
 extern NAME(TRPMGCTrap0bHandler)
+extern NAME(TRPMGCHyperTrap0bHandler)
 extern NAME(TRPMGCTrap0dHandler)
+extern NAME(TRPMGCHyperTrap0dHandler)
 extern NAME(TRPMGCTrap0eHandler)
-extern NAME(TRPMGCTrap07Handler)
+extern NAME(TRPMGCHyperTrap0eHandler)
 
 ;; IMPORTANT all COM_ functions trashes esi, some edi and the LOOP_SHORT_WHILE kills ecx.
@@ -71,11 +74,11 @@
                                         ; =============================================================
     dd 0                                ;  0 - #DE - F   - N - Divide error
-    dd NAME(TRPMGCTrap01Handler)        ;  1 - #DB - F/T - N - Single step, INT 1 instruction
+    dd NAME(TRPMGCHyperTrap01Handler)   ;  1 - #DB - F/T - N - Single step, INT 1 instruction
 %ifdef VBOX_WITH_NMI
-    dd NAME(TRPMGCTrap02Handler)        ;  2 -     - I   - N - Non-Maskable Interrupt (NMI)
+    dd NAME(TRPMGCHyperTrap02Handler)   ;  2 -     - I   - N - Non-Maskable Interrupt (NMI)
 %else
     dd 0                                ;  2 -     - I   - N - Non-Maskable Interrupt (NMI)
 %endif
-    dd NAME(TRPMGCTrap03Handler)        ;  3 - #BP - T   - N - Breakpoint, INT 3 instruction.
+    dd NAME(TRPMGCHyperTrap03Handler)   ;  3 - #BP - T   - N - Breakpoint, INT 3 instruction.
     dd 0                                ;  4 - #OF - T   - N - Overflow, INTO instruction.
     dd 0                                ;  5 - #BR - F   - N - BOUND Range Exceeded, BOUND instruction.
@@ -271,5 +274,5 @@
     mov     [esp + CPUMCTXCORE.eflags], eax
 
-%if GC_ARCH_BITS == 64    
+%if GC_ARCH_BITS == 64
     ; zero out the high dwords
     mov     dword [esp + CPUMCTXCORE.eax + 4], 0
@@ -775,5 +778,5 @@
     mov     [esp + CPUMCTXCORE.ss], eax
 
-%if GC_ARCH_BITS == 64    
+%if GC_ARCH_BITS == 64
     ; zero out the high dwords
     mov     dword [esp + CPUMCTXCORE.eax + 4], 0
Index: /trunk/src/VBox/VMM/VMMGC/VMMGC.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMGC/VMMGC.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/VMMGC/VMMGC.cpp	(revision 31402)
@@ -5,5 +5,5 @@
 
 /*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2010 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ -91,4 +91,6 @@
             AssertRCReturn(rc, rc);
 
+            rc = PGMRCDynMapInit(pVM);
+            AssertRCReturn(rc, rc);
             return VINF_SUCCESS;
         }
Index: /trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp	(revision 31402)
@@ -1094,5 +1094,5 @@
 
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
-    bool fStartedSet = PGMDynMapStartOrMigrateAutoSet(pVCpu);
+    bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
 #endif
 
@@ -1107,5 +1107,5 @@
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     if (fStartedSet)
-        PGMDynMapReleaseAutoSet(pVCpu);
+        PGMRZDynMapReleaseAutoSet(pVCpu);
 #endif
 
@@ -1209,5 +1209,5 @@
 
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
-    PGMDynMapStartAutoSet(pVCpu);
+    PGMRZDynMapStartAutoSet(pVCpu);
 #endif
 
@@ -1217,5 +1217,5 @@
 
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
-    PGMDynMapReleaseAutoSet(pVCpu);
+    PGMRZDynMapReleaseAutoSet(pVCpu);
 #endif
     return rc;
Index: /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp	(revision 31402)
@@ -2565,5 +2565,5 @@
 #endif
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PGMDynMapFlushAutoSet(pVCpu);
+    PGMRZDynMapFlushAutoSet(pVCpu);
 #endif
 
Index: unk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp	(revision 31401)
+++ 	(revision )
@@ -1,2236 +1,0 @@
-/* $Id$ */
-/** @file
- * PGM - Page Manager and Monitor, ring-0 dynamic mapping cache.
- */
-
-/*
- * Copyright (C) 2008 Oracle Corporation
- *
- * This file is part of VirtualBox Open Source Edition (OSE), as
- * available from http://www.virtualbox.org. This file is free software;
- * you can redistribute it and/or modify it under the terms of the GNU
- * General Public License (GPL) as published by the Free Software
- * Foundation, in version 2 as it comes in the "COPYING" file of the
- * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
- * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
- */
-
-/*******************************************************************************
-*   Internal Functions                                                         *
-*******************************************************************************/
-#define LOG_GROUP LOG_GROUP_PGM
-#include <VBox/pgm.h>
-#include "../PGMInternal.h"
-#include <VBox/vm.h>
-#include "../PGMInline.h"
-#include <VBox/sup.h>
-#include <VBox/err.h>
-#include <iprt/asm.h>
-#include <iprt/asm-amd64-x86.h>
-#include <iprt/alloc.h>
-#include <iprt/assert.h>
-#include <iprt/cpuset.h>
-#include <iprt/memobj.h>
-#include <iprt/mp.h>
-#include <iprt/semaphore.h>
-#include <iprt/spinlock.h>
-#include <iprt/string.h>
-
-
-/*******************************************************************************
-*   Defined Constants And Macros                                               *
-*******************************************************************************/
-/** The max size of the mapping cache (in pages). */
-#define PGMR0DYNMAP_MAX_PAGES               ((16*_1M) >> PAGE_SHIFT)
-/** The small segment size that is adopted on out-of-memory conditions with a
- * single big segment. */
-#define PGMR0DYNMAP_SMALL_SEG_PAGES         128
-/** The number of pages we reserve per CPU. */
-#define PGMR0DYNMAP_PAGES_PER_CPU           256
-/** The minimum number of pages we reserve per CPU.
- * This must be equal or larger than the autoset size.  */
-#define PGMR0DYNMAP_PAGES_PER_CPU_MIN       64
-/** The number of guard pages.
- * @remarks Never do tuning of the hashing or whatnot with a strict build!  */
-#if defined(VBOX_STRICT)
-# define PGMR0DYNMAP_GUARD_PAGES            1
-#else
-# define PGMR0DYNMAP_GUARD_PAGES            0
-#endif
-/** The dummy physical address of guard pages. */
-#define PGMR0DYNMAP_GUARD_PAGE_HCPHYS       UINT32_C(0x7777feed)
-/** The dummy reference count of guard pages. (Must be non-zero.) */
-#define PGMR0DYNMAP_GUARD_PAGE_REF_COUNT    INT32_C(0x7777feed)
-#if 0
-/** Define this to just clear the present bit on guard pages.
- * The alternative is to replace the entire PTE with an bad not-present
- * PTE. Either way, XNU will screw us. :-/   */
-#define PGMR0DYNMAP_GUARD_NP
-#endif
-/** The dummy PTE value for a page. */
-#define PGMR0DYNMAP_GUARD_PAGE_LEGACY_PTE   X86_PTE_PG_MASK
-/** The dummy PTE value for a page. */
-#define PGMR0DYNMAP_GUARD_PAGE_PAE_PTE      UINT64_MAX /*X86_PTE_PAE_PG_MASK*/
-/** Calcs the overload threshold. Current set at 50%. */
-#define PGMR0DYNMAP_CALC_OVERLOAD(cPages)   ((cPages) / 2)
-
-#if 0
-/* Assertions causes panics if preemption is disabled, this can be used to work around that. */
-//#define RTSpinlockAcquire(a,b) do {} while (0)
-//#define RTSpinlockRelease(a,b) do {} while (0)
-#endif
-
-/** Converts a PGMCPUM::AutoSet pointer into a PVMCPU. */
-#define PGMR0DYNMAP_2_VMCPU(pSet)           (RT_FROM_MEMBER(pSet, VMCPU, pgm.s.AutoSet))
-
-/** Converts a PGMCPUM::AutoSet pointer into a PVM. */
-#define PGMR0DYNMAP_2_VM(pSet)              (PGMR0DYNMAP_2_VMCPU(pSet)->CTX_SUFF(pVM))
-
-
-/*******************************************************************************
-*   Structures and Typedefs                                                    *
-*******************************************************************************/
-/**
- * Ring-0 dynamic mapping cache segment.
- *
- * The dynamic mapping cache can be extended with additional segments if the
- * load is found to be too high.  This done the next time a VM is created, under
- * the protection of the init mutex.  The arrays is reallocated and the new
- * segment is added to the end of these.  Nothing is rehashed of course, as the
- * indexes / addresses must remain unchanged.
- *
- * This structure is only modified while owning the init mutex or during module
- * init / term.
- */
-typedef struct PGMR0DYNMAPSEG
-{
-    /** Pointer to the next segment. */
-    struct PGMR0DYNMAPSEG      *pNext;
-    /** The memory object for the virtual address range that we're abusing. */
-    RTR0MEMOBJ                  hMemObj;
-    /** The start page in the cache. (I.e. index into the arrays.) */
-    uint16_t                    iPage;
-    /** The number of pages this segment contributes. */
-    uint16_t                    cPages;
-    /** The number of page tables. */
-    uint16_t                    cPTs;
-    /** The memory objects for the page tables. */
-    RTR0MEMOBJ                  ahMemObjPTs[1];
-} PGMR0DYNMAPSEG;
-/** Pointer to a ring-0 dynamic mapping cache segment. */
-typedef PGMR0DYNMAPSEG *PPGMR0DYNMAPSEG;
-
-
-/**
- * Ring-0 dynamic mapping cache entry.
- *
- * This structure tracks
- */
-typedef struct PGMR0DYNMAPENTRY
-{
-    /** The physical address of the currently mapped page.
-     * This is duplicate for three reasons: cache locality, cache policy of the PT
-     * mappings and sanity checks.   */
-    RTHCPHYS                    HCPhys;
-    /** Pointer to the page. */
-    void                       *pvPage;
-    /** The number of references. */
-    int32_t volatile            cRefs;
-    /** PTE pointer union. */
-    union PGMR0DYNMAPENTRY_PPTE
-    {
-        /** PTE pointer, 32-bit legacy version. */
-        PX86PTE                 pLegacy;
-        /** PTE pointer, PAE version. */
-        PX86PTEPAE              pPae;
-        /** PTE pointer, the void version. */
-        void                   *pv;
-    } uPte;
-    /** CPUs that haven't invalidated this entry after it's last update. */
-    RTCPUSET                    PendingSet;
-} PGMR0DYNMAPENTRY;
-/** Pointer to a ring-0 dynamic mapping cache entry. */
-typedef PGMR0DYNMAPENTRY *PPGMR0DYNMAPENTRY;
-
-
-/**
- * Ring-0 dynamic mapping cache.
- *
- * This is initialized during VMMR0 module init but no segments are allocated at
- * that time.  Segments will be added when the first VM is started and removed
- * again when the last VM shuts down, thus avoid consuming memory while dormant.
- * At module termination, the remaining bits will be freed up.
- */
-typedef struct PGMR0DYNMAP
-{
-    /** The usual magic number / eye catcher (PGMR0DYNMAP_MAGIC). */
-    uint32_t                    u32Magic;
-    /** Spinlock serializing the normal operation of the cache. */
-    RTSPINLOCK                  hSpinlock;
-    /** Array for tracking and managing the pages.  */
-    PPGMR0DYNMAPENTRY           paPages;
-    /** The cache size given as a number of pages. */
-    uint32_t                    cPages;
-    /** Whether it's 32-bit legacy or PAE/AMD64 paging mode. */
-    bool                        fLegacyMode;
-    /** The current load.
-     * This does not include guard pages. */
-    uint32_t                    cLoad;
-    /** The max load ever.
-     * This is maintained to get trigger adding of more mapping space. */
-    uint32_t                    cMaxLoad;
-    /** Initialization / termination lock. */
-    RTSEMFASTMUTEX              hInitLock;
-    /** The number of guard pages. */
-    uint32_t                    cGuardPages;
-    /** The number of users (protected by hInitLock). */
-    uint32_t                    cUsers;
-    /** Array containing a copy of the original page tables.
-     * The entries are either X86PTE or X86PTEPAE according to fLegacyMode. */
-    void                       *pvSavedPTEs;
-    /** List of segments. */
-    PPGMR0DYNMAPSEG             pSegHead;
-    /** The paging mode. */
-    SUPPAGINGMODE               enmPgMode;
-} PGMR0DYNMAP;
-/** Pointer to the ring-0 dynamic mapping cache */
-typedef PGMR0DYNMAP *PPGMR0DYNMAP;
-
-/** PGMR0DYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */
-#define PGMR0DYNMAP_MAGIC       0x19640201
-
-
-/**
- * Paging level data.
- */
-typedef struct PGMR0DYNMAPPGLVL
-{
-    uint32_t            cLevels;    /**< The number of levels. */
-    struct
-    {
-        RTHCPHYS        HCPhys;     /**< The address of the page for the current level,
-                                     *  i.e. what hMemObj/hMapObj is currently mapping. */
-        RTHCPHYS        fPhysMask;  /**< Mask for extracting HCPhys from uEntry. */
-        RTR0MEMOBJ      hMemObj;    /**< Memory object for HCPhys, PAGE_SIZE. */
-        RTR0MEMOBJ      hMapObj;    /**< Mapping object for hMemObj. */
-        uint32_t        fPtrShift;  /**< The pointer shift count. */
-        uint64_t        fPtrMask;   /**< The mask to apply to the shifted pointer to get the table index. */
-        uint64_t        fAndMask;   /**< And mask to check entry flags. */
-        uint64_t        fResMask;   /**< The result from applying fAndMask. */
-        union
-        {
-            void        *pv;        /**< hMapObj address. */
-            PX86PGUINT   paLegacy;  /**< Legacy table view. */
-            PX86PGPAEUINT paPae;    /**< PAE/AMD64 table view. */
-        } u;
-    } a[4];
-} PGMR0DYNMAPPGLVL;
-/** Pointer to paging level data. */
-typedef PGMR0DYNMAPPGLVL *PPGMR0DYNMAPPGLVL;
-
-
-/*******************************************************************************
-*   Global Variables                                                           *
-*******************************************************************************/
-/** Pointer to the ring-0 dynamic mapping cache. */
-static PPGMR0DYNMAP g_pPGMR0DynMap;
-/** For overflow testing. */
-static bool         g_fPGMR0DynMapTestRunning = false;
-
-
-/*******************************************************************************
-*   Internal Functions                                                         *
-*******************************************************************************/
-static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs);
-static int  pgmR0DynMapSetup(PPGMR0DYNMAP pThis);
-static int  pgmR0DynMapExpand(PPGMR0DYNMAP pThis);
-static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis);
-#if 0 /*def DEBUG*/
-static int  pgmR0DynMapTest(PVM pVM);
-#endif
-
-
-/**
- * Initializes the ring-0 dynamic mapping cache.
- *
- * @returns VBox status code.
- */
-VMMR0DECL(int) PGMR0DynMapInit(void)
-{
-    Assert(!g_pPGMR0DynMap);
-
-    /*
-     * Create and initialize the cache instance.
-     */
-    PPGMR0DYNMAP pThis = (PPGMR0DYNMAP)RTMemAllocZ(sizeof(*pThis));
-    AssertLogRelReturn(pThis, VERR_NO_MEMORY);
-    int             rc = VINF_SUCCESS;
-    pThis->enmPgMode = SUPR0GetPagingMode();
-    switch (pThis->enmPgMode)
-    {
-        case SUPPAGINGMODE_32_BIT:
-        case SUPPAGINGMODE_32_BIT_GLOBAL:
-            pThis->fLegacyMode = false;
-            break;
-        case SUPPAGINGMODE_PAE:
-        case SUPPAGINGMODE_PAE_GLOBAL:
-        case SUPPAGINGMODE_PAE_NX:
-        case SUPPAGINGMODE_PAE_GLOBAL_NX:
-        case SUPPAGINGMODE_AMD64:
-        case SUPPAGINGMODE_AMD64_GLOBAL:
-        case SUPPAGINGMODE_AMD64_NX:
-        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
-            pThis->fLegacyMode = false;
-            break;
-        default:
-            rc = VERR_INTERNAL_ERROR;
-            break;
-    }
-    if (RT_SUCCESS(rc))
-    {
-        rc = RTSemFastMutexCreate(&pThis->hInitLock);
-        if (RT_SUCCESS(rc))
-        {
-            rc = RTSpinlockCreate(&pThis->hSpinlock);
-            if (RT_SUCCESS(rc))
-            {
-                pThis->u32Magic = PGMR0DYNMAP_MAGIC;
-                g_pPGMR0DynMap = pThis;
-                return VINF_SUCCESS;
-            }
-            RTSemFastMutexDestroy(pThis->hInitLock);
-        }
-    }
-    RTMemFree(pThis);
-    return rc;
-}
-
-
-/**
- * Terminates the ring-0 dynamic mapping cache.
- */
-VMMR0DECL(void) PGMR0DynMapTerm(void)
-{
-    /*
-     * Destroy the cache.
-     *
-     * There is not supposed to be any races here, the loader should
-     * make sure about that. So, don't bother locking anything.
-     *
-     * The VM objects should all be destroyed by now, so there is no
-     * dangling users or anything like that to clean up. This routine
-     * is just a mirror image of PGMR0DynMapInit.
-     */
-    PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
-    if (pThis)
-    {
-        AssertPtr(pThis);
-        g_pPGMR0DynMap = NULL;
-
-        /* This should *never* happen, but in case it does try not to leak memory. */
-        AssertLogRelMsg(!pThis->cUsers && !pThis->paPages && !pThis->pvSavedPTEs && !pThis->cPages,
-                        ("cUsers=%d paPages=%p pvSavedPTEs=%p cPages=%#x\n",
-                         pThis->cUsers, pThis->paPages, pThis->pvSavedPTEs, pThis->cPages));
-        if (pThis->paPages)
-            pgmR0DynMapTearDown(pThis);
-
-        /* Free the associated resources. */
-        RTSemFastMutexDestroy(pThis->hInitLock);
-        pThis->hInitLock = NIL_RTSEMFASTMUTEX;
-        RTSpinlockDestroy(pThis->hSpinlock);
-        pThis->hSpinlock = NIL_RTSPINLOCK;
-        pThis->u32Magic = UINT32_MAX;
-        RTMemFree(pThis);
-    }
-}
-
-
-/**
- * Initializes the dynamic mapping cache for a new VM.
- *
- * @returns VBox status code.
- * @param   pVM         Pointer to the shared VM structure.
- */
-VMMR0DECL(int) PGMR0DynMapInitVM(PVM pVM)
-{
-    AssertMsgReturn(!pVM->pgm.s.pvR0DynMapUsed, ("%p (pThis=%p)\n", pVM->pgm.s.pvR0DynMapUsed, g_pPGMR0DynMap), VERR_WRONG_ORDER);
-
-    /*
-     * Initialize the auto sets.
-     */
-    VMCPUID idCpu = pVM->cCpus;
-    AssertReturn(idCpu > 0 && idCpu <= VMM_MAX_CPU_COUNT, VERR_INTERNAL_ERROR);
-    while (idCpu-- > 0)
-    {
-        PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
-        uint32_t j = RT_ELEMENTS(pSet->aEntries);
-        while (j-- > 0)
-        {
-            pSet->aEntries[j].iPage  = UINT16_MAX;
-            pSet->aEntries[j].cRefs  = 0;
-            pSet->aEntries[j].pvPage = NULL;
-            pSet->aEntries[j].HCPhys = NIL_RTHCPHYS;
-        }
-        pSet->cEntries = PGMMAPSET_CLOSED;
-        pSet->iSubset = UINT32_MAX;
-        pSet->iCpu = -1;
-        memset(&pSet->aiHashTable[0], 0xff, sizeof(pSet->aiHashTable));
-    }
-
-    /*
-     * Do we need the cache? Skip the last bit if we don't.
-     */
-    if (!VMMIsHwVirtExtForced(pVM))
-        return VINF_SUCCESS;
-
-    /*
-     * Reference and if necessary setup or expand the cache.
-     */
-    PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
-    AssertPtrReturn(pThis, VERR_INTERNAL_ERROR);
-    int rc = RTSemFastMutexRequest(pThis->hInitLock);
-    AssertLogRelRCReturn(rc, rc);
-
-    pThis->cUsers++;
-    if (pThis->cUsers == 1)
-    {
-        rc = pgmR0DynMapSetup(pThis);
-#if 0 /*def DEBUG*/
-        if (RT_SUCCESS(rc))
-        {
-            rc = pgmR0DynMapTest(pVM);
-            if (RT_FAILURE(rc))
-                pgmR0DynMapTearDown(pThis);
-        }
-#endif
-    }
-    else if (pThis->cMaxLoad > PGMR0DYNMAP_CALC_OVERLOAD(pThis->cPages - pThis->cGuardPages))
-        rc = pgmR0DynMapExpand(pThis);
-    if (RT_SUCCESS(rc))
-        pVM->pgm.s.pvR0DynMapUsed = pThis;
-    else
-        pThis->cUsers--;
-
-    RTSemFastMutexRelease(pThis->hInitLock);
-    return rc;
-}
-
-
-/**
- * Terminates the dynamic mapping cache usage for a VM.
- *
- * @param   pVM         Pointer to the shared VM structure.
- */
-VMMR0DECL(void) PGMR0DynMapTermVM(PVM pVM)
-{
-    /*
-     * Return immediately if we're not using the cache.
-     */
-    if (!pVM->pgm.s.pvR0DynMapUsed)
-        return;
-
-    PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
-    AssertPtrReturnVoid(pThis);
-
-    int rc = RTSemFastMutexRequest(pThis->hInitLock);
-    AssertLogRelRCReturnVoid(rc);
-
-    if (pVM->pgm.s.pvR0DynMapUsed == pThis)
-    {
-        pVM->pgm.s.pvR0DynMapUsed = NULL;
-
-#ifdef VBOX_STRICT
-        PGMR0DynMapAssertIntegrity();
-#endif
-
-        /*
-         * Clean up and check the auto sets.
-         */
-        VMCPUID idCpu = pVM->cCpus;
-        while (idCpu-- > 0)
-        {
-            PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
-            uint32_t j = pSet->cEntries;
-            if (j <= RT_ELEMENTS(pSet->aEntries))
-            {
-                /*
-                 * The set is open, close it.
-                 */
-                while (j-- > 0)
-                {
-                    int32_t  cRefs = pSet->aEntries[j].cRefs;
-                    uint32_t iPage = pSet->aEntries[j].iPage;
-                    LogRel(("PGMR0DynMapTermVM: %d dangling refs to %#x\n", cRefs, iPage));
-                    if (iPage < pThis->cPages && cRefs > 0)
-                        pgmR0DynMapReleasePage(pThis, iPage, cRefs);
-                    else
-                        AssertLogRelMsgFailed(("cRefs=%d iPage=%#x cPages=%u\n", cRefs, iPage, pThis->cPages));
-
-                    pSet->aEntries[j].iPage  = UINT16_MAX;
-                    pSet->aEntries[j].cRefs  = 0;
-                    pSet->aEntries[j].pvPage = NULL;
-                    pSet->aEntries[j].HCPhys = NIL_RTHCPHYS;
-                }
-                pSet->cEntries = PGMMAPSET_CLOSED;
-                pSet->iSubset = UINT32_MAX;
-                pSet->iCpu = -1;
-            }
-            else
-                AssertMsg(j == PGMMAPSET_CLOSED, ("cEntries=%#x\n", j));
-
-            j = RT_ELEMENTS(pSet->aEntries);
-            while (j-- > 0)
-            {
-                Assert(pSet->aEntries[j].iPage == UINT16_MAX);
-                Assert(!pSet->aEntries[j].cRefs);
-            }
-        }
-
-        /*
-         * Release our reference to the mapping cache.
-         */
-        Assert(pThis->cUsers > 0);
-        pThis->cUsers--;
-        if (!pThis->cUsers)
-            pgmR0DynMapTearDown(pThis);
-    }
-    else
-        AssertLogRelMsgFailed(("pvR0DynMapUsed=%p pThis=%p\n", pVM->pgm.s.pvR0DynMapUsed, pThis));
-
-    RTSemFastMutexRelease(pThis->hInitLock);
-}
-
-
-/**
- * Shoots down the TLBs for all the cache pages, pgmR0DynMapTearDown helper.
- *
- * @param   idCpu           The current CPU.
- * @param   pvUser1         The dynamic mapping cache instance.
- * @param   pvUser2         Unused, NULL.
- */
-static DECLCALLBACK(void) pgmR0DynMapShootDownTlbs(RTCPUID idCpu, void *pvUser1, void *pvUser2)
-{
-    Assert(!pvUser2);
-    PPGMR0DYNMAP        pThis   = (PPGMR0DYNMAP)pvUser1;
-    Assert(pThis == g_pPGMR0DynMap);
-    PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
-    uint32_t            iPage   = pThis->cPages;
-    while (iPage-- > 0)
-        ASMInvalidatePage(paPages[iPage].pvPage);
-}
-
-
-/**
- * Shoot down the TLBs for every single cache entry on all CPUs.
- *
- * @returns IPRT status code (RTMpOnAll).
- * @param   pThis       The dynamic mapping cache instance.
- */
-static int pgmR0DynMapTlbShootDown(PPGMR0DYNMAP pThis)
-{
-    int rc = RTMpOnAll(pgmR0DynMapShootDownTlbs, pThis, NULL);
-    AssertRC(rc);
-    if (RT_FAILURE(rc))
-    {
-        uint32_t iPage = pThis->cPages;
-        while (iPage-- > 0)
-            ASMInvalidatePage(pThis->paPages[iPage].pvPage);
-    }
-    return rc;
-}
-
-
-/**
- * Calculate the new cache size based on cMaxLoad statistics.
- *
- * @returns Number of pages.
- * @param   pThis       The dynamic mapping cache instance.
- * @param   pcMinPages  The minimal size in pages.
- */
-static uint32_t pgmR0DynMapCalcNewSize(PPGMR0DYNMAP pThis, uint32_t *pcMinPages)
-{
-    Assert(pThis->cPages <= PGMR0DYNMAP_MAX_PAGES);
-
-    /* cCpus * PGMR0DYNMAP_PAGES_PER_CPU(_MIN). */
-    RTCPUID     cCpus     = RTMpGetCount();
-    AssertReturn(cCpus > 0 && cCpus <= RTCPUSET_MAX_CPUS, 0);
-    uint32_t    cPages    = cCpus * PGMR0DYNMAP_PAGES_PER_CPU;
-    uint32_t    cMinPages = cCpus * PGMR0DYNMAP_PAGES_PER_CPU_MIN;
-
-    /* adjust against cMaxLoad. */
-    AssertMsg(pThis->cMaxLoad <= PGMR0DYNMAP_MAX_PAGES, ("%#x\n", pThis->cMaxLoad));
-    if (pThis->cMaxLoad > PGMR0DYNMAP_MAX_PAGES)
-        pThis->cMaxLoad = 0;
-
-    while (pThis->cMaxLoad > PGMR0DYNMAP_CALC_OVERLOAD(cPages))
-        cPages += PGMR0DYNMAP_PAGES_PER_CPU;
-
-    if (pThis->cMaxLoad > cMinPages)
-        cMinPages = pThis->cMaxLoad;
-
-    /* adjust against max and current size. */
-    if (cPages < pThis->cPages)
-        cPages = pThis->cPages;
-    cPages *= PGMR0DYNMAP_GUARD_PAGES + 1;
-    if (cPages > PGMR0DYNMAP_MAX_PAGES)
-        cPages = PGMR0DYNMAP_MAX_PAGES;
-
-    if (cMinPages < pThis->cPages)
-        cMinPages = pThis->cPages;
-    cMinPages *= PGMR0DYNMAP_GUARD_PAGES + 1;
-    if (cMinPages > PGMR0DYNMAP_MAX_PAGES)
-        cMinPages = PGMR0DYNMAP_MAX_PAGES;
-
-    Assert(cMinPages);
-    *pcMinPages = cMinPages;
-    return cPages;
-}
-
-
-/**
- * Initializes the paging level data.
- *
- * @param   pThis       The dynamic mapping cache instance.
- * @param   pPgLvl      The paging level data.
- */
-void pgmR0DynMapPagingArrayInit(PPGMR0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl)
-{
-    RTCCUINTREG     cr4 = ASMGetCR4();
-    switch (pThis->enmPgMode)
-    {
-        case SUPPAGINGMODE_32_BIT:
-        case SUPPAGINGMODE_32_BIT_GLOBAL:
-            pPgLvl->cLevels = 2;
-            pPgLvl->a[0].fPhysMask = X86_CR3_PAGE_MASK;
-            pPgLvl->a[0].fAndMask  = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
-            pPgLvl->a[0].fResMask  = X86_PDE_P | X86_PDE_RW;
-            pPgLvl->a[0].fPtrMask  = X86_PD_MASK;
-            pPgLvl->a[0].fPtrShift = X86_PD_SHIFT;
-
-            pPgLvl->a[1].fPhysMask = X86_PDE_PG_MASK;
-            pPgLvl->a[1].fAndMask  = X86_PTE_P | X86_PTE_RW;
-            pPgLvl->a[1].fResMask  = X86_PTE_P | X86_PTE_RW;
-            pPgLvl->a[1].fPtrMask  = X86_PT_MASK;
-            pPgLvl->a[1].fPtrShift = X86_PT_SHIFT;
-            break;
-
-        case SUPPAGINGMODE_PAE:
-        case SUPPAGINGMODE_PAE_GLOBAL:
-        case SUPPAGINGMODE_PAE_NX:
-        case SUPPAGINGMODE_PAE_GLOBAL_NX:
-            pPgLvl->cLevels = 3;
-            pPgLvl->a[0].fPhysMask = X86_CR3_PAE_PAGE_MASK;
-            pPgLvl->a[0].fPtrMask  = X86_PDPT_MASK_PAE;
-            pPgLvl->a[0].fPtrShift = X86_PDPT_SHIFT;
-            pPgLvl->a[0].fAndMask  = X86_PDPE_P;
-            pPgLvl->a[0].fResMask  = X86_PDPE_P;
-
-            pPgLvl->a[1].fPhysMask = X86_PDPE_PG_MASK;
-            pPgLvl->a[1].fPtrMask  = X86_PD_PAE_MASK;
-            pPgLvl->a[1].fPtrShift = X86_PD_PAE_SHIFT;
-            pPgLvl->a[1].fAndMask  = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
-            pPgLvl->a[1].fResMask  = X86_PDE_P | X86_PDE_RW;
-
-            pPgLvl->a[2].fPhysMask = X86_PDE_PAE_PG_MASK;
-            pPgLvl->a[2].fPtrMask  = X86_PT_PAE_MASK;
-            pPgLvl->a[2].fPtrShift = X86_PT_PAE_SHIFT;
-            pPgLvl->a[2].fAndMask  = X86_PTE_P | X86_PTE_RW;
-            pPgLvl->a[2].fResMask  = X86_PTE_P | X86_PTE_RW;
-            break;
-
-        case SUPPAGINGMODE_AMD64:
-        case SUPPAGINGMODE_AMD64_GLOBAL:
-        case SUPPAGINGMODE_AMD64_NX:
-        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
-            pPgLvl->cLevels = 4;
-            pPgLvl->a[0].fPhysMask = X86_CR3_AMD64_PAGE_MASK;
-            pPgLvl->a[0].fPtrShift = X86_PML4_SHIFT;
-            pPgLvl->a[0].fPtrMask  = X86_PML4_MASK;
-            pPgLvl->a[0].fAndMask  = X86_PML4E_P | X86_PML4E_RW;
-            pPgLvl->a[0].fResMask  = X86_PML4E_P | X86_PML4E_RW;
-
-            pPgLvl->a[1].fPhysMask = X86_PML4E_PG_MASK;
-            pPgLvl->a[1].fPtrShift = X86_PDPT_SHIFT;
-            pPgLvl->a[1].fPtrMask  = X86_PDPT_MASK_AMD64;
-            pPgLvl->a[1].fAndMask  = X86_PDPE_P | X86_PDPE_RW /** @todo check for X86_PDPT_PS support. */;
-            pPgLvl->a[1].fResMask  = X86_PDPE_P | X86_PDPE_RW;
-
-            pPgLvl->a[2].fPhysMask = X86_PDPE_PG_MASK;
-            pPgLvl->a[2].fPtrShift = X86_PD_PAE_SHIFT;
-            pPgLvl->a[2].fPtrMask  = X86_PD_PAE_MASK;
-            pPgLvl->a[2].fAndMask  = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
-            pPgLvl->a[2].fResMask  = X86_PDE_P | X86_PDE_RW;
-
-            pPgLvl->a[3].fPhysMask = X86_PDE_PAE_PG_MASK;
-            pPgLvl->a[3].fPtrShift = X86_PT_PAE_SHIFT;
-            pPgLvl->a[3].fPtrMask  = X86_PT_PAE_MASK;
-            pPgLvl->a[3].fAndMask  = X86_PTE_P | X86_PTE_RW;
-            pPgLvl->a[3].fResMask  = X86_PTE_P | X86_PTE_RW;
-            break;
-
-        default:
-            AssertFailed();
-            pPgLvl->cLevels = 0;
-            break;
-    }
-
-    for (uint32_t i = 0; i < 4; i++) /* ASSUMING array size. */
-    {
-        pPgLvl->a[i].HCPhys = NIL_RTHCPHYS;
-        pPgLvl->a[i].hMapObj = NIL_RTR0MEMOBJ;
-        pPgLvl->a[i].hMemObj = NIL_RTR0MEMOBJ;
-        pPgLvl->a[i].u.pv = NULL;
-    }
-}
-
-
-/**
- * Maps a PTE.
- *
- * This will update the segment structure when new PTs are mapped.
- *
- * It also assumes that we (for paranoid reasons) wish to establish a mapping
- * chain from CR3 to the PT that all corresponds to the processor we're
- * currently running on, and go about this by running with interrupts disabled
- * and restarting from CR3 for every change.
- *
- * @returns VBox status code, VINF_TRY_AGAIN if we changed any mappings and had
- *          to re-enable interrupts.
- * @param   pThis       The dynamic mapping cache instance.
- * @param   pPgLvl      The paging level structure.
- * @param   pvPage      The page.
- * @param   pSeg        The segment.
- * @param   cMaxPTs     The max number of PTs expected in the segment.
- * @param   ppvPTE      Where to store the PTE address.
- */
-static int pgmR0DynMapPagingArrayMapPte(PPGMR0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl, void *pvPage,
-                                        PPGMR0DYNMAPSEG pSeg, uint32_t cMaxPTs, void **ppvPTE)
-{
-    Assert(!(ASMGetFlags() & X86_EFL_IF));
-    void           *pvEntry = NULL;
-    X86PGPAEUINT    uEntry = ASMGetCR3();
-    for (uint32_t i = 0; i < pPgLvl->cLevels; i++)
-    {
-        RTHCPHYS HCPhys = uEntry & pPgLvl->a[i].fPhysMask;
-        if (pPgLvl->a[i].HCPhys != HCPhys)
-        {
-            /*
-             * Need to remap this level.
-             * The final level, the PT, will not be freed since that is what it's all about.
-             */
-            ASMIntEnable();
-            if (i + 1 == pPgLvl->cLevels)
-                AssertReturn(pSeg->cPTs < cMaxPTs, VERR_INTERNAL_ERROR);
-            else
-            {
-                int rc2 = RTR0MemObjFree(pPgLvl->a[i].hMemObj, true /* fFreeMappings */); AssertRC(rc2);
-                pPgLvl->a[i].hMemObj = pPgLvl->a[i].hMapObj = NIL_RTR0MEMOBJ;
-            }
-
-            int rc = RTR0MemObjEnterPhys(&pPgLvl->a[i].hMemObj, HCPhys, PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
-            if (RT_SUCCESS(rc))
-            {
-                rc = RTR0MemObjMapKernel(&pPgLvl->a[i].hMapObj, pPgLvl->a[i].hMemObj,
-                                         (void *)-1 /* pvFixed */, 0 /* cbAlignment */,
-                                         RTMEM_PROT_WRITE | RTMEM_PROT_READ);
-                if (RT_SUCCESS(rc))
-                {
-                    pPgLvl->a[i].u.pv   = RTR0MemObjAddress(pPgLvl->a[i].hMapObj);
-                    AssertMsg(((uintptr_t)pPgLvl->a[i].u.pv & ~(uintptr_t)PAGE_OFFSET_MASK), ("%p\n", pPgLvl->a[i].u.pv));
-                    pPgLvl->a[i].HCPhys = HCPhys;
-                    if (i + 1 == pPgLvl->cLevels)
-                        pSeg->ahMemObjPTs[pSeg->cPTs++] = pPgLvl->a[i].hMemObj;
-                    ASMIntDisable();
-                    return VINF_TRY_AGAIN;
-                }
-
-                pPgLvl->a[i].hMapObj = NIL_RTR0MEMOBJ;
-            }
-            else
-                pPgLvl->a[i].hMemObj = NIL_RTR0MEMOBJ;
-            pPgLvl->a[i].HCPhys = NIL_RTHCPHYS;
-            return rc;
-        }
-
-        /*
-         * The next level.
-         */
-        uint32_t iEntry = ((uint64_t)(uintptr_t)pvPage >> pPgLvl->a[i].fPtrShift) & pPgLvl->a[i].fPtrMask;
-        if (pThis->fLegacyMode)
-        {
-            pvEntry = &pPgLvl->a[i].u.paLegacy[iEntry];
-            uEntry  = pPgLvl->a[i].u.paLegacy[iEntry];
-        }
-        else
-        {
-            pvEntry = &pPgLvl->a[i].u.paPae[iEntry];
-            uEntry  = pPgLvl->a[i].u.paPae[iEntry];
-        }
-
-        if ((uEntry & pPgLvl->a[i].fAndMask) != pPgLvl->a[i].fResMask)
-        {
-            LogRel(("PGMR0DynMap: internal error - iPgLvl=%u cLevels=%u uEntry=%#llx fAnd=%#llx fRes=%#llx got=%#llx\n"
-                    "PGMR0DynMap: pv=%p pvPage=%p iEntry=%#x fLegacyMode=%RTbool\n",
-                    i, pPgLvl->cLevels, uEntry, pPgLvl->a[i].fAndMask, pPgLvl->a[i].fResMask, uEntry & pPgLvl->a[i].fAndMask,
-                    pPgLvl->a[i].u.pv, pvPage, iEntry, pThis->fLegacyMode));
-            return VERR_INTERNAL_ERROR;
-        }
-        /*Log(("#%d: iEntry=%4d uEntry=%#llx pvEntry=%p HCPhys=%RHp \n", i, iEntry, uEntry, pvEntry, pPgLvl->a[i].HCPhys));*/
-    }
-
-    /* made it thru without needing to remap anything. */
-    *ppvPTE = pvEntry;
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Sets up a guard page.
- *
- * @param   pThis       The dynamic mapping cache instance.
- * @param   pPage       The page.
- */
-DECLINLINE(void) pgmR0DynMapSetupGuardPage(PPGMR0DYNMAP pThis, PPGMR0DYNMAPENTRY pPage)
-{
-    memset(pPage->pvPage, 0xfd, PAGE_SIZE);
-    pPage->cRefs  = PGMR0DYNMAP_GUARD_PAGE_REF_COUNT;
-    pPage->HCPhys = PGMR0DYNMAP_GUARD_PAGE_HCPHYS;
-#ifdef PGMR0DYNMAP_GUARD_NP
-    ASMAtomicBitClear(pPage->uPte.pv, X86_PTE_BIT_P);
-#else
-    if (pThis->fLegacyMode)
-        ASMAtomicWriteU32(&pPage->uPte.pLegacy->u, PGMR0DYNMAP_GUARD_PAGE_LEGACY_PTE);
-    else
-        ASMAtomicWriteU64(&pPage->uPte.pPae->u,    PGMR0DYNMAP_GUARD_PAGE_PAE_PTE);
-#endif
-    pThis->cGuardPages++;
-}
-
-
-/**
- * Adds a new segment of the specified size.
- *
- * @returns VBox status code.
- * @param   pThis       The dynamic mapping cache instance.
- * @param   cPages      The size of the new segment, give as a page count.
- */
-static int pgmR0DynMapAddSeg(PPGMR0DYNMAP pThis, uint32_t cPages)
-{
-    int rc2;
-    AssertReturn(ASMGetFlags() & X86_EFL_IF, VERR_PREEMPT_DISABLED);
-
-    /*
-     * Do the array reallocations first.
-     * (The pages array has to be replaced behind the spinlock of course.)
-     */
-    void *pvSavedPTEs = RTMemRealloc(pThis->pvSavedPTEs, (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * (pThis->cPages + cPages));
-    if (!pvSavedPTEs)
-        return VERR_NO_MEMORY;
-    pThis->pvSavedPTEs = pvSavedPTEs;
-
-    void *pvPages = RTMemAllocZ(sizeof(pThis->paPages[0]) * (pThis->cPages + cPages));
-    if (!pvPages)
-    {
-        pvSavedPTEs = RTMemRealloc(pThis->pvSavedPTEs, (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * pThis->cPages);
-        if (pvSavedPTEs)
-            pThis->pvSavedPTEs = pvSavedPTEs;
-        return VERR_NO_MEMORY;
-    }
-
-    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
-    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
-
-    memcpy(pvPages, pThis->paPages, sizeof(pThis->paPages[0]) * pThis->cPages);
-    void *pvToFree = pThis->paPages;
-    pThis->paPages = (PPGMR0DYNMAPENTRY)pvPages;
-
-    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
-    RTMemFree(pvToFree);
-
-    /*
-     * Allocate the segment structure and pages of memory, then touch all the pages (paranoia).
-     */
-    uint32_t cMaxPTs = cPages / (pThis->fLegacyMode ? X86_PG_ENTRIES : X86_PG_PAE_ENTRIES) + 2;
-    PPGMR0DYNMAPSEG pSeg = (PPGMR0DYNMAPSEG)RTMemAllocZ(RT_UOFFSETOF(PGMR0DYNMAPSEG, ahMemObjPTs[cMaxPTs]));
-    if (!pSeg)
-        return VERR_NO_MEMORY;
-    pSeg->pNext  = NULL;
-    pSeg->cPages = cPages;
-    pSeg->iPage  = pThis->cPages;
-    pSeg->cPTs   = 0;
-    int rc = RTR0MemObjAllocPage(&pSeg->hMemObj, cPages << PAGE_SHIFT, false);
-    if (RT_SUCCESS(rc))
-    {
-        uint8_t            *pbPage = (uint8_t *)RTR0MemObjAddress(pSeg->hMemObj);
-        AssertMsg(VALID_PTR(pbPage) && !((uintptr_t)pbPage & PAGE_OFFSET_MASK), ("%p\n", pbPage));
-        memset(pbPage, 0xfe, cPages << PAGE_SHIFT);
-
-        /*
-         * Walk thru the pages and set them up with a mapping of their PTE and everything.
-         */
-        ASMIntDisable();
-        PGMR0DYNMAPPGLVL    PgLvl;
-        pgmR0DynMapPagingArrayInit(pThis, &PgLvl);
-        uint32_t const      iEndPage = pSeg->iPage + cPages;
-        for (uint32_t iPage = pSeg->iPage;
-             iPage < iEndPage;
-             iPage++, pbPage += PAGE_SIZE)
-        {
-            /* Initialize the page data. */
-            pThis->paPages[iPage].HCPhys = NIL_RTHCPHYS;
-            pThis->paPages[iPage].pvPage = pbPage;
-            pThis->paPages[iPage].cRefs  = 0;
-            pThis->paPages[iPage].uPte.pPae = 0;
-            RTCpuSetFill(&pThis->paPages[iPage].PendingSet);
-
-            /* Map its page table, retry until we've got a clean run (paranoia). */
-            do
-                rc = pgmR0DynMapPagingArrayMapPte(pThis, &PgLvl, pbPage, pSeg, cMaxPTs,
-                                                  &pThis->paPages[iPage].uPte.pv);
-            while (rc == VINF_TRY_AGAIN);
-            if (RT_FAILURE(rc))
-                break;
-
-            /* Save the PTE. */
-            if (pThis->fLegacyMode)
-                ((PX86PGUINT)pThis->pvSavedPTEs)[iPage]    = pThis->paPages[iPage].uPte.pLegacy->u;
-            else
-                ((PX86PGPAEUINT)pThis->pvSavedPTEs)[iPage] = pThis->paPages[iPage].uPte.pPae->u;
-
-#ifdef VBOX_STRICT
-            /* Check that we've got the right entry. */
-            RTHCPHYS HCPhysPage = RTR0MemObjGetPagePhysAddr(pSeg->hMemObj, iPage - pSeg->iPage);
-            RTHCPHYS HCPhysPte  = pThis->fLegacyMode
-                                ? pThis->paPages[iPage].uPte.pLegacy->u & X86_PTE_PG_MASK
-                                : pThis->paPages[iPage].uPte.pPae->u    & X86_PTE_PAE_PG_MASK;
-            if (HCPhysPage != HCPhysPte)
-            {
-                LogRel(("pgmR0DynMapAddSeg: internal error - page #%u HCPhysPage=%RHp HCPhysPte=%RHp pbPage=%p pvPte=%p\n",
-                        iPage - pSeg->iPage, HCPhysPage, HCPhysPte, pbPage, pThis->paPages[iPage].uPte.pv));
-                rc = VERR_INTERNAL_ERROR;
-                break;
-            }
-#endif
-        } /* for each page */
-        ASMIntEnable();
-
-        /* cleanup non-PT mappings */
-        for (uint32_t i = 0; i < PgLvl.cLevels - 1; i++)
-            RTR0MemObjFree(PgLvl.a[i].hMemObj, true /* fFreeMappings */);
-
-        if (RT_SUCCESS(rc))
-        {
-#if PGMR0DYNMAP_GUARD_PAGES > 0
-            /*
-             * Setup guard pages.
-             * (Note: TLBs will be shot down later on.)
-             */
-            uint32_t iPage = pSeg->iPage;
-            while (iPage < iEndPage)
-            {
-                for (uint32_t iGPg = 0; iGPg < PGMR0DYNMAP_GUARD_PAGES && iPage < iEndPage; iGPg++, iPage++)
-                    pgmR0DynMapSetupGuardPage(pThis, &pThis->paPages[iPage]);
-                iPage++; /* the guarded page */
-            }
-
-            /* Make sure the very last page is a guard page too. */
-            iPage = iEndPage - 1;
-            if (pThis->paPages[iPage].cRefs != PGMR0DYNMAP_GUARD_PAGE_REF_COUNT)
-                pgmR0DynMapSetupGuardPage(pThis, &pThis->paPages[iPage]);
-#endif /* PGMR0DYNMAP_GUARD_PAGES > 0 */
-
-            /*
-             * Commit it by adding the segment to the list and updating the page count.
-             */
-            pSeg->pNext = pThis->pSegHead;
-            pThis->pSegHead = pSeg;
-            pThis->cPages += cPages;
-            return VINF_SUCCESS;
-        }
-
-        /*
-         * Bail out.
-         */
-        while (pSeg->cPTs-- > 0)
-        {
-            rc2 = RTR0MemObjFree(pSeg->ahMemObjPTs[pSeg->cPTs], true /* fFreeMappings */);
-            AssertRC(rc2);
-            pSeg->ahMemObjPTs[pSeg->cPTs] = NIL_RTR0MEMOBJ;
-        }
-
-        rc2 = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */);
-        AssertRC(rc2);
-        pSeg->hMemObj = NIL_RTR0MEMOBJ;
-    }
-    RTMemFree(pSeg);
-
-    /* Don't bother resizing the arrays, but free them if we're the only user. */
-    if (!pThis->cPages)
-    {
-        RTMemFree(pThis->paPages);
-        pThis->paPages = NULL;
-        RTMemFree(pThis->pvSavedPTEs);
-        pThis->pvSavedPTEs = NULL;
-    }
-    return rc;
-}
-
-
-/**
- * Called by PGMR0DynMapInitVM under the init lock.
- *
- * @returns VBox status code.
- * @param   pThis       The dynamic mapping cache instance.
- */
-static int pgmR0DynMapSetup(PPGMR0DYNMAP pThis)
-{
-    /*
-     * Calc the size and add a segment of that size.
-     */
-    uint32_t cMinPages;
-    uint32_t cPages = pgmR0DynMapCalcNewSize(pThis, &cMinPages);
-    AssertReturn(cPages, VERR_INTERNAL_ERROR);
-    int rc = pgmR0DynMapAddSeg(pThis, cPages);
-    if (rc == VERR_NO_MEMORY)
-    {
-        /*
-         * Try adding smaller segments.
-         */
-        do
-            rc = pgmR0DynMapAddSeg(pThis, PGMR0DYNMAP_SMALL_SEG_PAGES);
-        while (RT_SUCCESS(rc) && pThis->cPages < cPages);
-        if (rc == VERR_NO_MEMORY && pThis->cPages >= cMinPages)
-            rc = VINF_SUCCESS;
-        if (rc == VERR_NO_MEMORY)
-        {
-            if (pThis->cPages)
-                pgmR0DynMapTearDown(pThis);
-            rc = VERR_PGM_DYNMAP_SETUP_ERROR;
-        }
-    }
-    Assert(ASMGetFlags() & X86_EFL_IF);
-
-#if PGMR0DYNMAP_GUARD_PAGES > 0
-    /* paranoia */
-    if (RT_SUCCESS(rc))
-        pgmR0DynMapTlbShootDown(pThis);
-#endif
-    return rc;
-}
-
-
-/**
- * Called by PGMR0DynMapInitVM under the init lock.
- *
- * @returns VBox status code.
- * @param   pThis       The dynamic mapping cache instance.
- */
-static int pgmR0DynMapExpand(PPGMR0DYNMAP pThis)
-{
-    /*
-     * Calc the new target size and add a segment of the appropriate size.
-     */
-    uint32_t cMinPages;
-    uint32_t cPages = pgmR0DynMapCalcNewSize(pThis, &cMinPages);
-    AssertReturn(cPages, VERR_INTERNAL_ERROR);
-    if (pThis->cPages >= cPages)
-        return VINF_SUCCESS;
-
-    uint32_t cAdd = cPages - pThis->cPages;
-    int rc = pgmR0DynMapAddSeg(pThis, cAdd);
-    if (rc == VERR_NO_MEMORY)
-    {
-        /*
-         * Try adding smaller segments.
-         */
-        do
-            rc = pgmR0DynMapAddSeg(pThis, PGMR0DYNMAP_SMALL_SEG_PAGES);
-        while (RT_SUCCESS(rc) && pThis->cPages < cPages);
-        if (rc == VERR_NO_MEMORY && pThis->cPages >= cMinPages)
-            rc = VINF_SUCCESS;
-        if (rc == VERR_NO_MEMORY)
-            rc = VERR_PGM_DYNMAP_EXPAND_ERROR;
-    }
-    Assert(ASMGetFlags() & X86_EFL_IF);
-
-#if PGMR0DYNMAP_GUARD_PAGES > 0
-    /* paranoia */
-    if (RT_SUCCESS(rc))
-        pgmR0DynMapTlbShootDown(pThis);
-#endif
-    return rc;
-}
-
-
-/**
- * Called by PGMR0DynMapTermVM under the init lock.
- *
- * @returns VBox status code.
- * @param   pThis       The dynamic mapping cache instance.
- */
-static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis)
-{
-    /*
-     * Restore the original page table entries
-     */
-    PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
-    uint32_t            iPage   = pThis->cPages;
-    if (pThis->fLegacyMode)
-    {
-        X86PGUINT const    *paSavedPTEs = (X86PGUINT const *)pThis->pvSavedPTEs;
-        while (iPage-- > 0)
-        {
-            X86PGUINT       uOld  = paPages[iPage].uPte.pLegacy->u;
-            X86PGUINT       uOld2 = uOld; NOREF(uOld2);
-            X86PGUINT       uNew  = paSavedPTEs[iPage];
-            while (!ASMAtomicCmpXchgExU32(&paPages[iPage].uPte.pLegacy->u, uNew, uOld, &uOld))
-                AssertMsgFailed(("uOld=%#x uOld2=%#x uNew=%#x\n", uOld, uOld2, uNew));
-            Assert(paPages[iPage].uPte.pLegacy->u == paSavedPTEs[iPage]);
-        }
-    }
-    else
-    {
-        X86PGPAEUINT const *paSavedPTEs = (X86PGPAEUINT const *)pThis->pvSavedPTEs;
-        while (iPage-- > 0)
-        {
-            X86PGPAEUINT    uOld  = paPages[iPage].uPte.pPae->u;
-            X86PGPAEUINT    uOld2 = uOld; NOREF(uOld2);
-            X86PGPAEUINT    uNew  = paSavedPTEs[iPage];
-            while (!ASMAtomicCmpXchgExU64(&paPages[iPage].uPte.pPae->u, uNew, uOld, &uOld))
-                AssertMsgFailed(("uOld=%#llx uOld2=%#llx uNew=%#llx\n", uOld, uOld2, uNew));
-            Assert(paPages[iPage].uPte.pPae->u == paSavedPTEs[iPage]);
-        }
-    }
-
-    /*
-     * Shoot down the TLBs on all CPUs before freeing them.
-     */
-    pgmR0DynMapTlbShootDown(pThis);
-
-    /*
-     * Free the segments.
-     */
-    while (pThis->pSegHead)
-    {
-        int             rc;
-        PPGMR0DYNMAPSEG pSeg = pThis->pSegHead;
-        pThis->pSegHead = pSeg->pNext;
-
-        uint32_t iPT = pSeg->cPTs;
-        while (iPT-- > 0)
-        {
-            rc = RTR0MemObjFree(pSeg->ahMemObjPTs[iPT], true /* fFreeMappings */); AssertRC(rc);
-            pSeg->ahMemObjPTs[iPT] = NIL_RTR0MEMOBJ;
-        }
-        rc = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */); AssertRC(rc);
-        pSeg->hMemObj   = NIL_RTR0MEMOBJ;
-        pSeg->pNext     = NULL;
-        pSeg->iPage     = UINT16_MAX;
-        pSeg->cPages    = 0;
-        pSeg->cPTs      = 0;
-        RTMemFree(pSeg);
-    }
-
-    /*
-     * Free the arrays and restore the initial state.
-     * The cLoadMax value is left behind for the next setup.
-     */
-    RTMemFree(pThis->paPages);
-    pThis->paPages = NULL;
-    RTMemFree(pThis->pvSavedPTEs);
-    pThis->pvSavedPTEs = NULL;
-    pThis->cPages = 0;
-    pThis->cLoad = 0;
-    pThis->cGuardPages = 0;
-}
-
-
-/**
- * Release references to a page, caller owns the spin lock.
- *
- * @param   pThis       The dynamic mapping cache instance.
- * @param   iPage       The page.
- * @param   cRefs       The number of references to release.
- */
-DECLINLINE(void) pgmR0DynMapReleasePageLocked(PPGMR0DYNMAP pThis, uint32_t iPage, int32_t cRefs)
-{
-    cRefs = ASMAtomicSubS32(&pThis->paPages[iPage].cRefs, cRefs) - cRefs;
-    AssertMsg(cRefs >= 0, ("%d\n", cRefs));
-    if (!cRefs)
-        pThis->cLoad--;
-}
-
-
-/**
- * Release references to a page, caller does not own the spin lock.
- *
- * @param   pThis       The dynamic mapping cache instance.
- * @param   iPage       The page.
- * @param   cRefs       The number of references to release.
- */
-static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs)
-{
-    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
-    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
-    pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
-    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
-}
-
-
-/**
- * pgmR0DynMapPage worker that deals with the tedious bits.
- *
- * @returns The page index on success, UINT32_MAX on failure.
- * @param   pThis       The dynamic mapping cache instance.
- * @param   HCPhys      The address of the page to be mapped.
- * @param   iPage       The page index pgmR0DynMapPage hashed HCPhys to.
- * @param   pVCpu       The current CPU, for statistics.
- */
-static uint32_t pgmR0DynMapPageSlow(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage, PVMCPU pVCpu)
-{
-    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageSlow);
-
-    /*
-     * Check if any of the first 3 pages are unreferenced since the caller
-     * already has made sure they aren't matching.
-     */
-#ifdef VBOX_WITH_STATISTICS
-    bool                fLooped = false;
-#endif
-    uint32_t const      cPages  = pThis->cPages;
-    PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
-    uint32_t            iFreePage;
-    if (!paPages[iPage].cRefs)
-        iFreePage = iPage;
-    else if (!paPages[(iPage + 1) % cPages].cRefs)
-        iFreePage   = (iPage + 1) % cPages;
-    else if (!paPages[(iPage + 2) % cPages].cRefs)
-        iFreePage   = (iPage + 2) % cPages;
-    else
-    {
-        /*
-         * Search for an unused or matching entry.
-         */
-        iFreePage = (iPage + 3) % cPages;
-        for (;;)
-        {
-            if (paPages[iFreePage].HCPhys == HCPhys)
-            {
-                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageSlowLoopHits);
-                return iFreePage;
-            }
-            if (!paPages[iFreePage].cRefs)
-                break;
-
-            /* advance */
-            iFreePage = (iFreePage + 1) % cPages;
-            if (RT_UNLIKELY(iFreePage == iPage))
-                return UINT32_MAX;
-        }
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageSlowLoopMisses);
-#ifdef VBOX_WITH_STATISTICS
-        fLooped = true;
-#endif
-    }
-    Assert(iFreePage < cPages);
-
-#if 0 //def VBOX_WITH_STATISTICS
-    /* Check for lost hits. */
-    if (!fLooped)
-        for (uint32_t iPage2 = (iPage + 3) % cPages; iPage2 != iPage; iPage2 = (iPage2 + 1) % cPages)
-            if (paPages[iPage2].HCPhys == HCPhys)
-                STAM_COUNTER_INC(&pVCpu->pgm.s.StatR0DynMapPageSlowLostHits);
-#endif
-
-    /*
-     * Setup the new entry.
-     */
-    /*Log6(("pgmR0DynMapPageSlow: old - %RHp %#x %#llx\n", paPages[iFreePage].HCPhys, paPages[iFreePage].cRefs, paPages[iFreePage].uPte.pPae->u));*/
-    paPages[iFreePage].HCPhys = HCPhys;
-    RTCpuSetFill(&paPages[iFreePage].PendingSet);
-    if (pThis->fLegacyMode)
-    {
-        X86PGUINT       uOld  = paPages[iFreePage].uPte.pLegacy->u;
-        X86PGUINT       uOld2 = uOld; NOREF(uOld2);
-        X86PGUINT       uNew  = (uOld & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
-                              | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
-                              | (HCPhys & X86_PTE_PG_MASK);
-        while (!ASMAtomicCmpXchgExU32(&paPages[iFreePage].uPte.pLegacy->u, uNew, uOld, &uOld))
-            AssertMsgFailed(("uOld=%#x uOld2=%#x uNew=%#x\n", uOld, uOld2, uNew));
-        Assert(paPages[iFreePage].uPte.pLegacy->u == uNew);
-    }
-    else
-    {
-        X86PGPAEUINT    uOld  = paPages[iFreePage].uPte.pPae->u;
-        X86PGPAEUINT    uOld2 = uOld; NOREF(uOld2);
-        X86PGPAEUINT    uNew  = (uOld & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
-                              | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
-                              | (HCPhys & X86_PTE_PAE_PG_MASK);
-        while (!ASMAtomicCmpXchgExU64(&paPages[iFreePage].uPte.pPae->u, uNew, uOld, &uOld))
-            AssertMsgFailed(("uOld=%#llx uOld2=%#llx uNew=%#llx\n", uOld, uOld2, uNew));
-        Assert(paPages[iFreePage].uPte.pPae->u == uNew);
-        /*Log6(("pgmR0DynMapPageSlow: #%x - %RHp %p %#llx\n", iFreePage, HCPhys, paPages[iFreePage].pvPage, uNew));*/
-    }
-    return iFreePage;
-}
-
-
-/**
- * Maps a page into the pool.
- *
- * @returns Page index on success, UINT32_MAX on failure.
- * @param   pThis       The dynamic mapping cache instance.
- * @param   HCPhys      The address of the page to be mapped.
- * @param   iRealCpu    The real cpu set index. (optimization)
- * @param   pVCpu       The current CPU (for statistics).
- * @param   ppvPage     Where to the page address.
- */
-DECLINLINE(uint32_t) pgmR0DynMapPage(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, int32_t iRealCpu, PVMCPU pVCpu, void **ppvPage)
-{
-    RTSPINLOCKTMP       Tmp     = RTSPINLOCKTMP_INITIALIZER;
-    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
-    AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
-    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPage);
-
-    /*
-     * Find an entry, if possible a matching one. The HCPhys address is hashed
-     * down to a page index, collisions are handled by linear searching.
-     * Optimized for a hit in the first 3 pages.
-     *
-     * Field easy hits here and defer the tedious searching and inserting
-     * to pgmR0DynMapPageSlow().
-     */
-    uint32_t const      cPages  = pThis->cPages;
-    uint32_t            iPage   = (HCPhys >> PAGE_SHIFT) % cPages;
-    PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
-    if (RT_LIKELY(paPages[iPage].HCPhys == HCPhys))
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageHits0);
-    else
-    {
-        uint32_t        iPage2 = (iPage + 1) % cPages;
-        if (RT_LIKELY(paPages[iPage2].HCPhys == HCPhys))
-        {
-            iPage = iPage2;
-            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageHits1);
-        }
-        else
-        {
-            iPage2 = (iPage + 2) % cPages;
-            if (paPages[iPage2].HCPhys == HCPhys)
-            {
-                iPage = iPage2;
-                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageHits2);
-            }
-            else
-            {
-                iPage = pgmR0DynMapPageSlow(pThis, HCPhys, iPage, pVCpu);
-                if (RT_UNLIKELY(iPage == UINT32_MAX))
-                {
-                    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
-                    *ppvPage = NULL;
-                    return iPage;
-                }
-            }
-        }
-    }
-
-    /*
-     * Reference it, update statistics and get the return address.
-     */
-    int32_t cRefs = ASMAtomicIncS32(&paPages[iPage].cRefs);
-    if (cRefs == 1)
-    {
-        pThis->cLoad++;
-        if (pThis->cLoad > pThis->cMaxLoad)
-            pThis->cMaxLoad = pThis->cLoad;
-        AssertMsg(pThis->cLoad <= pThis->cPages - pThis->cGuardPages, ("%d/%d\n", pThis->cLoad, pThis->cPages - pThis->cGuardPages));
-    }
-    else if (RT_UNLIKELY(cRefs <= 0))
-    {
-        ASMAtomicDecS32(&paPages[iPage].cRefs);
-        RTSpinlockRelease(pThis->hSpinlock, &Tmp);
-        *ppvPage = NULL;
-        AssertLogRelMsgFailedReturn(("cRefs=%d iPage=%p HCPhys=%RHp\n", cRefs, iPage, HCPhys), UINT32_MAX);
-    }
-    void *pvPage = paPages[iPage].pvPage;
-
-    /*
-     * Invalidate the entry?
-     */
-    bool fInvalidateIt = RTCpuSetIsMemberByIndex(&paPages[iPage].PendingSet, iRealCpu);
-    if (RT_UNLIKELY(fInvalidateIt))
-        RTCpuSetDelByIndex(&paPages[iPage].PendingSet, iRealCpu);
-
-    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
-
-    /*
-     * Do the actual invalidation outside the spinlock.
-     */
-    if (RT_UNLIKELY(fInvalidateIt))
-    {
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageInvlPg);
-        ASMInvalidatePage(pvPage);
-    }
-
-    *ppvPage = pvPage;
-    return iPage;
-}
-
-
-/**
- * Assert the the integrity of the pool.
- *
- * @returns VBox status code.
- */
-VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void)
-{
-    /*
-     * Basic pool stuff that doesn't require any lock, just assumes we're a user.
-     */
-    PPGMR0DYNMAP        pThis       = g_pPGMR0DynMap;
-    if (!pThis)
-        return VINF_SUCCESS;
-    AssertPtrReturn(pThis, VERR_INVALID_POINTER);
-    AssertReturn(pThis->u32Magic == PGMR0DYNMAP_MAGIC, VERR_INVALID_MAGIC);
-    if (!pThis->cUsers)
-        return VERR_INVALID_PARAMETER;
-
-
-    int                 rc          = VINF_SUCCESS;
-    RTSPINLOCKTMP       Tmp         = RTSPINLOCKTMP_INITIALIZER;
-    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
-
-#define CHECK_RET(expr, a) \
-    do { \
-        if (RT_UNLIKELY(!(expr))) \
-        { \
-            RTSpinlockRelease(pThis->hSpinlock, &Tmp); \
-            RTAssertMsg1Weak(#expr, __LINE__, __FILE__, __PRETTY_FUNCTION__); \
-            RTAssertMsg2Weak a; \
-            return VERR_INTERNAL_ERROR; \
-        } \
-    } while (0)
-
-    /*
-     * Check that the PTEs are correct.
-     */
-    uint32_t            cGuard      = 0;
-    uint32_t            cLoad       = 0;
-    PPGMR0DYNMAPENTRY   paPages     = pThis->paPages;
-    uint32_t            iPage       = pThis->cPages;
-    if (pThis->fLegacyMode)
-    {
-        PCX86PGUINT     paSavedPTEs = (PCX86PGUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
-        while (iPage-- > 0)
-        {
-            CHECK_RET(!((uintptr_t)paPages[iPage].pvPage & PAGE_OFFSET_MASK), ("#%u: %p\n", iPage, paPages[iPage].pvPage));
-            if (    paPages[iPage].cRefs  == PGMR0DYNMAP_GUARD_PAGE_REF_COUNT
-                &&  paPages[iPage].HCPhys == PGMR0DYNMAP_GUARD_PAGE_HCPHYS)
-            {
-#ifdef PGMR0DYNMAP_GUARD_NP
-                CHECK_RET(paPages[iPage].uPte.pLegacy->u == (paSavedPTEs[iPage] & ~(X86PGUINT)X86_PTE_P),
-                          ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, paSavedPTEs[iPage]));
-#else
-                CHECK_RET(paPages[iPage].uPte.pLegacy->u == PGMR0DYNMAP_GUARD_PAGE_LEGACY_PTE,
-                          ("#%u: %#x", iPage, paPages[iPage].uPte.pLegacy->u));
-#endif
-                cGuard++;
-            }
-            else if (paPages[iPage].HCPhys != NIL_RTHCPHYS)
-            {
-                CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
-                X86PGUINT uPte = (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
-                               | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
-                               | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
-                CHECK_RET(paPages[iPage].uPte.pLegacy->u == uPte,
-                          ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, uPte));
-                if (paPages[iPage].cRefs)
-                    cLoad++;
-            }
-            else
-                CHECK_RET(paPages[iPage].uPte.pLegacy->u == paSavedPTEs[iPage],
-                          ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, paSavedPTEs[iPage]));
-        }
-    }
-    else
-    {
-        PCX86PGPAEUINT  paSavedPTEs = (PCX86PGPAEUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
-        while (iPage-- > 0)
-        {
-            CHECK_RET(!((uintptr_t)paPages[iPage].pvPage & PAGE_OFFSET_MASK), ("#%u: %p\n", iPage, paPages[iPage].pvPage));
-            if (    paPages[iPage].cRefs  == PGMR0DYNMAP_GUARD_PAGE_REF_COUNT
-                &&  paPages[iPage].HCPhys == PGMR0DYNMAP_GUARD_PAGE_HCPHYS)
-            {
-#ifdef PGMR0DYNMAP_GUARD_NP
-                CHECK_RET(paPages[iPage].uPte.pPae->u == (paSavedPTEs[iPage] & ~(X86PGPAEUINT)X86_PTE_P),
-                          ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, paSavedPTEs[iPage]));
-#else
-                CHECK_RET(paPages[iPage].uPte.pPae->u == PGMR0DYNMAP_GUARD_PAGE_PAE_PTE,
-                          ("#%u: %#llx", iPage, paPages[iPage].uPte.pPae->u));
-#endif
-                cGuard++;
-            }
-            else if (paPages[iPage].HCPhys != NIL_RTHCPHYS)
-            {
-                CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
-                X86PGPAEUINT uPte = (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
-                                  | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
-                                  | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
-                CHECK_RET(paPages[iPage].uPte.pPae->u == uPte,
-                          ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pLegacy->u, uPte));
-                if (paPages[iPage].cRefs)
-                    cLoad++;
-            }
-            else
-                CHECK_RET(paPages[iPage].uPte.pPae->u == paSavedPTEs[iPage],
-                          ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, paSavedPTEs[iPage]));
-        }
-    }
-
-    CHECK_RET(cLoad == pThis->cLoad, ("%u %u\n", cLoad, pThis->cLoad));
-    CHECK_RET(cGuard == pThis->cGuardPages, ("%u %u\n", cGuard, pThis->cGuardPages));
-
-#undef CHECK_RET
-    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Signals the start of a new set of mappings.
- *
- * Mostly for strictness. PGMDynMapHCPage won't work unless this
- * API is called.
- *
- * @param   pVCpu       The shared data for the current virtual CPU.
- */
-VMMDECL(void) PGMDynMapStartAutoSet(PVMCPU pVCpu)
-{
-    Assert(pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED);
-    Assert(pVCpu->pgm.s.AutoSet.iSubset == UINT32_MAX);
-    pVCpu->pgm.s.AutoSet.cEntries = 0;
-    pVCpu->pgm.s.AutoSet.iCpu = RTMpCpuIdToSetIndex(RTMpCpuId());
-}
-
-
-/**
- * Starts or migrates the autoset of a virtual CPU.
- *
- * This is used by HWACCMR0Enter.  When we've longjumped out of the HWACCM
- * execution loop with the set open, we'll migrate it when re-entering.  While
- * under normal circumstances, we'll start it so VMXR0LoadGuestState can access
- * guest memory.
- *
- * @returns @c true if started, @c false if migrated.
- * @param   pVCpu       The shared data for the current virtual CPU.
- * @thread  EMT
- */
-VMMDECL(bool) PGMDynMapStartOrMigrateAutoSet(PVMCPU pVCpu)
-{
-    bool fStartIt = pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED;
-    if (fStartIt)
-        PGMDynMapStartAutoSet(pVCpu);
-    else
-        PGMDynMapMigrateAutoSet(pVCpu);
-    return fStartIt;
-}
-
-
-/**
- * Worker that performs the actual flushing of the set.
- *
- * @param   pSet        The set to flush.
- * @param   cEntries    The number of entries.
- */
-DECLINLINE(void) pgmDynMapFlushAutoSetWorker(PPGMMAPSET pSet, uint32_t cEntries)
-{
-    /*
-     * Release any pages it's referencing.
-     */
-    if (    cEntries != 0
-        &&  RT_LIKELY(cEntries <= RT_ELEMENTS(pSet->aEntries)))
-    {
-        PPGMR0DYNMAP    pThis   = g_pPGMR0DynMap;
-        RTSPINLOCKTMP   Tmp     = RTSPINLOCKTMP_INITIALIZER;
-        RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
-
-        uint32_t i = cEntries;
-        while (i-- > 0)
-        {
-            uint32_t iPage = pSet->aEntries[i].iPage;
-            Assert(iPage < pThis->cPages);
-            int32_t  cRefs = pSet->aEntries[i].cRefs;
-            Assert(cRefs > 0);
-            pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
-
-            pSet->aEntries[i].iPage = UINT16_MAX;
-            pSet->aEntries[i].cRefs = 0;
-        }
-
-        Assert(pThis->cLoad <= pThis->cPages - pThis->cGuardPages);
-        RTSpinlockRelease(pThis->hSpinlock, &Tmp);
-    }
-}
-
-
-/**
- * Releases the dynamic memory mappings made by PGMDynMapHCPage and associates
- * since the PGMDynMapStartAutoSet call.
- *
- * @param   pVCpu       The shared data for the current virtual CPU.
- */
-VMMDECL(void) PGMDynMapReleaseAutoSet(PVMCPU pVCpu)
-{
-    PPGMMAPSET  pSet = &pVCpu->pgm.s.AutoSet;
-
-    /*
-     * Close and flush the set.
-     */
-    uint32_t    cEntries = pSet->cEntries;
-    AssertReturnVoid(cEntries != PGMMAPSET_CLOSED);
-    pSet->cEntries = PGMMAPSET_CLOSED;
-    pSet->iSubset = UINT32_MAX;
-    pSet->iCpu = -1;
-
-    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
-    AssertMsg(cEntries < PGMMAPSET_MAX_FILL, ("%u\n", cEntries));
-    if (cEntries > RT_ELEMENTS(pSet->aEntries) * 50 / 100)
-        Log(("PGMDynMapReleaseAutoSet: cEntries=%d\n", pSet->cEntries));
-
-    pgmDynMapFlushAutoSetWorker(pSet, cEntries);
-}
-
-
-/**
- * Flushes the set if it's above a certain threshold.
- *
- * @param   pVCpu       The shared data for the current virtual CPU.
- */
-VMMDECL(void) PGMDynMapFlushAutoSet(PVMCPU pVCpu)
-{
-    PPGMMAPSET  pSet = &pVCpu->pgm.s.AutoSet;
-    AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));
-
-    /*
-     * Only flush it if it's 45% full.
-     */
-    uint32_t cEntries = pSet->cEntries;
-    AssertReturnVoid(cEntries != PGMMAPSET_CLOSED);
-    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
-    if (cEntries >= RT_ELEMENTS(pSet->aEntries) * 45 / 100)
-    {
-        pSet->cEntries = 0;
-
-        AssertMsg(cEntries < PGMMAPSET_MAX_FILL, ("%u\n", cEntries));
-        Log(("PGMDynMapFlushAutoSet: cEntries=%d\n", pSet->cEntries));
-
-        pgmDynMapFlushAutoSetWorker(pSet, cEntries);
-        AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));
-    }
-}
-
-
-/**
- * Migrates the automatic mapping set of the current vCPU if it's active and
- * necessary.
- *
- * This is called when re-entering the hardware assisted execution mode after a
- * nip down to ring-3.  We run the risk that the CPU might have change and we
- * will therefore make sure all the cache entries currently in the auto set will
- * be valid on the new CPU.  If the cpu didn't change nothing will happen as all
- * the entries will have been flagged as invalidated.
- *
- * @param   pVCpu       The shared data for the current virtual CPU.
- * @thread  EMT
- */
-VMMDECL(void) PGMDynMapMigrateAutoSet(PVMCPU pVCpu)
-{
-    PPGMMAPSET      pSet     = &pVCpu->pgm.s.AutoSet;
-    int32_t         iRealCpu = RTMpCpuIdToSetIndex(RTMpCpuId());
-    if (pSet->iCpu != iRealCpu)
-    {
-        uint32_t    i        = pSet->cEntries;
-        if (i != PGMMAPSET_CLOSED)
-        {
-            AssertMsg(i <= RT_ELEMENTS(pSet->aEntries), ("%#x (%u)\n", i, i));
-            if (i != 0 && RT_LIKELY(i <= RT_ELEMENTS(pSet->aEntries)))
-            {
-                PPGMR0DYNMAP    pThis  = g_pPGMR0DynMap;
-                RTSPINLOCKTMP   Tmp    = RTSPINLOCKTMP_INITIALIZER;
-                RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
-
-                while (i-- > 0)
-                {
-                    Assert(pSet->aEntries[i].cRefs > 0);
-                    uint32_t iPage = pSet->aEntries[i].iPage;
-                    Assert(iPage < pThis->cPages);
-                    if (RTCpuSetIsMemberByIndex(&pThis->paPages[iPage].PendingSet, iRealCpu))
-                    {
-                        RTCpuSetDelByIndex(&pThis->paPages[iPage].PendingSet, iRealCpu);
-                        RTSpinlockRelease(pThis->hSpinlock, &Tmp);
-
-                        ASMInvalidatePage(pThis->paPages[iPage].pvPage);
-                        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapMigrateInvlPg);
-
-                        RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
-                    }
-                }
-
-                RTSpinlockRelease(pThis->hSpinlock, &Tmp);
-            }
-        }
-        pSet->iCpu = iRealCpu;
-    }
-}
-
-
-/**
- * Worker function that flushes the current subset.
- *
- * This is called when the set is popped or when the set
- * hash a too high load. As also pointed out elsewhere, the
- * whole subset thing is a hack for working around code that
- * accesses too many pages. Like PGMPool.
- *
- * @param   pSet        The set which subset to flush.
- */
-static void pgmDynMapFlushSubset(PPGMMAPSET pSet)
-{
-    uint32_t iSubset = pSet->iSubset;
-    uint32_t i       = pSet->cEntries;
-    Assert(i <= RT_ELEMENTS(pSet->aEntries));
-    if (    i > iSubset
-        &&  i <= RT_ELEMENTS(pSet->aEntries))
-    {
-        Log(("pgmDynMapFlushSubset: cEntries=%d iSubset=%d\n", pSet->cEntries, iSubset));
-        pSet->cEntries = iSubset;
-
-        PPGMR0DYNMAP    pThis = g_pPGMR0DynMap;
-        RTSPINLOCKTMP   Tmp   = RTSPINLOCKTMP_INITIALIZER;
-        RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
-
-        while (i-- > iSubset)
-        {
-            uint32_t iPage = pSet->aEntries[i].iPage;
-            Assert(iPage < pThis->cPages);
-            int32_t  cRefs = pSet->aEntries[i].cRefs;
-            Assert(cRefs > 0);
-            pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
-
-            pSet->aEntries[i].iPage = UINT16_MAX;
-            pSet->aEntries[i].cRefs = 0;
-        }
-
-        RTSpinlockRelease(pThis->hSpinlock, &Tmp);
-    }
-}
-
-
-/**
- * Creates a subset.
- *
- * A subset is a hack to avoid having to rewrite code that touches a lot of
- * pages. It prevents the mapping set from being overflowed by automatically
- * flushing previous mappings when a certain threshold is reached.
- *
- * Pages mapped after calling this function are only valid until the next page
- * is mapped.
- *
- * @returns The index of the previous subset. Pass this to
- *        PGMDynMapPopAutoSubset when poping it.
- * @param   pVCpu           Pointer to the virtual cpu data.
- */
-VMMDECL(uint32_t) PGMDynMapPushAutoSubset(PVMCPU pVCpu)
-{
-    PPGMMAPSET      pSet = &pVCpu->pgm.s.AutoSet;
-    AssertReturn(pSet->cEntries != PGMMAPSET_CLOSED, UINT32_MAX);
-    uint32_t        iPrevSubset = pSet->iSubset;
-    LogFlow(("PGMDynMapPushAutoSubset: pVCpu=%p iPrevSubset=%u\n", pVCpu, iPrevSubset));
-
-    pSet->iSubset = pSet->cEntries;
-    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSubsets);
-    return iPrevSubset;
-}
-
-
-/**
- * Pops a subset created by a previous call to PGMDynMapPushAutoSubset.
- *
- * @param   pVCpu           Pointer to the virtual cpu data.
- * @param   iPrevSubset     What PGMDynMapPushAutoSubset returned.
- */
-VMMDECL(void) PGMDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset)
-{
-    PPGMMAPSET      pSet = &pVCpu->pgm.s.AutoSet;
-    uint32_t        cEntries = pSet->cEntries;
-    LogFlow(("PGMDynMapPopAutoSubset: pVCpu=%p iPrevSubset=%u iSubset=%u cEntries=%u\n", pVCpu, iPrevSubset, pSet->iSubset, cEntries));
-    AssertReturnVoid(cEntries != PGMMAPSET_CLOSED);
-    AssertReturnVoid(pSet->iSubset >= iPrevSubset || iPrevSubset == UINT32_MAX);
-    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
-    if (    cEntries >= RT_ELEMENTS(pSet->aEntries) * 40 / 100
-        &&  cEntries != pSet->iSubset)
-    {
-        AssertMsg(cEntries < PGMMAPSET_MAX_FILL, ("%u\n", cEntries));
-        pgmDynMapFlushSubset(pSet);
-    }
-    pSet->iSubset = iPrevSubset;
-}
-
-
-/**
- * As a final resort for a full auto set, try merge duplicate entries.
- *
- * @param   pSet        The set.
- */
-static void pgmDynMapOptimizeAutoSet(PPGMMAPSET pSet)
-{
-    for (uint32_t i = 0 ; i < pSet->cEntries; i++)
-    {
-        uint16_t const  iPage = pSet->aEntries[i].iPage;
-        uint32_t        j     = i + 1;
-        while (j < pSet->cEntries)
-        {
-            if (pSet->aEntries[j].iPage != iPage)
-                j++;
-            else if ((uint32_t)pSet->aEntries[i].cRefs + (uint32_t)pSet->aEntries[j].cRefs < UINT16_MAX)
-            {
-                /* merge j into i removing j. */
-                pSet->aEntries[i].cRefs += pSet->aEntries[j].cRefs;
-                pSet->cEntries--;
-                if (j < pSet->cEntries)
-                {
-                    pSet->aEntries[j] = pSet->aEntries[pSet->cEntries];
-                    pSet->aEntries[pSet->cEntries].iPage = UINT16_MAX;
-                    pSet->aEntries[pSet->cEntries].cRefs = 0;
-                }
-                else
-                {
-                    pSet->aEntries[j].iPage = UINT16_MAX;
-                    pSet->aEntries[j].cRefs = 0;
-                }
-            }
-            else
-            {
-                /* migrate the max number of refs from j into i and quit the inner loop. */
-                uint32_t cMigrate = UINT16_MAX - 1 - pSet->aEntries[i].cRefs;
-                Assert(pSet->aEntries[j].cRefs > cMigrate);
-                pSet->aEntries[j].cRefs -= cMigrate;
-                pSet->aEntries[i].cRefs = UINT16_MAX - 1;
-                break;
-            }
-        }
-    }
-}
-
-
-/**
- * Common worker code for PGMDynMapHCPhys, pgmR0DynMapHCPageInlined and
- * pgmR0DynMapGCPageInlined.
- *
- * @returns VINF_SUCCESS, bails out to ring-3 on failure.
- * @param   pSet        The set.
- * @param   HCPhys      The physical address of the page.
- * @param   ppv         Where to store the address of the mapping on success.
- *
- * @remarks This is a very hot path.
- */
-int pgmR0DynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv)
-{
-    LogFlow(("pgmR0DynMapHCPageCommon: pSet=%p HCPhys=%RHp ppv=%p\n", pSet, HCPhys, ppv));
-    AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));
-    PVMCPU pVCpu = PGMR0DYNMAP_2_VMCPU(pSet);
-
-    /*
-     * Map it.
-     */
-    void *pvPage;
-    uint32_t const  iPage = pgmR0DynMapPage(g_pPGMR0DynMap, HCPhys, pSet->iCpu, pVCpu, &pvPage);
-    if (RT_UNLIKELY(iPage == UINT32_MAX))
-    {
-        RTAssertMsg2Weak("PGMDynMapHCPage: cLoad=%u/%u cPages=%u cGuardPages=%u\n",
-                         g_pPGMR0DynMap->cLoad, g_pPGMR0DynMap->cMaxLoad, g_pPGMR0DynMap->cPages, g_pPGMR0DynMap->cGuardPages);
-        if (!g_fPGMR0DynMapTestRunning)
-            VMMRZCallRing3NoCpu(PGMR0DYNMAP_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0);
-        *ppv = NULL;
-        return VERR_PGM_DYNMAP_FAILED;
-    }
-
-    /*
-     * Add the page to the auto reference set.
-     *
-     * The typical usage pattern means that the same pages will be mapped
-     * several times in the same set. We can catch most of these
-     * remappings by looking a few pages back into the set. (The searching
-     * and set optimizing path will hardly ever be used when doing this.)
-     */
-    AssertCompile(RT_ELEMENTS(pSet->aEntries) >= 8);
-    int32_t i = pSet->cEntries;
-    if (i-- < 5)
-    {
-        unsigned iEntry = pSet->cEntries++;
-        pSet->aEntries[iEntry].cRefs  = 1;
-        pSet->aEntries[iEntry].iPage  = iPage;
-        pSet->aEntries[iEntry].pvPage = pvPage;
-        pSet->aEntries[iEntry].HCPhys = HCPhys;
-        pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
-    }
-    /* Any of the last 5 pages? */
-    else if (   pSet->aEntries[i - 0].iPage == iPage
-             && pSet->aEntries[i - 0].cRefs < UINT16_MAX - 1)
-        pSet->aEntries[i - 0].cRefs++;
-    else if (   pSet->aEntries[i - 1].iPage == iPage
-             && pSet->aEntries[i - 1].cRefs < UINT16_MAX - 1)
-        pSet->aEntries[i - 1].cRefs++;
-    else if (   pSet->aEntries[i - 2].iPage == iPage
-             && pSet->aEntries[i - 2].cRefs < UINT16_MAX - 1)
-        pSet->aEntries[i - 2].cRefs++;
-    else if (   pSet->aEntries[i - 3].iPage == iPage
-             && pSet->aEntries[i - 3].cRefs < UINT16_MAX - 1)
-        pSet->aEntries[i - 3].cRefs++;
-    else if (   pSet->aEntries[i - 4].iPage == iPage
-             && pSet->aEntries[i - 4].cRefs < UINT16_MAX - 1)
-        pSet->aEntries[i - 4].cRefs++;
-    /* Don't bother searching unless we're above a 60% load. */
-    else if (RT_LIKELY(i <= (int32_t)RT_ELEMENTS(pSet->aEntries) * 60 / 100))
-    {
-        unsigned iEntry = pSet->cEntries++;
-        pSet->aEntries[iEntry].cRefs  = 1;
-        pSet->aEntries[iEntry].iPage  = iPage;
-        pSet->aEntries[iEntry].pvPage = pvPage;
-        pSet->aEntries[iEntry].HCPhys = HCPhys;
-        pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
-    }
-    else
-    {
-        /* Search the rest of the set. */
-        Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
-        i -= 4;
-        while (i-- > 0)
-            if (    pSet->aEntries[i].iPage == iPage
-                &&  pSet->aEntries[i].cRefs < UINT16_MAX - 1)
-            {
-                pSet->aEntries[i].cRefs++;
-                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetSearchHits);
-                break;
-            }
-        if (i < 0)
-        {
-            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetSearchMisses);
-            if (pSet->iSubset < pSet->cEntries)
-            {
-                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetSearchFlushes);
-                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(pSet->cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
-                AssertMsg(pSet->cEntries < PGMMAPSET_MAX_FILL, ("%u\n", pSet->cEntries));
-                pgmDynMapFlushSubset(pSet);
-            }
-
-            if (RT_UNLIKELY(pSet->cEntries >= RT_ELEMENTS(pSet->aEntries)))
-            {
-                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetOptimize);
-                pgmDynMapOptimizeAutoSet(pSet);
-            }
-
-            if (RT_LIKELY(pSet->cEntries < RT_ELEMENTS(pSet->aEntries)))
-            {
-                unsigned iEntry = pSet->cEntries++;
-                pSet->aEntries[iEntry].cRefs  = 1;
-                pSet->aEntries[iEntry].iPage  = iPage;
-                pSet->aEntries[iEntry].pvPage = pvPage;
-                pSet->aEntries[iEntry].HCPhys = HCPhys;
-                pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
-            }
-            else
-            {
-                /* We're screwed. */
-                pgmR0DynMapReleasePage(g_pPGMR0DynMap, iPage, 1);
-
-                RTAssertMsg2Weak("PGMDynMapHCPage: set is full!\n");
-                if (!g_fPGMR0DynMapTestRunning)
-                    VMMRZCallRing3NoCpu(PGMR0DYNMAP_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0);
-                *ppv = NULL;
-                return VERR_PGM_DYNMAP_FULL_SET;
-            }
-        }
-    }
-
-    *ppv = pvPage;
-    return VINF_SUCCESS;
-}
-
-
-#if 0 /* Not used in R0, should internalized the other PGMDynMapHC/GCPage too. */
-/* documented elsewhere - a bit of a mess. */
-VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
-{
-#ifdef VBOX_WITH_STATISTICS
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-#endif
-    /*
-     * Validate state.
-     */
-    STAM_PROFILE_START(&pVCpu->pgm.s.StatR0DynMapHCPage, a);
-    AssertPtr(ppv);
-    AssertMsg(pVM->pgm.s.pvR0DynMapUsed == g_pPGMR0DynMap,
-              ("%p != %p\n", pVM->pgm.s.pvR0DynMapUsed, g_pPGMR0DynMap));
-    AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
-    PVMCPU          pVCpu   = VMMGetCpu(pVM);
-    AssertPtr(pVCpu);
-    PPGMMAPSET      pSet    = &pVCpu->pgm.s.AutoSet;
-    AssertMsg(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries),
-              ("%#x (%u)\n", pSet->cEntries, pSet->cEntries));
-
-    /*
-     * Call common code.
-     */
-    int rc = pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);
-
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatR0DynMapHCPage, a);
-    return rc;
-}
-#endif
-
-
-#if 0 /*def DEBUG*/
-/** For pgmR0DynMapTest3PerCpu. */
-typedef struct PGMR0DYNMAPTEST
-{
-    uint32_t            u32Expect;
-    uint32_t           *pu32;
-    uint32_t volatile   cFailures;
-} PGMR0DYNMAPTEST;
-typedef PGMR0DYNMAPTEST *PPGMR0DYNMAPTEST;
-
-/**
- * Checks that the content of the page is the same on all CPUs, i.e. that there
- * are no CPU specfic PTs or similar nasty stuff involved.
- *
- * @param   idCpu           The current CPU.
- * @param   pvUser1         Pointer a PGMR0DYNMAPTEST structure.
- * @param   pvUser2         Unused, ignored.
- */
-static DECLCALLBACK(void) pgmR0DynMapTest3PerCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
-{
-    PPGMR0DYNMAPTEST    pTest = (PPGMR0DYNMAPTEST)pvUser1;
-    ASMInvalidatePage(pTest->pu32);
-    if (*pTest->pu32 != pTest->u32Expect)
-        ASMAtomicIncU32(&pTest->cFailures);
-    NOREF(pvUser2); NOREF(idCpu);
-}
-
-
-/**
- * Performs some basic tests in debug builds.
- */
-static int pgmR0DynMapTest(PVM pVM)
-{
-    LogRel(("pgmR0DynMapTest: ****** START ******\n"));
-    PPGMR0DYNMAP    pThis = g_pPGMR0DynMap;
-    PPGMMAPSET      pSet  = &pVM->aCpus[0].pgm.s.AutoSet;
-    uint32_t        i;
-
-    /*
-     * Assert internal integrity first.
-     */
-    LogRel(("Test #0\n"));
-    int rc = PGMR0DynMapAssertIntegrity();
-    if (RT_FAILURE(rc))
-        return rc;
-
-    void           *pvR0DynMapUsedSaved = pVM->pgm.s.pvR0DynMapUsed;
-    pVM->pgm.s.pvR0DynMapUsed = pThis;
-    g_fPGMR0DynMapTestRunning = true;
-
-    /*
-     * Simple test, map CR3 twice and check that we're getting the
-     * same mapping address back.
-     */
-    LogRel(("Test #1\n"));
-    ASMIntDisable();
-    PGMDynMapStartAutoSet(&pVM->aCpus[0]);
-
-    uint64_t cr3 = ASMGetCR3() & ~(uint64_t)PAGE_OFFSET_MASK;
-    void    *pv  = (void *)(intptr_t)-1;
-    void    *pv2 = (void *)(intptr_t)-2;
-    rc           = PGMDynMapHCPage(pVM, cr3, &pv);
-    int      rc2 = PGMDynMapHCPage(pVM, cr3, &pv2);
-    ASMIntEnable();
-    if (    RT_SUCCESS(rc2)
-        &&  RT_SUCCESS(rc)
-        &&  pv == pv2)
-    {
-        LogRel(("Load=%u/%u/%u Set=%u/%u\n", pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
-        rc = PGMR0DynMapAssertIntegrity();
-
-        /*
-         * Check that the simple set overflow code works by filling it
-         * with more CR3 mappings.
-         */
-        LogRel(("Test #2\n"));
-        ASMIntDisable();
-        PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
-        for (i = 0 ; i < UINT16_MAX*2 - 1 && RT_SUCCESS(rc) && pv2 == pv; i++)
-        {
-            pv2 = (void *)(intptr_t)-4;
-            rc = PGMDynMapHCPage(pVM, cr3, &pv2);
-        }
-        ASMIntEnable();
-        if (RT_FAILURE(rc) || pv != pv2)
-        {
-            LogRel(("failed(%d): rc=%Rrc; pv=%p pv2=%p i=%p\n", __LINE__, rc, pv, pv2, i));
-            if (RT_SUCCESS(rc)) rc = VERR_INTERNAL_ERROR;
-        }
-        else if (pSet->cEntries != 5)
-        {
-            LogRel(("failed(%d): cEntries=%d expected %d\n", __LINE__, pSet->cEntries, RT_ELEMENTS(pSet->aEntries) / 2));
-            rc = VERR_INTERNAL_ERROR;
-        }
-        else if (   pSet->aEntries[4].cRefs != UINT16_MAX - 1
-                 || pSet->aEntries[3].cRefs != UINT16_MAX - 1
-                 || pSet->aEntries[2].cRefs != 1
-                 || pSet->aEntries[1].cRefs != 1
-                 || pSet->aEntries[0].cRefs != 1)
-        {
-            LogRel(("failed(%d): bad set dist: ", __LINE__));
-            for (i = 0; i < pSet->cEntries; i++)
-                LogRel(("[%d]=%d, ", i, pSet->aEntries[i].cRefs));
-            LogRel(("\n"));
-            rc = VERR_INTERNAL_ERROR;
-        }
-        if (RT_SUCCESS(rc))
-            rc = PGMR0DynMapAssertIntegrity();
-        if (RT_SUCCESS(rc))
-        {
-            /*
-             * Trigger an set optimization run (exactly).
-             */
-            LogRel(("Test #3\n"));
-            ASMIntDisable();
-            PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
-            pv2 = NULL;
-            for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) - 5 && RT_SUCCESS(rc) && pv2 != pv; i++)
-            {
-                pv2 = (void *)(intptr_t)(-5 - i);
-                rc = PGMDynMapHCPage(pVM, cr3 + PAGE_SIZE * (i + 5), &pv2);
-            }
-            ASMIntEnable();
-            if (RT_FAILURE(rc) || pv == pv2)
-            {
-                LogRel(("failed(%d): rc=%Rrc; pv=%p pv2=%p i=%d\n", __LINE__, rc, pv, pv2, i));
-                if (RT_SUCCESS(rc)) rc = VERR_INTERNAL_ERROR;
-            }
-            else if (pSet->cEntries != RT_ELEMENTS(pSet->aEntries))
-            {
-                LogRel(("failed(%d): cEntries=%d expected %d\n", __LINE__, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
-                rc = VERR_INTERNAL_ERROR;
-            }
-            LogRel(("Load=%u/%u/%u Set=%u/%u\n", pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
-            if (RT_SUCCESS(rc))
-                rc = PGMR0DynMapAssertIntegrity();
-            if (RT_SUCCESS(rc))
-            {
-                /*
-                 * Trigger an overflow error.
-                 */
-                LogRel(("Test #4\n"));
-                ASMIntDisable();
-                PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
-                for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) + 2; i++)
-                {
-                    rc = PGMDynMapHCPage(pVM, cr3 - PAGE_SIZE * (i + 5), &pv2);
-                    if (RT_SUCCESS(rc))
-                        rc = PGMR0DynMapAssertIntegrity();
-                    if (RT_FAILURE(rc))
-                        break;
-                }
-                ASMIntEnable();
-                if (rc == VERR_PGM_DYNMAP_FULL_SET)
-                {
-                    /* flush the set. */
-                    LogRel(("Test #5\n"));
-                    ASMIntDisable();
-                    PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
-                    PGMDynMapReleaseAutoSet(&pVM->aCpus[0]);
-                    PGMDynMapStartAutoSet(&pVM->aCpus[0]);
-                    ASMIntEnable();
-
-                    rc = PGMR0DynMapAssertIntegrity();
-                }
-                else
-                {
-                    LogRel(("failed(%d): rc=%Rrc, wanted %d ; pv2=%p Set=%u/%u; i=%d\n", __LINE__,
-                            rc, VERR_PGM_DYNMAP_FULL_SET, pv2, pSet->cEntries, RT_ELEMENTS(pSet->aEntries), i));
-                    if (RT_SUCCESS(rc)) rc = VERR_INTERNAL_ERROR;
-                }
-            }
-        }
-    }
-    else
-    {
-        LogRel(("failed(%d): rc=%Rrc rc2=%Rrc; pv=%p pv2=%p\n", __LINE__, rc, rc2, pv, pv2));
-        if (RT_SUCCESS(rc))
-            rc = rc2;
-    }
-
-    /*
-     * Check that everyone sees the same stuff.
-     */
-    if (RT_SUCCESS(rc))
-    {
-        LogRel(("Test #5\n"));
-        ASMIntDisable();
-        PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
-        RTHCPHYS  HCPhysPT = RTR0MemObjGetPagePhysAddr(pThis->pSegHead->ahMemObjPTs[0], 0);
-        rc  = PGMDynMapHCPage(pVM, HCPhysPT, &pv);
-        if (RT_SUCCESS(rc))
-        {
-            PGMR0DYNMAPTEST Test;
-            uint32_t       *pu32Real = &pThis->paPages[pThis->pSegHead->iPage].uPte.pLegacy->u;
-            Test.pu32       = (uint32_t *)((uintptr_t)pv | ((uintptr_t)pu32Real & PAGE_OFFSET_MASK));
-            Test.u32Expect  = *pu32Real;
-            ASMAtomicWriteU32(&Test.cFailures, 0);
-            ASMIntEnable();
-
-            rc = RTMpOnAll(pgmR0DynMapTest3PerCpu, &Test, NULL);
-            if (RT_FAILURE(rc))
-                LogRel(("failed(%d): RTMpOnAll rc=%Rrc\n", __LINE__, rc));
-            else if (Test.cFailures)
-            {
-                LogRel(("failed(%d): cFailures=%d pu32Real=%p pu32=%p u32Expect=%#x *pu32=%#x\n", __LINE__,
-                        Test.cFailures, pu32Real, Test.pu32, Test.u32Expect, *Test.pu32));
-                rc = VERR_INTERNAL_ERROR;
-            }
-            else
-                LogRel(("pu32Real=%p pu32=%p u32Expect=%#x *pu32=%#x\n",
-                        pu32Real, Test.pu32, Test.u32Expect, *Test.pu32));
-        }
-        else
-        {
-            ASMIntEnable();
-            LogRel(("failed(%d): rc=%Rrc\n", rc));
-        }
-    }
-
-    /*
-     * Clean up.
-     */
-    LogRel(("Cleanup.\n"));
-    ASMIntDisable();
-    PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
-    PGMDynMapFlushAutoSet(&pVM->aCpus[0]);
-    PGMDynMapReleaseAutoSet(&pVM->aCpus[0]);
-    ASMIntEnable();
-
-    if (RT_SUCCESS(rc))
-        rc = PGMR0DynMapAssertIntegrity();
-    else
-        PGMR0DynMapAssertIntegrity();
-
-    g_fPGMR0DynMapTestRunning = false;
-    LogRel(("Result: rc=%Rrc Load=%u/%u/%u Set=%#x/%u\n", rc,
-            pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
-    pVM->pgm.s.pvR0DynMapUsed = pvR0DynMapUsedSaved;
-    LogRel(("pgmR0DynMapTest: ****** END ******\n"));
-    return rc;
-}
-#endif /* DEBUG */
-
Index: /trunk/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp	(revision 31402)
+++ /trunk/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp	(revision 31402)
@@ -0,0 +1,2639 @@
+/* $Id$ */
+/** @file
+ * PGM - Page Manager and Monitor, dynamic mapping cache.
+ */
+
+/*
+ * Copyright (C) 2008-2010 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*******************************************************************************
+*   Header Files                                                               *
+*******************************************************************************/
+#define LOG_GROUP LOG_GROUP_PGM_DYNMAP
+#include <VBox/pgm.h>
+#include "../PGMInternal.h"
+#include <VBox/vm.h>
+#include "../PGMInline.h"
+#include <VBox/err.h>
+#include <VBox/param.h>
+#include <VBox/sup.h>
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/assert.h>
+#ifndef IN_RC
+# include <iprt/cpuset.h>
+# include <iprt/mem.h>
+# include <iprt/memobj.h>
+# include <iprt/mp.h>
+# include <iprt/semaphore.h>
+# include <iprt/spinlock.h>
+#endif
+#include <iprt/string.h>
+
+
+/*******************************************************************************
+*   Defined Constants And Macros                                               *
+*******************************************************************************/
+#ifdef IN_RING0
+/** The max size of the mapping cache (in pages). */
+# define PGMR0DYNMAP_MAX_PAGES              ((16*_1M) >> PAGE_SHIFT)
+/** The small segment size that is adopted on out-of-memory conditions with a
+ * single big segment. */
+# define PGMR0DYNMAP_SMALL_SEG_PAGES        128
+/** The number of pages we reserve per CPU. */
+# define PGMR0DYNMAP_PAGES_PER_CPU          256
+/** The minimum number of pages we reserve per CPU.
+ * This must be equal or larger than the autoset size.  */
+# define PGMR0DYNMAP_PAGES_PER_CPU_MIN      64
+/** Calcs the overload threshold (safety margin).  Current set at 50%. */
+# define PGMR0DYNMAP_CALC_OVERLOAD(cPages)  ((cPages) / 2)
+/** The number of guard pages.
+ * @remarks Never do tuning of the hashing or whatnot with a strict build!  */
+# if defined(VBOX_STRICT)
+#  define PGMR0DYNMAP_GUARD_PAGES           1
+# else
+#  define PGMR0DYNMAP_GUARD_PAGES           0
+# endif
+#endif /* IN_RING0 */
+/** The dummy physical address of guard pages. */
+#define PGMR0DYNMAP_GUARD_PAGE_HCPHYS       UINT32_C(0x7777feed)
+/** The dummy reference count of guard pages. (Must be non-zero.) */
+#define PGMR0DYNMAP_GUARD_PAGE_REF_COUNT    INT32_C(0x7777feed)
+#if 0
+/** Define this to just clear the present bit on guard pages.
+ * The alternative is to replace the entire PTE with an bad not-present
+ * PTE. Either way, XNU will screw us. :-/   */
+# define PGMR0DYNMAP_GUARD_NP
+#endif
+/** The dummy PTE value for a page. */
+#define PGMR0DYNMAP_GUARD_PAGE_LEGACY_PTE   X86_PTE_PG_MASK
+/** The dummy PTE value for a page. */
+#define PGMR0DYNMAP_GUARD_PAGE_PAE_PTE      UINT64_MAX /*X86_PTE_PAE_PG_MASK*/
+
+#ifdef IN_RING0 /* Note! Assertions cause panics if preemption is disabled;
+                 *       disable this to work around that. */
+/**
+ * Acquire the spinlock.
+ * This will declare a temporary variable and expands to two statements!
+ */
+# define PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis) \
+    RTSPINLOCKTMP   MySpinlockTmp = RTSPINLOCKTMP_INITIALIZER; \
+    RTSpinlockAcquire((pThis)->hSpinlock, &MySpinlockTmp)
+/**
+ * Releases the spinlock.
+ */
+# define PGMRZDYNMAP_SPINLOCK_RELEASE(pThis) \
+    RTSpinlockRelease((pThis)->hSpinlock, &MySpinlockTmp)
+
+/**
+ * Re-acquires the spinlock.
+ */
+# define PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis) \
+    RTSpinlockAcquire((pThis)->hSpinlock, &MySpinlockTmp)
+#else
+# define PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis)   do { } while (0)
+# define PGMRZDYNMAP_SPINLOCK_RELEASE(pThis)   do { } while (0)
+# define PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis) do { } while (0)
+#endif
+
+
+/** Converts a PGMCPUM::AutoSet pointer into a PVMCPU. */
+#define PGMRZDYNMAP_SET_2_VMCPU(pSet)       (RT_FROM_MEMBER(pSet, VMCPU, pgm.s.AutoSet))
+
+/** Converts a PGMCPUM::AutoSet pointer into a PVM. */
+#define PGMRZDYNMAP_SET_2_VM(pSet)          (PGMRZDYNMAP_SET_2_VMCPU(pSet)->CTX_SUFF(pVM))
+
+/** Converts a PGMCPUM::AutoSet pointer into the dynamic mapping cache instance for the current context. */
+#ifdef IN_RC
+# define PGMRZDYNMAP_SET_2_DYNMAP(pSet)     (PGMRZDYNMAP_SET_2_VM(pSet)->pgm.s.pRCDynMap)
+#else
+# define PGMRZDYNMAP_SET_2_DYNMAP(pSet)     (g_pPGMR0DynMap)
+#endif
+
+/**
+ * Gets the set index of the current CPU.
+ *
+ * This always returns 0 when in raw-mode context because there is only ever
+ * one EMT in that context (at least presently).
+ */
+#ifdef IN_RC
+# define PGMRZDYNMAP_CUR_CPU()              (0)
+#else
+# define PGMRZDYNMAP_CUR_CPU()              RTMpCpuIdToSetIndex(RTMpCpuId())
+#endif
+
+/** PGMRZDYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */
+#define PGMRZDYNMAP_MAGIC                   UINT32_C(0x19640201)
+
+
+/** Zaps a set entry. */
+#define PGMRZDYNMAP_ZAP_ENTRY(pEntry) \
+    do \
+    { \
+        (pEntry)->iPage        = UINT16_MAX; \
+        (pEntry)->cRefs        = 0; \
+        (pEntry)->cInlinedRefs = 0; \
+        (pEntry)->cUnrefs      = 0; \
+    } while (0)
+
+
+/*******************************************************************************
+*   Structures and Typedefs                                                    *
+*******************************************************************************/
+#ifdef IN_RING0
+/**
+ * Ring-0 dynamic mapping cache segment.
+ *
+ * The dynamic mapping cache can be extended with additional segments if the
+ * load is found to be too high.  This is done the next time a VM is created,
+ * under the protection of the init mutex.  The arrays are reallocated and the
+ * new segment is added to the end of these.  Nothing is rehashed of course, as
+ * the indexes / addresses must remain unchanged.
+ *
+ * This structure is only modified while owning the init mutex or during module
+ * init / term.
+ */
+typedef struct PGMR0DYNMAPSEG
+{
+    /** Pointer to the next segment. */
+    struct PGMR0DYNMAPSEG      *pNext;
+    /** The memory object for the virtual address range that we're abusing. */
+    RTR0MEMOBJ                  hMemObj;
+    /** The start page in the cache. (I.e. index into the arrays.) */
+    uint16_t                    iPage;
+    /** The number of pages this segment contributes. */
+    uint16_t                    cPages;
+    /** The number of page tables. */
+    uint16_t                    cPTs;
+    /** The memory objects for the page tables. (Variable-length tail.) */
+    RTR0MEMOBJ                  ahMemObjPTs[1];
+} PGMR0DYNMAPSEG;
+/** Pointer to a ring-0 dynamic mapping cache segment. */
+typedef PGMR0DYNMAPSEG *PPGMR0DYNMAPSEG;
+
+
+/**
+ * Ring-0 dynamic mapping cache entry.
+ *
+ * @sa PGMRZDYNMAPENTRY, PGMRCDYNMAPENTRY.
+ */
+typedef struct PGMR0DYNMAPENTRY
+{
+    /** The physical address of the currently mapped page.
+     * This is duplicated for three reasons: cache locality, cache policy of the
+     * PT mappings and sanity checks.   */
+    RTHCPHYS                    HCPhys;
+    /** Pointer to the page. */
+    void                       *pvPage;
+    /** The number of references. */
+    int32_t volatile            cRefs;
+    /** PTE pointer union. */
+    union PGMR0DYNMAPENTRY_PPTE
+    {
+        /** PTE pointer, 32-bit legacy version. */
+        PX86PTE                 pLegacy;
+        /** PTE pointer, PAE version. */
+        PX86PTEPAE              pPae;
+        /** PTE pointer, the void version. */
+        void                   *pv;
+    } uPte;
+# ifndef IN_RC
+    /** CPUs that haven't invalidated this entry after its last update. */
+    RTCPUSET                    PendingSet;
+# endif
+} PGMR0DYNMAPENTRY;
+/** Pointer to a mapping cache entry for the ring-0.
+ * @sa PPGMRZDYNMAPENTRY, PPGMRCDYNMAPENTRY,  */
+typedef PGMR0DYNMAPENTRY *PPGMR0DYNMAPENTRY;
+
+
+/**
+ * Dynamic mapping cache for ring-0.
+ *
+ * This is initialized during VMMR0 module init but no segments are allocated
+ * at that time.  Segments will be added when the first VM is started and
+ * removed again when the last VM shuts down, thus avoiding memory consumption
+ * while dormant. At module termination, the remaining bits will be freed up.
+ *
+ * @sa PPGMRZDYNMAP, PGMRCDYNMAP.
+ */
+typedef struct PGMR0DYNMAP
+{
+    /** The usual magic number / eye catcher (PGMRZDYNMAP_MAGIC). */
+    uint32_t                    u32Magic;
+# ifndef IN_RC
+    /** Spinlock serializing the normal operation of the cache. */
+    RTSPINLOCK                  hSpinlock;
+# endif
+    /** Array for tracking and managing the pages.  */
+    PPGMR0DYNMAPENTRY           paPages;
+    /** The cache size given as a number of pages. */
+    uint32_t                    cPages;
+    /** Whether it's 32-bit legacy or PAE/AMD64 paging mode. */
+    bool                        fLegacyMode;
+    /** The current load.
+     * This does not include guard pages. */
+    uint32_t                    cLoad;
+    /** The max load ever.
+     * This is maintained to trigger the adding of more mapping space. */
+    uint32_t                    cMaxLoad;
+# ifndef IN_RC
+    /** Initialization / termination lock. */
+    RTSEMFASTMUTEX              hInitLock;
+# endif
+    /** The number of guard pages. */
+    uint32_t                    cGuardPages;
+    /** The number of users (protected by hInitLock). */
+    uint32_t                    cUsers;
+# ifndef IN_RC
+    /** Array containing a copy of the original page tables.
+     * The entries are either X86PTE or X86PTEPAE according to fLegacyMode. */
+    void                       *pvSavedPTEs;
+    /** List of segments. */
+    PPGMR0DYNMAPSEG             pSegHead;
+    /** The paging mode. */
+    SUPPAGINGMODE               enmPgMode;
+# endif
+} PGMR0DYNMAP;
+
+
+/**
+ * Paging level data.
+ */
+typedef struct PGMR0DYNMAPPGLVL
+{
+    uint32_t            cLevels;    /**< The number of levels. */
+    struct
+    {
+        RTHCPHYS        HCPhys;     /**< The address of the page for the current level,
+                                     *  i.e. what hMemObj/hMapObj is currently mapping. */
+        RTHCPHYS        fPhysMask;  /**< Mask for extracting HCPhys from uEntry. */
+        RTR0MEMOBJ      hMemObj;    /**< Memory object for HCPhys, PAGE_SIZE. */
+        RTR0MEMOBJ      hMapObj;    /**< Mapping object for hMemObj. */
+        uint32_t        fPtrShift;  /**< The pointer shift count. */
+        uint64_t        fPtrMask;   /**< The mask to apply to the shifted pointer to get the table index. */
+        uint64_t        fAndMask;   /**< And mask to check entry flags. */
+        uint64_t        fResMask;   /**< The result from applying fAndMask. */
+        union
+        {
+            void        *pv;        /**< hMapObj address. */
+            PX86PGUINT   paLegacy;  /**< Legacy table view. */
+            PX86PGPAEUINT paPae;    /**< PAE/AMD64 table view. */
+        } u;
+    } a[4];                         /**< One entry per paging level; 4 is the AMD64 maximum. */
+} PGMR0DYNMAPPGLVL;
+/** Pointer to paging level data. */
+typedef PGMR0DYNMAPPGLVL *PPGMR0DYNMAPPGLVL;
+#endif
+
+/** Mapping cache entry for the current context.
+ * @sa PGMR0DYNMAPENTRY, PGMRCDYNMAPENTRY  */
+typedef CTX_MID(PGM,DYNMAPENTRY) PGMRZDYNMAPENTRY;
+/** Pointer to a mapping cache entry for the current context.
+ * @sa PGMR0DYNMAPENTRY, PGMRCDYNMAPENTRY  */
+typedef PGMRZDYNMAPENTRY *PPGMRZDYNMAPENTRY;
+
+/** Pointer to the mapping cache instance for the current context.
+ * @sa PGMR0DYNMAP, PGMRCDYNMAP  */
+typedef CTX_MID(PGM,DYNMAP) *PPGMRZDYNMAP;
+
+
+
+/*******************************************************************************
+*   Global Variables                                                           *
+*******************************************************************************/
+#ifdef IN_RING0
+/** Pointer to the ring-0 dynamic mapping cache. */
+static PGMR0DYNMAP *g_pPGMR0DynMap;
+#endif
+/** For overflow testing. */
+static bool         g_fPGMR0DynMapTestRunning = false;
+
+
+/*******************************************************************************
+*   Internal Functions                                                         *
+*******************************************************************************/
+static void pgmRZDynMapReleasePage(PPGMRZDYNMAP pThis, uint32_t iPage, uint32_t cRefs);
+#ifdef IN_RING0
+static int  pgmR0DynMapSetup(PPGMRZDYNMAP pThis);
+static int  pgmR0DynMapExpand(PPGMRZDYNMAP pThis);
+static void pgmR0DynMapTearDown(PPGMRZDYNMAP pThis);
+#endif
+#if 0 /*def DEBUG*/
+static int  pgmR0DynMapTest(PVM pVM);
+#endif
+
+
+/**
+ * Initializes the auto mapping sets for a VM.
+ *
+ * @returns VINF_SUCCESS on success, VERR_INTERNAL_ERROR on failure.
+ * @param   pVM         The VM in question.
+ */
+static int pgmRZDynMapInitAutoSetsForVM(PVM pVM)
+{
+    VMCPUID idCpu = pVM->cCpus;
+    AssertReturn(idCpu > 0 && idCpu <= VMM_MAX_CPU_COUNT, VERR_INTERNAL_ERROR);
+    while (idCpu-- > 0)
+    {
+        PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
+        uint32_t j = RT_ELEMENTS(pSet->aEntries);
+        while (j-- > 0)
+        {
+            pSet->aEntries[j].pvPage        = NULL;
+            pSet->aEntries[j].HCPhys        = NIL_RTHCPHYS;
+            PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]);
+        }
+        pSet->cEntries = PGMMAPSET_CLOSED;  /* sets start out closed */
+        pSet->iSubset = UINT32_MAX;         /* no open subset */
+        pSet->iCpu = -1;                    /* not bound to any CPU yet */
+        memset(&pSet->aiHashTable[0], 0xff, sizeof(pSet->aiHashTable)); /* 0xff == empty hash slots */
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+#ifdef IN_RING0
+
+/**
+ * Initializes the ring-0 dynamic mapping cache.
+ *
+ * @returns VBox status code.
+ */
+VMMR0DECL(int) PGMR0DynMapInit(void)
+{
+    Assert(!g_pPGMR0DynMap);
+
+    /*
+     * Create and initialize the cache instance.
+     */
+    PPGMRZDYNMAP pThis = (PPGMRZDYNMAP)RTMemAllocZ(sizeof(*pThis));
+    AssertLogRelReturn(pThis, VERR_NO_MEMORY);
+    int             rc = VINF_SUCCESS;
+    pThis->enmPgMode = SUPR0GetPagingMode();
+    switch (pThis->enmPgMode)
+    {
+        case SUPPAGINGMODE_32_BIT:
+        case SUPPAGINGMODE_32_BIT_GLOBAL:
+            pThis->fLegacyMode = true;  /* 32-bit non-PAE paging: 32-bit (legacy) PTEs. */
+            break;
+        case SUPPAGINGMODE_PAE:
+        case SUPPAGINGMODE_PAE_GLOBAL:
+        case SUPPAGINGMODE_PAE_NX:
+        case SUPPAGINGMODE_PAE_GLOBAL_NX:
+        case SUPPAGINGMODE_AMD64:
+        case SUPPAGINGMODE_AMD64_GLOBAL:
+        case SUPPAGINGMODE_AMD64_NX:
+        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
+            pThis->fLegacyMode = false; /* PAE/AMD64 paging: 64-bit PTEs. */
+            break;
+        default:
+            rc = VERR_INTERNAL_ERROR;
+            break;
+    }
+    if (RT_SUCCESS(rc))
+    {
+        rc = RTSemFastMutexCreate(&pThis->hInitLock);
+        if (RT_SUCCESS(rc))
+        {
+            rc = RTSpinlockCreate(&pThis->hSpinlock);
+            if (RT_SUCCESS(rc))
+            {
+                pThis->u32Magic = PGMRZDYNMAP_MAGIC;
+                g_pPGMR0DynMap = pThis;
+                return VINF_SUCCESS;
+            }
+            RTSemFastMutexDestroy(pThis->hInitLock);
+        }
+    }
+    RTMemFree(pThis);
+    return rc;
+}
+
+
+/**
+ * Terminates the ring-0 dynamic mapping cache.
+ */
+VMMR0DECL(void) PGMR0DynMapTerm(void)
+{
+    /*
+     * Destroy the cache.
+     *
+     * There are not supposed to be any races here, the loader should
+     * make sure about that. So, don't bother locking anything.
+     *
+     * The VM objects should all be destroyed by now, so there are no
+     * dangling users or anything like that to clean up. This routine
+     * is just a mirror image of PGMR0DynMapInit.
+     */
+    PPGMRZDYNMAP pThis = g_pPGMR0DynMap;
+    if (pThis)
+    {
+        AssertPtr(pThis);
+        g_pPGMR0DynMap = NULL;
+
+        /* This should *never* happen, but in case it does try not to leak memory. */
+        AssertLogRelMsg(!pThis->cUsers && !pThis->paPages && !pThis->pvSavedPTEs && !pThis->cPages,
+                        ("cUsers=%d paPages=%p pvSavedPTEs=%p cPages=%#x\n",
+                         pThis->cUsers, pThis->paPages, pThis->pvSavedPTEs, pThis->cPages));
+        if (pThis->paPages)
+            pgmR0DynMapTearDown(pThis);
+
+        /* Free the associated resources. */
+        RTSemFastMutexDestroy(pThis->hInitLock);
+        pThis->hInitLock = NIL_RTSEMFASTMUTEX;
+        RTSpinlockDestroy(pThis->hSpinlock);
+        pThis->hSpinlock = NIL_RTSPINLOCK;
+        pThis->u32Magic = UINT32_MAX;       /* invalidate the magic to catch stale pointers */
+        RTMemFree(pThis);
+    }
+}
+
+
+/**
+ * Initializes the dynamic mapping cache for a new VM.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the shared VM structure.
+ */
+VMMR0DECL(int) PGMR0DynMapInitVM(PVM pVM)
+{
+    AssertMsgReturn(!pVM->pgm.s.pvR0DynMapUsed, ("%p (pThis=%p)\n", pVM->pgm.s.pvR0DynMapUsed, g_pPGMR0DynMap), VERR_WRONG_ORDER);
+
+    /*
+     * Initialize the auto sets.
+     */
+    int rc = pgmRZDynMapInitAutoSetsForVM(pVM);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    /*
+     * Do we need the cache? Skip the last bit if we don't.
+     */
+    if (!VMMIsHwVirtExtForced(pVM))
+        return VINF_SUCCESS;
+
+    /*
+     * Reference and if necessary setup or expand the cache.
+     */
+    PPGMRZDYNMAP pThis = g_pPGMR0DynMap;
+    AssertPtrReturn(pThis, VERR_INTERNAL_ERROR);
+    rc = RTSemFastMutexRequest(pThis->hInitLock);
+    AssertLogRelRCReturn(rc, rc);
+
+    pThis->cUsers++;
+    if (pThis->cUsers == 1)
+    {
+        rc = pgmR0DynMapSetup(pThis);           /* first user does the initial setup */
+#if 0 /*def DEBUG*/
+        if (RT_SUCCESS(rc))
+        {
+            rc = pgmR0DynMapTest(pVM);
+            if (RT_FAILURE(rc))
+                pgmR0DynMapTearDown(pThis);
+        }
+#endif
+    }
+    else if (pThis->cMaxLoad > PGMR0DYNMAP_CALC_OVERLOAD(pThis->cPages - pThis->cGuardPages))
+        rc = pgmR0DynMapExpand(pThis);          /* grow when past the overload threshold */
+    if (RT_SUCCESS(rc))
+        pVM->pgm.s.pvR0DynMapUsed = pThis;
+    else
+        pThis->cUsers--;                        /* drop the reference again on failure */
+
+    RTSemFastMutexRelease(pThis->hInitLock);
+    return rc;
+}
+
+
+/**
+ * Terminates the dynamic mapping cache usage for a VM.
+ *
+ * @param   pVM         Pointer to the shared VM structure.
+ */
+VMMR0DECL(void) PGMR0DynMapTermVM(PVM pVM)
+{
+    /*
+     * Return immediately if we're not using the cache.
+     */
+    if (!pVM->pgm.s.pvR0DynMapUsed)
+        return;
+
+    PPGMRZDYNMAP pThis = g_pPGMR0DynMap;
+    AssertPtrReturnVoid(pThis);
+
+    int rc = RTSemFastMutexRequest(pThis->hInitLock);
+    AssertLogRelRCReturnVoid(rc);
+
+    if (pVM->pgm.s.pvR0DynMapUsed == pThis)
+    {
+        pVM->pgm.s.pvR0DynMapUsed = NULL;
+
+#ifdef VBOX_STRICT
+        PGMR0DynMapAssertIntegrity();
+#endif
+
+        /*
+         * Clean up and check the auto sets.
+         */
+        VMCPUID idCpu = pVM->cCpus;
+        while (idCpu-- > 0)
+        {
+            PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
+            uint32_t j = pSet->cEntries;    /* PGMMAPSET_CLOSED if properly closed */
+            if (j <= RT_ELEMENTS(pSet->aEntries))
+            {
+                /*
+                 * The set is open, close it.
+                 */
+                while (j-- > 0)
+                {
+                    int32_t  cRefs = pSet->aEntries[j].cRefs;
+                    uint32_t iPage = pSet->aEntries[j].iPage;
+                    LogRel(("PGMR0DynMapTermVM: %d dangling refs to %#x\n", cRefs, iPage));
+                    if (iPage < pThis->cPages && cRefs > 0)
+                        pgmRZDynMapReleasePage(pThis, iPage, cRefs);
+                    else
+                        AssertLogRelMsgFailed(("cRefs=%d iPage=%#x cPages=%u\n", cRefs, iPage, pThis->cPages));
+
+                    PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]);
+                }
+                pSet->cEntries = PGMMAPSET_CLOSED;
+                pSet->iSubset = UINT32_MAX;
+                pSet->iCpu = -1;
+            }
+            else
+                AssertMsg(j == PGMMAPSET_CLOSED, ("cEntries=%#x\n", j));
+
+            j = RT_ELEMENTS(pSet->aEntries);    /* paranoia: verify every entry is now zapped */
+            while (j-- > 0)
+            {
+                Assert(pSet->aEntries[j].iPage == UINT16_MAX);
+                Assert(!pSet->aEntries[j].cRefs);
+            }
+        }
+
+        /*
+         * Release our reference to the mapping cache.
+         */
+        Assert(pThis->cUsers > 0);
+        pThis->cUsers--;
+        if (!pThis->cUsers)
+            pgmR0DynMapTearDown(pThis);         /* last user tears the cache down */
+    }
+    else
+        AssertLogRelMsgFailed(("pvR0DynMapUsed=%p pThis=%p\n", pVM->pgm.s.pvR0DynMapUsed, pThis));
+
+    RTSemFastMutexRelease(pThis->hInitLock);
+}
+
+
+/**
+ * Shoots down the TLBs for all the cache pages, pgmR0DynMapTearDown helper.
+ *
+ * @param   idCpu           The current CPU.
+ * @param   pvUser1         The dynamic mapping cache instance.
+ * @param   pvUser2         Unused, NULL.
+ */
+static DECLCALLBACK(void) pgmR0DynMapShootDownTlbs(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    Assert(!pvUser2);
+    PPGMRZDYNMAP        pThis   = (PPGMRZDYNMAP)pvUser1;
+    Assert(pThis == g_pPGMR0DynMap);
+    PPGMRZDYNMAPENTRY   paPages = pThis->paPages;
+    uint32_t            iPage   = pThis->cPages;
+    while (iPage-- > 0)
+        ASMInvalidatePage(paPages[iPage].pvPage);   /* invlpg every cache mapping on this CPU */
+}
+
+
+/**
+ * Shoot down the TLBs for every single cache entry on all CPUs.
+ *
+ * @returns IPRT status code (RTMpOnAll).
+ * @param   pThis       The dynamic mapping cache instance.
+ */
+static int pgmR0DynMapTlbShootDown(PPGMRZDYNMAP pThis)
+{
+    int rc = RTMpOnAll(pgmR0DynMapShootDownTlbs, pThis, NULL);
+    AssertRC(rc);
+    if (RT_FAILURE(rc))
+    {
+        uint32_t iPage = pThis->cPages;     /* best effort fallback: invalidate on this CPU only */
+        while (iPage-- > 0)
+            ASMInvalidatePage(pThis->paPages[iPage].pvPage);
+    }
+    return rc;
+}
+
+
+/**
+ * Calculate the new cache size based on cMaxLoad statistics.
+ *
+ * @returns Number of pages.
+ * @param   pThis       The dynamic mapping cache instance.
+ * @param   pcMinPages  The minimal size in pages.
+ */
+static uint32_t pgmR0DynMapCalcNewSize(PPGMRZDYNMAP pThis, uint32_t *pcMinPages)
+{
+    Assert(pThis->cPages <= PGMR0DYNMAP_MAX_PAGES);
+
+    /* cCpus * PGMR0DYNMAP_PAGES_PER_CPU(_MIN). */
+    RTCPUID     cCpus     = RTMpGetCount();
+    AssertReturn(cCpus > 0 && cCpus <= RTCPUSET_MAX_CPUS, 0);
+    uint32_t    cPages    = cCpus * PGMR0DYNMAP_PAGES_PER_CPU;
+    uint32_t    cMinPages = cCpus * PGMR0DYNMAP_PAGES_PER_CPU_MIN;
+
+    /* adjust against cMaxLoad. */
+    AssertMsg(pThis->cMaxLoad <= PGMR0DYNMAP_MAX_PAGES, ("%#x\n", pThis->cMaxLoad));
+    if (pThis->cMaxLoad > PGMR0DYNMAP_MAX_PAGES)
+        pThis->cMaxLoad = 0;
+
+    while (pThis->cMaxLoad > PGMR0DYNMAP_CALC_OVERLOAD(cPages))
+        cPages += PGMR0DYNMAP_PAGES_PER_CPU;
+
+    if (pThis->cMaxLoad > cMinPages)
+        cMinPages = pThis->cMaxLoad;
+
+    /* adjust against max and current size. */
+    if (cPages < pThis->cPages)
+        cPages = pThis->cPages;
+    cPages *= PGMR0DYNMAP_GUARD_PAGES + 1;      /* make room for interleaved guard pages */
+    if (cPages > PGMR0DYNMAP_MAX_PAGES)
+        cPages = PGMR0DYNMAP_MAX_PAGES;
+
+    if (cMinPages < pThis->cPages)
+        cMinPages = pThis->cPages;
+    cMinPages *= PGMR0DYNMAP_GUARD_PAGES + 1;   /* ditto for the minimum */
+    if (cMinPages > PGMR0DYNMAP_MAX_PAGES)
+        cMinPages = PGMR0DYNMAP_MAX_PAGES;
+
+    Assert(cMinPages);
+    *pcMinPages = cMinPages;
+    return cPages;
+}
+
+
+/**
+ * Initializes the paging level data.
+ *
+ * @param   pThis       The dynamic mapping cache instance.
+ * @param   pPgLvl      The paging level data.
+ */
+void pgmR0DynMapPagingArrayInit(PPGMRZDYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl)
+{
+    RTCCUINTREG     cr4 = ASMGetCR4();  /* CR4.PSE decides whether PDEs may be large pages */
+    switch (pThis->enmPgMode)
+    {
+        case SUPPAGINGMODE_32_BIT:
+        case SUPPAGINGMODE_32_BIT_GLOBAL:
+            pPgLvl->cLevels = 2;
+            pPgLvl->a[0].fPhysMask = X86_CR3_PAGE_MASK;
+            pPgLvl->a[0].fAndMask  = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
+            pPgLvl->a[0].fResMask  = X86_PDE_P | X86_PDE_RW;
+            pPgLvl->a[0].fPtrMask  = X86_PD_MASK;
+            pPgLvl->a[0].fPtrShift = X86_PD_SHIFT;
+
+            pPgLvl->a[1].fPhysMask = X86_PDE_PG_MASK;
+            pPgLvl->a[1].fAndMask  = X86_PTE_P | X86_PTE_RW;
+            pPgLvl->a[1].fResMask  = X86_PTE_P | X86_PTE_RW;
+            pPgLvl->a[1].fPtrMask  = X86_PT_MASK;
+            pPgLvl->a[1].fPtrShift = X86_PT_SHIFT;
+            break;
+
+        case SUPPAGINGMODE_PAE:
+        case SUPPAGINGMODE_PAE_GLOBAL:
+        case SUPPAGINGMODE_PAE_NX:
+        case SUPPAGINGMODE_PAE_GLOBAL_NX:
+            pPgLvl->cLevels = 3;
+            pPgLvl->a[0].fPhysMask = X86_CR3_PAE_PAGE_MASK;
+            pPgLvl->a[0].fPtrMask  = X86_PDPT_MASK_PAE;
+            pPgLvl->a[0].fPtrShift = X86_PDPT_SHIFT;
+            pPgLvl->a[0].fAndMask  = X86_PDPE_P;
+            pPgLvl->a[0].fResMask  = X86_PDPE_P;
+
+            pPgLvl->a[1].fPhysMask = X86_PDPE_PG_MASK;
+            pPgLvl->a[1].fPtrMask  = X86_PD_PAE_MASK;
+            pPgLvl->a[1].fPtrShift = X86_PD_PAE_SHIFT;
+            pPgLvl->a[1].fAndMask  = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
+            pPgLvl->a[1].fResMask  = X86_PDE_P | X86_PDE_RW;
+
+            pPgLvl->a[2].fPhysMask = X86_PDE_PAE_PG_MASK;
+            pPgLvl->a[2].fPtrMask  = X86_PT_PAE_MASK;
+            pPgLvl->a[2].fPtrShift = X86_PT_PAE_SHIFT;
+            pPgLvl->a[2].fAndMask  = X86_PTE_P | X86_PTE_RW;
+            pPgLvl->a[2].fResMask  = X86_PTE_P | X86_PTE_RW;
+            break;
+
+        case SUPPAGINGMODE_AMD64:
+        case SUPPAGINGMODE_AMD64_GLOBAL:
+        case SUPPAGINGMODE_AMD64_NX:
+        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
+            pPgLvl->cLevels = 4;
+            pPgLvl->a[0].fPhysMask = X86_CR3_AMD64_PAGE_MASK;
+            pPgLvl->a[0].fPtrShift = X86_PML4_SHIFT;
+            pPgLvl->a[0].fPtrMask  = X86_PML4_MASK;
+            pPgLvl->a[0].fAndMask  = X86_PML4E_P | X86_PML4E_RW;
+            pPgLvl->a[0].fResMask  = X86_PML4E_P | X86_PML4E_RW;
+
+            pPgLvl->a[1].fPhysMask = X86_PML4E_PG_MASK;
+            pPgLvl->a[1].fPtrShift = X86_PDPT_SHIFT;
+            pPgLvl->a[1].fPtrMask  = X86_PDPT_MASK_AMD64;
+            pPgLvl->a[1].fAndMask  = X86_PDPE_P | X86_PDPE_RW /** @todo check for X86_PDPT_PS support. */;
+            pPgLvl->a[1].fResMask  = X86_PDPE_P | X86_PDPE_RW;
+
+            pPgLvl->a[2].fPhysMask = X86_PDPE_PG_MASK;
+            pPgLvl->a[2].fPtrShift = X86_PD_PAE_SHIFT;
+            pPgLvl->a[2].fPtrMask  = X86_PD_PAE_MASK;
+            pPgLvl->a[2].fAndMask  = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
+            pPgLvl->a[2].fResMask  = X86_PDE_P | X86_PDE_RW;
+
+            pPgLvl->a[3].fPhysMask = X86_PDE_PAE_PG_MASK;
+            pPgLvl->a[3].fPtrShift = X86_PT_PAE_SHIFT;
+            pPgLvl->a[3].fPtrMask  = X86_PT_PAE_MASK;
+            pPgLvl->a[3].fAndMask  = X86_PTE_P | X86_PTE_RW;
+            pPgLvl->a[3].fResMask  = X86_PTE_P | X86_PTE_RW;
+            break;
+
+        default:
+            AssertFailed();
+            pPgLvl->cLevels = 0;
+            break;
+    }
+
+    for (uint32_t i = 0; i < 4; i++) /* ASSUMING array size. */
+    {
+        pPgLvl->a[i].HCPhys = NIL_RTHCPHYS;
+        pPgLvl->a[i].hMapObj = NIL_RTR0MEMOBJ;
+        pPgLvl->a[i].hMemObj = NIL_RTR0MEMOBJ;
+        pPgLvl->a[i].u.pv = NULL;
+    }
+}
+
+
+/**
+ * Maps a PTE.
+ *
+ * This will update the segment structure when new PTs are mapped.
+ *
+ * It also assumes that we (for paranoid reasons) wish to establish a mapping
+ * chain from CR3 to the PT that all corresponds to the processor we're
+ * currently running on, and go about this by running with interrupts disabled
+ * and restarting from CR3 for every change.
+ *
+ * @returns VBox status code, VINF_TRY_AGAIN if we changed any mappings and had
+ *          to re-enable interrupts.
+ * @param   pThis       The dynamic mapping cache instance.
+ * @param   pPgLvl      The paging level structure.
+ * @param   pvPage      The page.
+ * @param   pSeg        The segment.
+ * @param   cMaxPTs     The max number of PTs expected in the segment.
+ * @param   ppvPTE      Where to store the PTE address.
+ */
+static int pgmR0DynMapPagingArrayMapPte(PPGMRZDYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl, void *pvPage,
+                                        PPGMR0DYNMAPSEG pSeg, uint32_t cMaxPTs, void **ppvPTE)
+{
+    Assert(!(ASMGetFlags() & X86_EFL_IF));  /* caller must run with interrupts disabled */
+    void           *pvEntry = NULL;
+    X86PGPAEUINT    uEntry = ASMGetCR3();
+    for (uint32_t i = 0; i < pPgLvl->cLevels; i++)
+    {
+        RTHCPHYS HCPhys = uEntry & pPgLvl->a[i].fPhysMask;
+        if (pPgLvl->a[i].HCPhys != HCPhys)
+        {
+            /*
+             * Need to remap this level.
+             * The final level, the PT, will not be freed since that is what it's all about.
+             */
+            ASMIntEnable();     /* the RTR0MemObj calls below may block */
+            if (i + 1 == pPgLvl->cLevels)
+                AssertReturn(pSeg->cPTs < cMaxPTs, VERR_INTERNAL_ERROR);
+            else
+            {
+                int rc2 = RTR0MemObjFree(pPgLvl->a[i].hMemObj, true /* fFreeMappings */); AssertRC(rc2);
+                pPgLvl->a[i].hMemObj = pPgLvl->a[i].hMapObj = NIL_RTR0MEMOBJ;
+            }
+
+            int rc = RTR0MemObjEnterPhys(&pPgLvl->a[i].hMemObj, HCPhys, PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
+            if (RT_SUCCESS(rc))
+            {
+                rc = RTR0MemObjMapKernel(&pPgLvl->a[i].hMapObj, pPgLvl->a[i].hMemObj,
+                                         (void *)-1 /* pvFixed */, 0 /* cbAlignment */,
+                                         RTMEM_PROT_WRITE | RTMEM_PROT_READ);
+                if (RT_SUCCESS(rc))
+                {
+                    pPgLvl->a[i].u.pv   = RTR0MemObjAddress(pPgLvl->a[i].hMapObj);
+                    AssertMsg(((uintptr_t)pPgLvl->a[i].u.pv & ~(uintptr_t)PAGE_OFFSET_MASK), ("%p\n", pPgLvl->a[i].u.pv));
+                    pPgLvl->a[i].HCPhys = HCPhys;
+                    if (i + 1 == pPgLvl->cLevels)
+                        pSeg->ahMemObjPTs[pSeg->cPTs++] = pPgLvl->a[i].hMemObj;
+                    ASMIntDisable();    /* mappings changed; caller must restart from CR3 */
+                    return VINF_TRY_AGAIN;
+                }
+
+                pPgLvl->a[i].hMapObj = NIL_RTR0MEMOBJ;
+            }
+            else
+                pPgLvl->a[i].hMemObj = NIL_RTR0MEMOBJ;
+            pPgLvl->a[i].HCPhys = NIL_RTHCPHYS;
+            return rc;
+        }
+
+        /*
+         * The next level.
+         */
+        uint32_t iEntry = ((uint64_t)(uintptr_t)pvPage >> pPgLvl->a[i].fPtrShift) & pPgLvl->a[i].fPtrMask;
+        if (pThis->fLegacyMode)
+        {
+            pvEntry = &pPgLvl->a[i].u.paLegacy[iEntry];
+            uEntry  = pPgLvl->a[i].u.paLegacy[iEntry];
+        }
+        else
+        {
+            pvEntry = &pPgLvl->a[i].u.paPae[iEntry];
+            uEntry  = pPgLvl->a[i].u.paPae[iEntry];
+        }
+
+        if ((uEntry & pPgLvl->a[i].fAndMask) != pPgLvl->a[i].fResMask)
+        {
+            LogRel(("PGMR0DynMap: internal error - iPgLvl=%u cLevels=%u uEntry=%#llx fAnd=%#llx fRes=%#llx got=%#llx\n"
+                    "PGMR0DynMap: pv=%p pvPage=%p iEntry=%#x fLegacyMode=%RTbool\n",
+                    i, pPgLvl->cLevels, uEntry, pPgLvl->a[i].fAndMask, pPgLvl->a[i].fResMask, uEntry & pPgLvl->a[i].fAndMask,
+                    pPgLvl->a[i].u.pv, pvPage, iEntry, pThis->fLegacyMode));
+            return VERR_INTERNAL_ERROR;
+        }
+        /*Log(("#%d: iEntry=%4d uEntry=%#llx pvEntry=%p HCPhys=%RHp \n", i, iEntry, uEntry, pvEntry, pPgLvl->a[i].HCPhys));*/
+    }
+
+    /* made it thru without needing to remap anything. */
+    *ppvPTE = pvEntry;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Sets up a guard page.
+ *
+ * Poisons the page content, tags the entry with the special guard reference
+ * count and physical address markers, and invalidates its PTE so stray
+ * accesses are caught.
+ *
+ * @param   pThis       The dynamic mapping cache instance.
+ * @param   pPage       The page.
+ */
+DECLINLINE(void) pgmR0DynMapSetupGuardPage(PPGMRZDYNMAP pThis, PPGMRZDYNMAPENTRY pPage)
+{
+    memset(pPage->pvPage, 0xfd, PAGE_SIZE);                 /* poison pattern */
+    pPage->cRefs  = PGMR0DYNMAP_GUARD_PAGE_REF_COUNT;       /* marks the entry as a guard page */
+    pPage->HCPhys = PGMR0DYNMAP_GUARD_PAGE_HCPHYS;
+#ifdef PGMR0DYNMAP_GUARD_NP
+    /* Not-present guarding: clear the present bit so any access faults. */
+    ASMAtomicBitClear(pPage->uPte.pv, X86_PTE_BIT_P);
+#else
+    /* Otherwise install a dedicated guard PTE value (legacy vs PAE format). */
+    if (pThis->fLegacyMode)
+        ASMAtomicWriteU32(&pPage->uPte.pLegacy->u, PGMR0DYNMAP_GUARD_PAGE_LEGACY_PTE);
+    else
+        ASMAtomicWriteU64(&pPage->uPte.pPae->u,    PGMR0DYNMAP_GUARD_PAGE_PAE_PTE);
+#endif
+    pThis->cGuardPages++;
+}
+
+
+/**
+ * Adds a new segment of the specified size.
+ *
+ * @returns VBox status code.
+ * @param   pThis       The dynamic mapping cache instance.
+ * @param   cPages      The size of the new segment, given as a page count.
+ */
+static int pgmR0DynMapAddSeg(PPGMRZDYNMAP pThis, uint32_t cPages)
+{
+    int rc2;
+    AssertReturn(ASMGetFlags() & X86_EFL_IF, VERR_PREEMPT_DISABLED);
+
+    /*
+     * Do the array reallocations first.
+     * (The pages array has to be replaced behind the spinlock of course.)
+     */
+    void *pvSavedPTEs = RTMemRealloc(pThis->pvSavedPTEs, (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * (pThis->cPages + cPages));
+    if (!pvSavedPTEs)
+        return VERR_NO_MEMORY;
+    pThis->pvSavedPTEs = pvSavedPTEs;
+
+    void *pvPages = RTMemAllocZ(sizeof(pThis->paPages[0]) * (pThis->cPages + cPages));
+    if (!pvPages)
+    {
+        /* Try shrink the saved-PTE array back; keeping the larger block on
+           failure is harmless, so the failed realloc is not fatal here. */
+        pvSavedPTEs = RTMemRealloc(pThis->pvSavedPTEs, (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * pThis->cPages);
+        if (pvSavedPTEs)
+            pThis->pvSavedPTEs = pvSavedPTEs;
+        return VERR_NO_MEMORY;
+    }
+
+    PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
+
+    /* Swap in the new (larger) pages array while holding the spinlock. */
+    memcpy(pvPages, pThis->paPages, sizeof(pThis->paPages[0]) * pThis->cPages);
+    void *pvToFree = pThis->paPages;
+    pThis->paPages = (PPGMRZDYNMAPENTRY)pvPages;
+
+    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
+    RTMemFree(pvToFree);
+
+    /*
+     * Allocate the segment structure and pages of memory, then touch all the pages (paranoia).
+     */
+    uint32_t cMaxPTs = cPages / (pThis->fLegacyMode ? X86_PG_ENTRIES : X86_PG_PAE_ENTRIES) + 2;
+    PPGMR0DYNMAPSEG pSeg = (PPGMR0DYNMAPSEG)RTMemAllocZ(RT_UOFFSETOF(PGMR0DYNMAPSEG, ahMemObjPTs[cMaxPTs]));
+    if (!pSeg)
+        return VERR_NO_MEMORY;
+    pSeg->pNext  = NULL;
+    pSeg->cPages = cPages;
+    pSeg->iPage  = pThis->cPages;
+    pSeg->cPTs   = 0;
+    int rc = RTR0MemObjAllocPage(&pSeg->hMemObj, cPages << PAGE_SHIFT, false);
+    if (RT_SUCCESS(rc))
+    {
+        uint8_t            *pbPage = (uint8_t *)RTR0MemObjAddress(pSeg->hMemObj);
+        AssertMsg(VALID_PTR(pbPage) && !((uintptr_t)pbPage & PAGE_OFFSET_MASK), ("%p\n", pbPage));
+        memset(pbPage, 0xfe, cPages << PAGE_SHIFT);  /* touch + poison */
+
+        /*
+         * Walk thru the pages and set them up with a mapping of their PTE and everything.
+         */
+        ASMIntDisable();   /* keep the paging structures stable while walking them */
+        PGMR0DYNMAPPGLVL    PgLvl;
+        pgmR0DynMapPagingArrayInit(pThis, &PgLvl);
+        uint32_t const      iEndPage = pSeg->iPage + cPages;
+        for (uint32_t iPage = pSeg->iPage;
+             iPage < iEndPage;
+             iPage++, pbPage += PAGE_SIZE)
+        {
+            /* Initialize the page data. */
+            pThis->paPages[iPage].HCPhys = NIL_RTHCPHYS;
+            pThis->paPages[iPage].pvPage = pbPage;
+            pThis->paPages[iPage].cRefs  = 0;
+            pThis->paPages[iPage].uPte.pPae = 0;
+#ifndef IN_RC
+            RTCpuSetFill(&pThis->paPages[iPage].PendingSet);
+#endif
+
+            /* Map its page table, retry until we've got a clean run (paranoia). */
+            do
+                rc = pgmR0DynMapPagingArrayMapPte(pThis, &PgLvl, pbPage, pSeg, cMaxPTs,
+                                                  &pThis->paPages[iPage].uPte.pv);
+            while (rc == VINF_TRY_AGAIN);
+            if (RT_FAILURE(rc))
+                break;
+
+            /* Save the PTE. */
+            if (pThis->fLegacyMode)
+                ((PX86PGUINT)pThis->pvSavedPTEs)[iPage]    = pThis->paPages[iPage].uPte.pLegacy->u;
+            else
+                ((PX86PGPAEUINT)pThis->pvSavedPTEs)[iPage] = pThis->paPages[iPage].uPte.pPae->u;
+
+#ifdef VBOX_STRICT
+            /* Check that we've got the right entry. */
+            RTHCPHYS HCPhysPage = RTR0MemObjGetPagePhysAddr(pSeg->hMemObj, iPage - pSeg->iPage);
+            RTHCPHYS HCPhysPte  = pThis->fLegacyMode
+                                ? pThis->paPages[iPage].uPte.pLegacy->u & X86_PTE_PG_MASK
+                                : pThis->paPages[iPage].uPte.pPae->u    & X86_PTE_PAE_PG_MASK;
+            if (HCPhysPage != HCPhysPte)
+            {
+                LogRel(("pgmR0DynMapAddSeg: internal error - page #%u HCPhysPage=%RHp HCPhysPte=%RHp pbPage=%p pvPte=%p\n",
+                        iPage - pSeg->iPage, HCPhysPage, HCPhysPte, pbPage, pThis->paPages[iPage].uPte.pv));
+                rc = VERR_INTERNAL_ERROR;
+                break;
+            }
+#endif
+        } /* for each page */
+        ASMIntEnable();
+
+        /* cleanup non-PT mappings */
+        for (uint32_t i = 0; i < PgLvl.cLevels - 1; i++)
+            RTR0MemObjFree(PgLvl.a[i].hMemObj, true /* fFreeMappings */);
+
+        if (RT_SUCCESS(rc))
+        {
+#if PGMR0DYNMAP_GUARD_PAGES > 0
+            /*
+             * Setup guard pages.
+             * (Note: TLBs will be shot down later on.)
+             */
+            uint32_t iPage = pSeg->iPage;
+            while (iPage < iEndPage)
+            {
+                /* one guard page after every PGMR0DYNMAP_GUARD_PAGES normal pages */
+                for (uint32_t iGPg = 0; iGPg < PGMR0DYNMAP_GUARD_PAGES && iPage < iEndPage; iGPg++, iPage++)
+                    pgmR0DynMapSetupGuardPage(pThis, &pThis->paPages[iPage]);
+                iPage++; /* the guarded page */
+            }
+
+            /* Make sure the very last page is a guard page too. */
+            iPage = iEndPage - 1;
+            if (pThis->paPages[iPage].cRefs != PGMR0DYNMAP_GUARD_PAGE_REF_COUNT)
+                pgmR0DynMapSetupGuardPage(pThis, &pThis->paPages[iPage]);
+#endif /* PGMR0DYNMAP_GUARD_PAGES > 0 */
+
+            /*
+             * Commit it by adding the segment to the list and updating the page count.
+             */
+            pSeg->pNext = pThis->pSegHead;
+            pThis->pSegHead = pSeg;
+            pThis->cPages += cPages;
+            return VINF_SUCCESS;
+        }
+
+        /*
+         * Bail out.
+         */
+        while (pSeg->cPTs-- > 0)
+        {
+            rc2 = RTR0MemObjFree(pSeg->ahMemObjPTs[pSeg->cPTs], true /* fFreeMappings */);
+            AssertRC(rc2);
+            pSeg->ahMemObjPTs[pSeg->cPTs] = NIL_RTR0MEMOBJ;
+        }
+
+        rc2 = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */);
+        AssertRC(rc2);
+        pSeg->hMemObj = NIL_RTR0MEMOBJ;
+    }
+    RTMemFree(pSeg);
+
+    /* Don't bother resizing the arrays, but free them if we're the only user. */
+    if (!pThis->cPages)
+    {
+        RTMemFree(pThis->paPages);
+        pThis->paPages = NULL;
+        RTMemFree(pThis->pvSavedPTEs);
+        pThis->pvSavedPTEs = NULL;
+    }
+    return rc;
+}
+
+
+/**
+ * Called by PGMR0DynMapInitVM under the init lock.
+ *
+ * @returns VBox status code.
+ * @param   pThis       The dynamic mapping cache instance.
+ */
+static int pgmR0DynMapSetup(PPGMRZDYNMAP pThis)
+{
+    /*
+     * Figure out the target size and try to allocate it as one big segment.
+     */
+    uint32_t cMinPages;
+    uint32_t const cTargetPages = pgmR0DynMapCalcNewSize(pThis, &cMinPages);
+    AssertReturn(cTargetPages, VERR_INTERNAL_ERROR);
+
+    int rc = pgmR0DynMapAddSeg(pThis, cTargetPages);
+    if (rc == VERR_NO_MEMORY)
+    {
+        /*
+         * One big segment didn't work out; retry with a series of smaller
+         * ones until we reach the target size or run out of memory.
+         */
+        for (;;)
+        {
+            rc = pgmR0DynMapAddSeg(pThis, PGMR0DYNMAP_SMALL_SEG_PAGES);
+            if (RT_FAILURE(rc) || pThis->cPages >= cTargetPages)
+                break;
+        }
+        if (rc == VERR_NO_MEMORY && pThis->cPages >= cMinPages)
+            rc = VINF_SUCCESS;          /* the bare minimum is good enough */
+        if (rc == VERR_NO_MEMORY)
+        {
+            /* Couldn't even reach the minimum - undo everything and fail. */
+            if (pThis->cPages)
+                pgmR0DynMapTearDown(pThis);
+            rc = VERR_PGM_DYNMAP_SETUP_ERROR;
+        }
+    }
+    Assert(ASMGetFlags() & X86_EFL_IF);
+
+#if PGMR0DYNMAP_GUARD_PAGES > 0
+    /* paranoia */
+    if (RT_SUCCESS(rc))
+        pgmR0DynMapTlbShootDown(pThis);
+#endif
+    return rc;
+}
+
+
+/**
+ * Expands the dynamic mapping cache up to the recalculated target size.
+ *
+ * NOTE(review): the previous header said "Called by PGMR0DynMapInitVM under
+ * the init lock", which appears copy-pasted from pgmR0DynMapSetup() - confirm
+ * the actual caller(s) and locking context.
+ *
+ * @returns VBox status code.
+ * @param   pThis       The dynamic mapping cache instance.
+ */
+static int pgmR0DynMapExpand(PPGMRZDYNMAP pThis)
+{
+    /*
+     * Calc the new target size and add a segment of the appropriate size.
+     */
+    uint32_t cMinPages;
+    uint32_t cPages = pgmR0DynMapCalcNewSize(pThis, &cMinPages);
+    AssertReturn(cPages, VERR_INTERNAL_ERROR);
+    if (pThis->cPages >= cPages)
+        return VINF_SUCCESS;   /* already big enough */
+
+    uint32_t cAdd = cPages - pThis->cPages;
+    int rc = pgmR0DynMapAddSeg(pThis, cAdd);
+    if (rc == VERR_NO_MEMORY)
+    {
+        /*
+         * Try adding smaller segments.
+         */
+        do
+            rc = pgmR0DynMapAddSeg(pThis, PGMR0DYNMAP_SMALL_SEG_PAGES);
+        while (RT_SUCCESS(rc) && pThis->cPages < cPages);
+        if (rc == VERR_NO_MEMORY && pThis->cPages >= cMinPages)
+            rc = VINF_SUCCESS;   /* reaching the minimum is good enough */
+        if (rc == VERR_NO_MEMORY)
+            rc = VERR_PGM_DYNMAP_EXPAND_ERROR;
+    }
+    Assert(ASMGetFlags() & X86_EFL_IF);
+
+#if PGMR0DYNMAP_GUARD_PAGES > 0
+    /* paranoia */
+    if (RT_SUCCESS(rc))
+        pgmR0DynMapTlbShootDown(pThis);
+#endif
+    return rc;
+}
+
+
+/**
+ * Called by PGMR0DynMapTermVM under the init lock.
+ *
+ * Restores the original (saved) PTEs, shoots down the TLBs on all CPUs and
+ * frees all segments and arrays.  (The stale "@returns VBox status code" tag
+ * was dropped - this function is void.)
+ *
+ * @param   pThis       The dynamic mapping cache instance.
+ */
+static void pgmR0DynMapTearDown(PPGMRZDYNMAP pThis)
+{
+    /*
+     * Restore the original page table entries
+     */
+    PPGMRZDYNMAPENTRY   paPages = pThis->paPages;
+    uint32_t            iPage   = pThis->cPages;
+    if (pThis->fLegacyMode)
+    {
+        X86PGUINT const    *paSavedPTEs = (X86PGUINT const *)pThis->pvSavedPTEs;
+        while (iPage-- > 0)
+        {
+            X86PGUINT       uOld  = paPages[iPage].uPte.pLegacy->u;
+            X86PGUINT       uOld2 = uOld; NOREF(uOld2);
+            X86PGUINT       uNew  = paSavedPTEs[iPage];
+            /* The cmpxchg is expected to succeed first try; a retry means
+               someone modified the PTE behind our back, which the assertion
+               flags before trying again with the updated uOld. */
+            while (!ASMAtomicCmpXchgExU32(&paPages[iPage].uPte.pLegacy->u, uNew, uOld, &uOld))
+                AssertMsgFailed(("uOld=%#x uOld2=%#x uNew=%#x\n", uOld, uOld2, uNew));
+            Assert(paPages[iPage].uPte.pLegacy->u == paSavedPTEs[iPage]);
+        }
+    }
+    else
+    {
+        X86PGPAEUINT const *paSavedPTEs = (X86PGPAEUINT const *)pThis->pvSavedPTEs;
+        while (iPage-- > 0)
+        {
+            X86PGPAEUINT    uOld  = paPages[iPage].uPte.pPae->u;
+            X86PGPAEUINT    uOld2 = uOld; NOREF(uOld2);
+            X86PGPAEUINT    uNew  = paSavedPTEs[iPage];
+            /* Same as the legacy case above, PAE PTE width. */
+            while (!ASMAtomicCmpXchgExU64(&paPages[iPage].uPte.pPae->u, uNew, uOld, &uOld))
+                AssertMsgFailed(("uOld=%#llx uOld2=%#llx uNew=%#llx\n", uOld, uOld2, uNew));
+            Assert(paPages[iPage].uPte.pPae->u == paSavedPTEs[iPage]);
+        }
+    }
+
+    /*
+     * Shoot down the TLBs on all CPUs before freeing them.
+     */
+    pgmR0DynMapTlbShootDown(pThis);
+
+    /*
+     * Free the segments.
+     */
+    while (pThis->pSegHead)
+    {
+        int             rc;
+        PPGMR0DYNMAPSEG pSeg = pThis->pSegHead;
+        pThis->pSegHead = pSeg->pNext;
+
+        uint32_t iPT = pSeg->cPTs;
+        while (iPT-- > 0)
+        {
+            rc = RTR0MemObjFree(pSeg->ahMemObjPTs[iPT], true /* fFreeMappings */); AssertRC(rc);
+            pSeg->ahMemObjPTs[iPT] = NIL_RTR0MEMOBJ;
+        }
+        rc = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */); AssertRC(rc);
+        pSeg->hMemObj   = NIL_RTR0MEMOBJ;
+        pSeg->pNext     = NULL;
+        pSeg->iPage     = UINT16_MAX;
+        pSeg->cPages    = 0;
+        pSeg->cPTs      = 0;
+        RTMemFree(pSeg);
+    }
+
+    /*
+     * Free the arrays and restore the initial state.
+     * The cLoadMax value is left behind for the next setup.
+     */
+    RTMemFree(pThis->paPages);
+    pThis->paPages = NULL;
+    RTMemFree(pThis->pvSavedPTEs);
+    pThis->pvSavedPTEs = NULL;
+    pThis->cPages = 0;
+    pThis->cLoad = 0;
+    pThis->cGuardPages = 0;
+}
+
+#endif /* IN_RING0 */
+#ifdef IN_RC
+
+/**
+ * Initializes the dynamic mapping cache in raw-mode context.
+ *
+ * @returns VBox status code.
+ * @param   pVM                 The VM handle.
+ */
+VMMRCDECL(int) PGMRCDynMapInit(PVM pVM)
+{
+    /*
+     * The instance data and the page entry array are carved out of a single
+     * hyper-heap allocation; the entries follow the 32-byte aligned instance
+     * structure.
+     */
+    size_t const    cPages = MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE;
+    size_t const    cb     = RT_ALIGN_Z(sizeof(PGMRZDYNMAP), 32)
+                           + sizeof(PGMRZDYNMAPENTRY) * cPages;
+    PPGMRZDYNMAP    pThis  = NULL;
+    int rc = MMHyperAlloc(pVM, cb, 32, MM_TAG_PGM, (void **)&pThis);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    /* Basic instance state. */
+    pThis->u32Magic     = PGMRZDYNMAP_MAGIC;
+    pThis->paPages      = RT_ALIGN_PT(pThis + 1, 32, PPGMRZDYNMAPENTRY);
+    pThis->cPages       = cPages;
+    pThis->fLegacyMode  = PGMGetHostMode(pVM) == PGMMODE_32_BIT;
+    pThis->cLoad        = 0;
+    pThis->cMaxLoad     = 0;
+    pThis->cGuardPages  = 0;
+    pThis->cUsers       = 1;
+
+    /*
+     * Seed the page entries: each covers one page of the dynamic mapping
+     * area and points at the corresponding (legacy or PAE) PTE.
+     */
+    bool const fLegacyMode = pThis->fLegacyMode;
+    for (size_t i = 0; i < cPages; i++)
+    {
+        PPGMRZDYNMAPENTRY pEntry = &pThis->paPages[i];
+        pEntry->HCPhys = NIL_RTHCPHYS;
+        pEntry->pvPage = pVM->pgm.s.pbDynPageMapBaseGC + i * PAGE_SIZE;
+        pEntry->cRefs  = 0;
+        if (fLegacyMode)
+            pEntry->uPte.pLegacy = &pVM->pgm.s.paDynPageMap32BitPTEsGC[i];
+        else
+            pEntry->uPte.pPae    = &pVM->pgm.s.paDynPageMapPaePTEsGC[i];
+    }
+
+    pVM->pgm.s.pRCDynMap = pThis;
+
+    /*
+     * Initialize the autosets for the VM before declaring success.
+     */
+    int rc2 = pgmRZDynMapInitAutoSetsForVM(pVM);
+    if (RT_FAILURE(rc2))
+        return rc2;
+
+    return VINF_SUCCESS;
+}
+
+#endif /* IN_RC */
+
+/**
+ * Release references to a page, caller owns the spin lock.
+ *
+ * @param   pThis       The dynamic mapping cache instance.
+ * @param   iPage       The page.
+ * @param   cRefs       The number of references to release.
+ */
+DECLINLINE(void) pgmRZDynMapReleasePageLocked(PPGMRZDYNMAP pThis, uint32_t iPage, int32_t cRefs)
+{
+    /* ASMAtomicSubS32 returns the value prior to subtraction, so subtract
+       once more to get the remaining reference count. */
+    int32_t const cRemaining = ASMAtomicSubS32(&pThis->paPages[iPage].cRefs, cRefs) - cRefs;
+    AssertMsg(cRemaining >= 0, ("%d\n", cRemaining));
+    if (!cRemaining)
+        pThis->cLoad--;     /* last reference dropped - the page is unloaded */
+}
+
+
+/**
+ * Release references to a page, caller does not own the spin lock.
+ *
+ * Acquires the cache spinlock and forwards to
+ * pgmRZDynMapReleasePageLocked().
+ *
+ * @param   pThis       The dynamic mapping cache instance.
+ * @param   iPage       The page.
+ * @param   cRefs       The number of references to release.
+ */
+static void pgmRZDynMapReleasePage(PPGMRZDYNMAP pThis, uint32_t iPage, uint32_t cRefs)
+{
+    PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
+    pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs);
+    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
+}
+
+
+/**
+ * pgmR0DynMapPage worker that deals with the tedious bits.
+ *
+ * Finds a reusable or free cache entry for HCPhys by linear probing, and
+ * rewrites that entry's PTE to map the requested physical page.
+ *
+ * @returns The page index on success, UINT32_MAX on failure.
+ * @param   pThis       The dynamic mapping cache instance.
+ * @param   HCPhys      The address of the page to be mapped.
+ * @param   iPage       The page index pgmR0DynMapPage hashed HCPhys to.
+ * @param   pVCpu       The current CPU, for statistics.
+ * @param   pfNew       Set to @c true if a new entry was made and @c false if
+ *                      an old entry was found and reused.
+ */
+static uint32_t pgmR0DynMapPageSlow(PPGMRZDYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage, PVMCPU pVCpu, bool *pfNew)
+{
+    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlow);
+
+    /*
+     * Check if any of the first 3 pages are unreferenced since the caller
+     * already has made sure they aren't matching.
+     */
+#ifdef VBOX_WITH_STATISTICS
+    bool                fLooped = false;
+#endif
+    uint32_t const      cPages  = pThis->cPages;
+    PPGMRZDYNMAPENTRY   paPages = pThis->paPages;
+    uint32_t            iFreePage;
+    if (!paPages[iPage].cRefs)
+        iFreePage = iPage;
+    else if (!paPages[(iPage + 1) % cPages].cRefs)
+        iFreePage   = (iPage + 1) % cPages;
+    else if (!paPages[(iPage + 2) % cPages].cRefs)
+        iFreePage   = (iPage + 2) % cPages;
+    else
+    {
+        /*
+         * Search for an unused or matching entry.
+         */
+        iFreePage = (iPage + 3) % cPages;
+        for (;;)
+        {
+            if (paPages[iFreePage].HCPhys == HCPhys)
+            {
+                /* Found a live mapping of the same physical page - reuse it. */
+                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlowLoopHits);
+                *pfNew = false;
+                return iFreePage;
+            }
+            if (!paPages[iFreePage].cRefs)
+                break;
+
+            /* advance */
+            iFreePage = (iFreePage + 1) % cPages;
+            if (RT_UNLIKELY(iFreePage == iPage))
+                return UINT32_MAX;  /* wrapped all the way around - cache full */
+        }
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlowLoopMisses);
+#ifdef VBOX_WITH_STATISTICS
+        fLooped = true;
+#endif
+    }
+    Assert(iFreePage < cPages);
+
+#if 0 //def VBOX_WITH_STATISTICS
+    /* Check for lost hits. */
+    if (!fLooped)
+        for (uint32_t iPage2 = (iPage + 3) % cPages; iPage2 != iPage; iPage2 = (iPage2 + 1) % cPages)
+            if (paPages[iPage2].HCPhys == HCPhys)
+                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZDynMapPageSlowLostHits);
+#endif
+
+    /*
+     * Setup the new entry.
+     */
+    *pfNew = true;
+    /*Log6(("pgmR0DynMapPageSlow: old - %RHp %#x %#llx\n", paPages[iFreePage].HCPhys, paPages[iFreePage].cRefs, paPages[iFreePage].uPte.pPae->u));*/
+    paPages[iFreePage].HCPhys = HCPhys;
+#ifndef IN_RC
+    /* Ring-0: mark the mapping as pending invalidation on every CPU. */
+    RTCpuSetFill(&paPages[iFreePage].PendingSet);
+#endif
+    if (pThis->fLegacyMode)
+    {
+        /* Keep the caching attribute bits from the old PTE; set P/RW/A/D and
+           the new page frame. */
+        X86PGUINT       uOld  = paPages[iFreePage].uPte.pLegacy->u;
+        X86PGUINT       uOld2 = uOld; NOREF(uOld2);
+        X86PGUINT       uNew  = (uOld & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
+                              | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
+                              | (HCPhys & X86_PTE_PG_MASK);
+        while (!ASMAtomicCmpXchgExU32(&paPages[iFreePage].uPte.pLegacy->u, uNew, uOld, &uOld))
+            AssertMsgFailed(("uOld=%#x uOld2=%#x uNew=%#x\n", uOld, uOld2, uNew));
+        Assert(paPages[iFreePage].uPte.pLegacy->u == uNew);
+    }
+    else
+    {
+        /* Same as above, PAE PTE format. */
+        X86PGPAEUINT    uOld  = paPages[iFreePage].uPte.pPae->u;
+        X86PGPAEUINT    uOld2 = uOld; NOREF(uOld2);
+        X86PGPAEUINT    uNew  = (uOld & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
+                              | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
+                              | (HCPhys & X86_PTE_PAE_PG_MASK);
+        while (!ASMAtomicCmpXchgExU64(&paPages[iFreePage].uPte.pPae->u, uNew, uOld, &uOld))
+            AssertMsgFailed(("uOld=%#llx uOld2=%#llx uNew=%#llx\n", uOld, uOld2, uNew));
+        Assert(paPages[iFreePage].uPte.pPae->u == uNew);
+        /*Log6(("pgmR0DynMapPageSlow: #%x - %RHp %p %#llx\n", iFreePage, HCPhys, paPages[iFreePage].pvPage, uNew));*/
+    }
+    return iFreePage;
+}
+
+
+/**
+ * Maps a page into the pool.
+ *
+ * @returns Page index on success, UINT32_MAX on failure.
+ * @param   pThis       The dynamic mapping cache instance.
+ * @param   HCPhys      The address of the page to be mapped.
+ * @param   iRealCpu    The real cpu set index. (optimization)
+ * @param   pVCpu       The current CPU (for statistics).
+ * @param   ppvPage     Where to return the page address (set to NULL on
+ *                      failure).
+ */
+DECLINLINE(uint32_t) pgmR0DynMapPage(PPGMRZDYNMAP pThis, RTHCPHYS HCPhys, int32_t iRealCpu, PVMCPU pVCpu, void **ppvPage)
+{
+    PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
+    AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
+    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPage);
+
+    /*
+     * Find an entry, if possible a matching one. The HCPhys address is hashed
+     * down to a page index, collisions are handled by linear searching.
+     * Optimized for a hit in the first 3 pages.
+     *
+     * Field easy hits here and defer the tedious searching and inserting
+     * to pgmR0DynMapPageSlow().
+     */
+    bool                fNew    = false;
+    uint32_t const      cPages  = pThis->cPages;
+    uint32_t            iPage   = (HCPhys >> PAGE_SHIFT) % cPages;
+    PPGMRZDYNMAPENTRY   paPages = pThis->paPages;
+    if (RT_LIKELY(paPages[iPage].HCPhys == HCPhys))
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits0);
+    else
+    {
+        uint32_t        iPage2 = (iPage + 1) % cPages;
+        if (RT_LIKELY(paPages[iPage2].HCPhys == HCPhys))
+        {
+            iPage = iPage2;
+            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits1);
+        }
+        else
+        {
+            iPage2 = (iPage + 2) % cPages;
+            if (paPages[iPage2].HCPhys == HCPhys)
+            {
+                iPage = iPage2;
+                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits2);
+            }
+            else
+            {
+                /* No hit in the first 3 slots - take the slow path. */
+                iPage = pgmR0DynMapPageSlow(pThis, HCPhys, iPage, pVCpu, &fNew);
+                if (RT_UNLIKELY(iPage == UINT32_MAX))
+                {
+                    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
+                    *ppvPage = NULL;
+                    return iPage;
+                }
+            }
+        }
+    }
+
+    /*
+     * Reference it, update statistics and get the return address.
+     */
+    int32_t cRefs = ASMAtomicIncS32(&paPages[iPage].cRefs);
+    if (cRefs == 1)
+    {
+        pThis->cLoad++;
+        if (pThis->cLoad > pThis->cMaxLoad)
+            pThis->cMaxLoad = pThis->cLoad;
+        AssertMsg(pThis->cLoad <= pThis->cPages - pThis->cGuardPages, ("%d/%d\n", pThis->cLoad, pThis->cPages - pThis->cGuardPages));
+    }
+    else if (RT_UNLIKELY(cRefs <= 0))
+    {
+        /* Reference count was corrupt (or we hit a guard page entry) - undo
+           the increment and bail.  Note: iPage is a uint32_t, so it must be
+           formatted with %#x; the old %p here read a pointer-sized vararg. */
+        ASMAtomicDecS32(&paPages[iPage].cRefs);
+        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
+        *ppvPage = NULL;
+        AssertLogRelMsgFailedReturn(("cRefs=%d iPage=%#x HCPhys=%RHp\n", cRefs, iPage, HCPhys), UINT32_MAX);
+    }
+    void *pvPage = paPages[iPage].pvPage;
+
+#ifndef IN_RC
+    /*
+     * Invalidate the entry?
+     */
+    bool fInvalidateIt = RTCpuSetIsMemberByIndex(&paPages[iPage].PendingSet, iRealCpu);
+    if (RT_UNLIKELY(fInvalidateIt))
+        RTCpuSetDelByIndex(&paPages[iPage].PendingSet, iRealCpu);
+#endif
+
+    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
+
+    /*
+     * Do the actual invalidation outside the spinlock.
+     */
+#ifdef IN_RC
+    if (RT_UNLIKELY(fNew))
+#else
+    if (RT_UNLIKELY(fInvalidateIt))
+#endif
+    {
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageInvlPg);
+        ASMInvalidatePage(pvPage);
+    }
+
+    *ppvPage = pvPage;
+    return iPage;
+}
+
+
+/**
+ * Asserts the integrity of the pool.
+ *
+ * @returns VBox status code (VINF_SUCCESS when the pool is consistent).
+ * @param   pThis       The dynamic mapping cache instance.  NULL is
+ *                      tolerated and treated as success.
+ */
+static int pgmRZDynMapAssertIntegrity(PPGMRZDYNMAP pThis)
+{
+    /*
+     * Basic pool stuff that doesn't require any lock, just assumes we're a user.
+     */
+    if (!pThis)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pThis, VERR_INVALID_POINTER);
+    AssertReturn(pThis->u32Magic == PGMRZDYNMAP_MAGIC, VERR_INVALID_MAGIC);
+    if (!pThis->cUsers)
+        return VERR_INVALID_PARAMETER;
+
+    int                 rc          = VINF_SUCCESS;
+    PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
+
+/** Releases the spinlock and returns VERR_INTERNAL_ERROR (after raising an
+ * assertion message) when @a expr is false. */
+#define CHECK_RET(expr, a) \
+    do { \
+        if (RT_UNLIKELY(!(expr))) \
+        { \
+            PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); \
+            RTAssertMsg1Weak(#expr, __LINE__, __FILE__, __PRETTY_FUNCTION__); \
+            RTAssertMsg2Weak a; \
+            return VERR_INTERNAL_ERROR; \
+        } \
+    } while (0)
+
+    /*
+     * Check that the PTEs are correct.
+     */
+    uint32_t            cGuard      = 0;
+    uint32_t            cLoad       = 0;
+    PPGMRZDYNMAPENTRY   paPages     = pThis->paPages;
+    uint32_t            iPage       = pThis->cPages;
+    if (pThis->fLegacyMode)
+    {
+#ifdef IN_RING0
+        PCX86PGUINT     paSavedPTEs = (PCX86PGUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
+#endif
+        while (iPage-- > 0)
+        {
+            CHECK_RET(!((uintptr_t)paPages[iPage].pvPage & PAGE_OFFSET_MASK), ("#%u: %p\n", iPage, paPages[iPage].pvPage));
+            if (    paPages[iPage].cRefs  == PGMR0DYNMAP_GUARD_PAGE_REF_COUNT
+                &&  paPages[iPage].HCPhys == PGMR0DYNMAP_GUARD_PAGE_HCPHYS)
+            {
+                /* Guard page: PTE must carry the guard value / cleared P bit. */
+#ifdef PGMR0DYNMAP_GUARD_NP
+                CHECK_RET(paPages[iPage].uPte.pLegacy->u == (paSavedPTEs[iPage] & ~(X86PGUINT)X86_PTE_P),
+                          ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, paSavedPTEs[iPage]));
+#else
+                CHECK_RET(paPages[iPage].uPte.pLegacy->u == PGMR0DYNMAP_GUARD_PAGE_LEGACY_PTE,
+                          ("#%u: %#x", iPage, paPages[iPage].uPte.pLegacy->u));
+#endif
+                cGuard++;
+            }
+            else if (paPages[iPage].HCPhys != NIL_RTHCPHYS)
+            {
+                /* Mapped page: reconstruct the expected PTE and compare. */
+                CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
+                X86PGUINT uPte = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
+#ifdef IN_RING0
+                               | (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
+#endif
+                               | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
+                CHECK_RET(paPages[iPage].uPte.pLegacy->u == uPte,
+                          ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, uPte));
+                if (paPages[iPage].cRefs)
+                    cLoad++;
+            }
+#ifdef IN_RING0
+            else
+                CHECK_RET(paPages[iPage].uPte.pLegacy->u == paSavedPTEs[iPage],
+                          ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, paSavedPTEs[iPage]));
+#endif
+        }
+    }
+    else
+    {
+#ifdef IN_RING0
+        PCX86PGPAEUINT  paSavedPTEs = (PCX86PGPAEUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
+#endif
+        while (iPage-- > 0)
+        {
+            CHECK_RET(!((uintptr_t)paPages[iPage].pvPage & PAGE_OFFSET_MASK), ("#%u: %p\n", iPage, paPages[iPage].pvPage));
+            if (    paPages[iPage].cRefs  == PGMR0DYNMAP_GUARD_PAGE_REF_COUNT
+                &&  paPages[iPage].HCPhys == PGMR0DYNMAP_GUARD_PAGE_HCPHYS)
+            {
+                /* Guard page: PTE must carry the guard value / cleared P bit. */
+#ifdef PGMR0DYNMAP_GUARD_NP
+                CHECK_RET(paPages[iPage].uPte.pPae->u == (paSavedPTEs[iPage] & ~(X86PGPAEUINT)X86_PTE_P),
+                          ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, paSavedPTEs[iPage]));
+#else
+                CHECK_RET(paPages[iPage].uPte.pPae->u == PGMR0DYNMAP_GUARD_PAGE_PAE_PTE,
+                          ("#%u: %#llx", iPage, paPages[iPage].uPte.pPae->u));
+#endif
+                cGuard++;
+            }
+            else if (paPages[iPage].HCPhys != NIL_RTHCPHYS)
+            {
+                /* Mapped page: reconstruct the expected PTE and compare.
+                   (Fixed: the failure message used to print uPte.pLegacy->u -
+                   the wrong union member and a 32-bit value for %#llx.) */
+                CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
+                X86PGPAEUINT uPte = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
+#ifdef IN_RING0
+                                  | (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
+#endif
+                                  | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
+                CHECK_RET(paPages[iPage].uPte.pPae->u == uPte,
+                          ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, uPte));
+                if (paPages[iPage].cRefs)
+                    cLoad++;
+            }
+#ifdef IN_RING0
+            else
+                CHECK_RET(paPages[iPage].uPte.pPae->u == paSavedPTEs[iPage],
+                          ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, paSavedPTEs[iPage]));
+#endif
+        }
+    }
+
+    /* The per-page tallies must agree with the global counters. */
+    CHECK_RET(cLoad == pThis->cLoad, ("%u %u\n", cLoad, pThis->cLoad));
+    CHECK_RET(cGuard == pThis->cGuardPages, ("%u %u\n", cGuard, pThis->cGuardPages));
+
+#undef CHECK_RET
+    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
+    return VINF_SUCCESS;
+}
+
+#ifdef IN_RING0
+/**
+ * Asserts the integrity of the pool.
+ *
+ * Ring-0 wrapper forwarding the global ring-0 instance to the common worker.
+ *
+ * @returns VBox status code.
+ */
+VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void)
+{
+    return pgmRZDynMapAssertIntegrity(g_pPGMR0DynMap);
+}
+#endif /* IN_RING0 */
+
+#ifdef IN_RC
+/**
+ * Asserts the integrity of the pool.
+ *
+ * Raw-mode wrapper forwarding the VM's raw-mode instance to the common worker.
+ *
+ * @returns VBox status code.
+ * @param   pVM     The VM handle.
+ */
+VMMRCDECL(int) PGMRCDynMapAssertIntegrity(PVM pVM)
+{
+    return pgmRZDynMapAssertIntegrity((PPGMRZDYNMAP)pVM->pgm.s.pRCDynMap);
+}
+#endif /* IN_RC */
+
+
+/**
+ * As a final resort for a (somewhat) full auto set or full cache, try merge
+ * duplicate entries and flush the ones we can.
+ *
+ * Two passes are made over each entry: first duplicate entries referring to
+ * the same cache page are folded together, then entries whose references are
+ * fully cancelled out by unused hints (cUnrefs) are released back to the
+ * mapping cache.
+ *
+ * @param   pSet        The set.
+ */
+static void pgmDynMapOptimizeAutoSet(PPGMMAPSET pSet)
+{
+    LogFlow(("pgmDynMapOptimizeAutoSet\n"));
+
+    for (uint32_t i = 0 ; i < pSet->cEntries; i++)
+    {
+        /*
+         * Try merge entries.
+         *
+         * Only done while no subset is open (iSubset == UINT32_MAX) or the
+         * subset boundary lies beyond the current entries, since merging
+         * moves entries around and would otherwise corrupt the subset.
+         */
+        uint16_t const  iPage = pSet->aEntries[i].iPage;
+        uint32_t        j     = i + 1;
+        while (   j < pSet->cEntries
+               && (   pSet->iSubset == UINT32_MAX
+                   || pSet->iSubset < pSet->cEntries) )
+        {
+            if (pSet->aEntries[j].iPage != iPage)
+                j++;
+            else
+            {
+                /* Sum up the reference counters of both entries, then cancel
+                   unused hints against inlined references where possible. */
+                uint32_t const  cHardRefs    = (uint32_t)pSet->aEntries[i].cRefs
+                                             + (uint32_t)pSet->aEntries[j].cRefs;
+                uint32_t        cInlinedRefs = (uint32_t)pSet->aEntries[i].cInlinedRefs
+                                             + (uint32_t)pSet->aEntries[j].cInlinedRefs;
+                uint32_t        cUnrefs      = (uint32_t)pSet->aEntries[i].cUnrefs
+                                             + (uint32_t)pSet->aEntries[j].cUnrefs;
+                uint32_t        cSub         = RT_MIN(cUnrefs, cInlinedRefs);
+                cInlinedRefs -= cSub;
+                cUnrefs      -= cSub;
+
+                /* The per-entry counters are 16-bit, so merging is only safe
+                   when the combined counts still fit. */
+                if (    cHardRefs    < UINT16_MAX
+                    &&  cInlinedRefs < UINT16_MAX
+                    &&  cUnrefs      < UINT16_MAX)
+                {
+                    /* merge j into i removing j. */
+                    Log2(("pgmDynMapOptimizeAutoSet: Merging #%u into #%u\n", j, i));
+                    pSet->aEntries[i].cRefs        = cHardRefs;
+                    pSet->aEntries[i].cInlinedRefs = cInlinedRefs;
+                    pSet->aEntries[i].cUnrefs      = cUnrefs;
+                    pSet->cEntries--;
+                    if (j < pSet->cEntries)
+                    {
+                        /* Fill the hole with the last entry; j is re-examined
+                           on the next iteration since it now holds new data. */
+                        pSet->aEntries[j] = pSet->aEntries[pSet->cEntries];
+                        PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[pSet->cEntries]);
+                    }
+                    else
+                        PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]);
+                }
+#if 0 /* too complicated, skip it. */
+                else
+                {
+                    /* migrate the max number of refs from j into i and quit the inner loop. */
+                    uint32_t cMigrate = UINT16_MAX - 1 - pSet->aEntries[i].cRefs;
+                    Assert(pSet->aEntries[j].cRefs > cMigrate);
+                    pSet->aEntries[j].cRefs -= cMigrate;
+                    pSet->aEntries[i].cRefs = UINT16_MAX - 1;
+                    break;
+                }
+#endif
+            }
+        }
+
+        /*
+         * Try make use of the unused hinting (cUnrefs) to evict entries
+         * from both the set as well as the mapping cache.
+         */
+
+        uint32_t const cTotalRefs = (uint32_t)pSet->aEntries[i].cRefs + pSet->aEntries[i].cInlinedRefs;
+        Log2(("pgmDynMapOptimizeAutoSet: #%u/%u/%u pvPage=%p iPage=%u cRefs=%u cInlinedRefs=%u cUnrefs=%u cTotalRefs=%u\n",
+              i,
+              pSet->iSubset,
+              pSet->cEntries,
+              pSet->aEntries[i].pvPage,
+              pSet->aEntries[i].iPage,
+              pSet->aEntries[i].cRefs,
+              pSet->aEntries[i].cInlinedRefs,
+              pSet->aEntries[i].cUnrefs,
+              cTotalRefs));
+        Assert(cTotalRefs >= pSet->aEntries[i].cUnrefs);
+
+        /* Only evict when every reference has been hinted unused, and never
+           across an open subset boundary (same reasoning as above). */
+        if (    cTotalRefs == pSet->aEntries[i].cUnrefs
+            &&  (   pSet->iSubset == UINT32_MAX
+                 || pSet->iSubset < pSet->cEntries)
+           )
+        {
+            Log2(("pgmDynMapOptimizeAutoSet: Releasing iPage=%d/%p\n", pSet->aEntries[i].iPage, pSet->aEntries[i].pvPage));
+            //LogFlow(("pgmDynMapOptimizeAutoSet: Releasing iPage=%d/%p\n", pSet->aEntries[i].iPage, pSet->aEntries[i].pvPage));
+            pgmRZDynMapReleasePage(PGMRZDYNMAP_SET_2_DYNMAP(pSet),
+                                   pSet->aEntries[i].iPage,
+                                   pSet->aEntries[i].cRefs);
+            pSet->cEntries--;
+            if (i < pSet->cEntries)
+            {
+                pSet->aEntries[i] = pSet->aEntries[pSet->cEntries];
+                PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[pSet->cEntries]);
+            }
+
+            /* Re-examine slot i, it now holds the moved entry. */
+            i--;
+        }
+    }
+}
+
+
+
+
+/**
+ * Signals the start of a new set of mappings.
+ *
+ * Mostly for strictness. PGMDynMapHCPage won't work unless this
+ * API is called.
+ *
+ * @param   pVCpu       The shared data for the current virtual CPU.
+ */
+VMMDECL(void) PGMRZDynMapStartAutoSet(PVMCPU pVCpu)
+{
+    LogFlow(("PGMRZDynMapStartAutoSet:\n"));
+    /* The set must be closed and have no open subset before it can be (re)opened. */
+    Assert(pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED);
+    Assert(pVCpu->pgm.s.AutoSet.iSubset == UINT32_MAX);
+    pVCpu->pgm.s.AutoSet.cEntries = 0;
+    /* Remember the host CPU so PGMR0DynMapMigrateAutoSet can detect migration. */
+    pVCpu->pgm.s.AutoSet.iCpu = PGMRZDYNMAP_CUR_CPU();
+}
+
+
+#ifdef IN_RING0
+/**
+ * Starts or migrates the autoset of a virtual CPU.
+ *
+ * This is used by HWACCMR0Enter.  When we've longjumped out of the HWACCM
+ * execution loop with the set open, we'll migrate it when re-entering.  While
+ * under normal circumstances, we'll start it so VMXR0LoadGuestState can access
+ * guest memory.
+ *
+ * @returns @c true if started, @c false if migrated.
+ * @param   pVCpu       The shared data for the current virtual CPU.
+ * @thread  EMT
+ */
+VMMR0DECL(bool) PGMR0DynMapStartOrMigrateAutoSet(PVMCPU pVCpu)
+{
+    /* A closed set means the previous exit was clean, so start a fresh one;
+       otherwise the set was left open by a longjmp and must be migrated. */
+    bool fStartIt = pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED;
+    if (fStartIt)
+        PGMRZDynMapStartAutoSet(pVCpu);
+    else
+        PGMR0DynMapMigrateAutoSet(pVCpu);
+    return fStartIt;
+}
+#endif /* IN_RING0 */
+
+
+/**
+ * Worker that performs the actual flushing of the set.
+ *
+ * Walks the first @a cEntries entries backwards, releasing each entry's page
+ * references back to the mapping cache and zapping the entry.
+ *
+ * @param   pSet        The set to flush.
+ * @param   cEntries    The number of entries.
+ */
+DECLINLINE(void) pgmDynMapFlushAutoSetWorker(PPGMMAPSET pSet, uint32_t cEntries)
+{
+    /*
+     * Release any pages it's referencing.
+     */
+    if (    cEntries != 0
+        &&  RT_LIKELY(cEntries <= RT_ELEMENTS(pSet->aEntries)))
+    {
+        PPGMRZDYNMAP    pThis   = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
+        PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
+
+        uint32_t i = cEntries;
+        while (i-- > 0)
+        {
+            uint32_t iPage = pSet->aEntries[i].iPage;
+            Assert(iPage < pThis->cPages);
+            int32_t  cRefs = pSet->aEntries[i].cRefs;
+            Assert(cRefs > 0);
+            pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs);
+
+            PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[i]);
+        }
+
+        /* Guard pages are permanently loaded; the rest must not exceed the pool. */
+        Assert(pThis->cLoad <= pThis->cPages - pThis->cGuardPages);
+        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
+    }
+}
+
+
+/**
+ * Releases the dynamic memory mappings made by PGMDynMapHCPage and associates
+ * since the PGMDynMapStartAutoSet call.
+ *
+ * @param   pVCpu       The shared data for the current virtual CPU.
+ */
+VMMDECL(void) PGMRZDynMapReleaseAutoSet(PVMCPU pVCpu)
+{
+    PPGMMAPSET  pSet = &pVCpu->pgm.s.AutoSet;
+
+    /*
+     * Close and flush the set.
+     */
+    uint32_t    cEntries = pSet->cEntries;
+    AssertReturnVoid(cEntries != PGMMAPSET_CLOSED);
+    pSet->cEntries = PGMMAPSET_CLOSED;
+    pSet->iSubset = UINT32_MAX;
+    pSet->iCpu = -1;
+
+    /* Record the fill level in 10% buckets.  In RC the effective capacity is
+       limited by the hypervisor dynamic mapping area, not the array size. */
+#ifdef IN_RC
+    if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]);
+    else
+#endif
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
+    AssertMsg(cEntries < PGMMAPSET_MAX_FILL, ("%u\n", cEntries));
+    /* Promote the log level for unusually full sets (above 50%). */
+    if (cEntries > RT_ELEMENTS(pSet->aEntries) * 50 / 100)
+        Log(("PGMRZDynMapReleaseAutoSet: cEntries=%d\n", cEntries));
+    else
+        LogFlow(("PGMRZDynMapReleaseAutoSet: cEntries=%d\n", cEntries));
+
+    pgmDynMapFlushAutoSetWorker(pSet, cEntries);
+}
+
+
+/**
+ * Flushes the set if it's above a certain threshold.
+ *
+ * Unlike PGMRZDynMapReleaseAutoSet, the set remains open afterwards; only the
+ * entries are released when the 45% fill threshold has been reached.
+ *
+ * @param   pVCpu       The shared data for the current virtual CPU.
+ */
+VMMDECL(void) PGMRZDynMapFlushAutoSet(PVMCPU pVCpu)
+{
+    PPGMMAPSET  pSet = &pVCpu->pgm.s.AutoSet;
+    AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags()));
+
+    /*
+     * Only flush it if it's 45% full.
+     */
+    uint32_t cEntries = pSet->cEntries;
+    AssertReturnVoid(cEntries != PGMMAPSET_CLOSED);
+    /* Record the fill level in 10% buckets.  In RC the effective capacity is
+       limited by the hypervisor dynamic mapping area, not the array size. */
+#ifdef IN_RC
+    if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]);
+    else
+#endif
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
+    if (cEntries >= RT_ELEMENTS(pSet->aEntries) * 45 / 100)
+    {
+        pSet->cEntries = 0;
+
+        AssertMsg(cEntries < PGMMAPSET_MAX_FILL, ("%u\n", cEntries));
+        /* Log the saved count; pSet->cEntries was just zeroed above and would
+           always read 0 here. */
+        Log(("PGMRZDynMapFlushAutoSet: cEntries=%d\n", cEntries));
+
+        pgmDynMapFlushAutoSetWorker(pSet, cEntries);
+        AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags()));
+    }
+}
+
+
+#ifndef IN_RC
+/**
+ * Migrates the automatic mapping set of the current vCPU if it's active and
+ * necessary.
+ *
+ * This is called when re-entering the hardware assisted execution mode after a
+ * trip down to ring-3.  We run the risk that the CPU might have changed and we
+ * will therefore make sure all the cache entries currently in the auto set will
+ * be valid on the new CPU.  If the cpu didn't change nothing will happen as all
+ * the entries will have been flagged as invalidated.
+ *
+ * @param   pVCpu       The shared data for the current virtual CPU.
+ * @thread  EMT
+ */
+VMMR0DECL(void) PGMR0DynMapMigrateAutoSet(PVMCPU pVCpu)
+{
+    LogFlow(("PGMR0DynMapMigrateAutoSet\n"));
+    PPGMMAPSET      pSet     = &pVCpu->pgm.s.AutoSet;
+    int32_t         iRealCpu = PGMRZDYNMAP_CUR_CPU();
+    if (pSet->iCpu != iRealCpu)
+    {
+        uint32_t    i        = pSet->cEntries;
+        if (i != PGMMAPSET_CLOSED)
+        {
+            AssertMsg(i <= RT_ELEMENTS(pSet->aEntries), ("%#x (%u)\n", i, i));
+            if (i != 0 && RT_LIKELY(i <= RT_ELEMENTS(pSet->aEntries)))
+            {
+                PPGMRZDYNMAP    pThis  = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
+                PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
+
+                while (i-- > 0)
+                {
+                    Assert(pSet->aEntries[i].cRefs > 0);
+                    uint32_t iPage = pSet->aEntries[i].iPage;
+                    Assert(iPage < pThis->cPages);
+                    /* Only pages flagged as pending invalidation on this host
+                       CPU need a TLB flush here. */
+                    if (RTCpuSetIsMemberByIndex(&pThis->paPages[iPage].PendingSet, iRealCpu))
+                    {
+                        RTCpuSetDelByIndex(&pThis->paPages[iPage].PendingSet, iRealCpu);
+                        /* Drop the spinlock around the (potentially slow) page
+                           invalidation; reacquire before touching the pool again. */
+                        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
+
+                        ASMInvalidatePage(pThis->paPages[iPage].pvPage);
+                        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapMigrateInvlPg);
+
+                        PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis);
+                    }
+                }
+
+                PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
+            }
+        }
+        pSet->iCpu = iRealCpu;
+    }
+}
+#endif /* !IN_RC */
+
+
+/**
+ * Worker function that flushes the current subset.
+ *
+ * This is called when the set is popped or when the set
+ * has a too high load. As also pointed out elsewhere, the
+ * whole subset thing is a hack for working around code that
+ * accesses too many pages. Like PGMPool.
+ *
+ * @param   pSet        The set which subset to flush.
+ */
+static void pgmDynMapFlushSubset(PPGMMAPSET pSet)
+{
+    uint32_t iSubset = pSet->iSubset;
+    uint32_t i       = pSet->cEntries;
+    Assert(i <= RT_ELEMENTS(pSet->aEntries));
+    /* Nothing to do unless entries were added after the subset was pushed. */
+    if (    i > iSubset
+        &&  i <= RT_ELEMENTS(pSet->aEntries))
+    {
+        Log(("pgmDynMapFlushSubset: cEntries=%d iSubset=%d\n", pSet->cEntries, iSubset));
+        pSet->cEntries = iSubset;
+
+        PPGMRZDYNMAP    pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
+        PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
+
+        /* Release only the entries above the subset boundary. */
+        while (i-- > iSubset)
+        {
+            uint32_t iPage = pSet->aEntries[i].iPage;
+            Assert(iPage < pThis->cPages);
+            int32_t  cRefs = pSet->aEntries[i].cRefs;
+            Assert(cRefs > 0);
+            pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs);
+
+            PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[i]);
+        }
+
+        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
+    }
+}
+
+
+/**
+ * Creates a subset.
+ *
+ * A subset is a hack to avoid having to rewrite code that touches a lot of
+ * pages. It prevents the mapping set from being overflowed by automatically
+ * flushing previous mappings when a certain threshold is reached.
+ *
+ * Pages mapped after calling this function are only valid until the next page
+ * is mapped.
+ *
+ * @returns The index of the previous subset. Pass this to
+ *          PGMDynMapPopAutoSubset when popping it.
+ * @param   pVCpu           Pointer to the virtual cpu data.
+ */
+VMMDECL(uint32_t) PGMRZDynMapPushAutoSubset(PVMCPU pVCpu)
+{
+    PPGMMAPSET      pSet = &pVCpu->pgm.s.AutoSet;
+    AssertReturn(pSet->cEntries != PGMMAPSET_CLOSED, UINT32_MAX);
+    uint32_t        iPrevSubset = pSet->iSubset;
+    LogFlow(("PGMRZDynMapPushAutoSubset: pVCpu=%p iPrevSubset=%u\n", pVCpu, iPrevSubset));
+
+#ifdef IN_RC
+    /* kludge: the RC mapping area is small, so try to compact the set before
+       anchoring a subset boundary when it's more than half full. */
+    if (pSet->cEntries > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE / 2)
+    {
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize);
+        pgmDynMapOptimizeAutoSet(pSet);
+    }
+#endif
+
+    /* The current entry count becomes the new subset boundary. */
+    pSet->iSubset = pSet->cEntries;
+    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSubsets);
+
+    return iPrevSubset;
+}
+
+
+/**
+ * Pops a subset created by a previous call to PGMDynMapPushAutoSubset.
+ *
+ * Flushes the subset's entries if the set is above a 40% load, then restores
+ * the previous subset boundary.
+ *
+ * @param   pVCpu           Pointer to the virtual cpu data.
+ * @param   iPrevSubset     What PGMDynMapPushAutoSubset returned.
+ */
+VMMDECL(void) PGMRZDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset)
+{
+    PPGMMAPSET      pSet = &pVCpu->pgm.s.AutoSet;
+    uint32_t        cEntries = pSet->cEntries;
+    LogFlow(("PGMRZDynMapPopAutoSubset: pVCpu=%p iPrevSubset=%u iSubset=%u cEntries=%u\n", pVCpu, iPrevSubset, pSet->iSubset, cEntries));
+    AssertReturnVoid(cEntries != PGMMAPSET_CLOSED);
+    AssertReturnVoid(pSet->iSubset >= iPrevSubset || iPrevSubset == UINT32_MAX);
+    /* Record the fill level in 10% buckets.  In RC the effective capacity is
+       limited by the hypervisor dynamic mapping area, not the array size. */
+#ifdef IN_RC
+    if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]);
+    else
+#endif
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
+    /* Only bother flushing when reasonably loaded and the subset isn't empty. */
+    if (    cEntries >= RT_ELEMENTS(pSet->aEntries) * 40 / 100
+        &&  cEntries != pSet->iSubset)
+    {
+        AssertMsg(cEntries < PGMMAPSET_MAX_FILL, ("%u\n", cEntries));
+        pgmDynMapFlushSubset(pSet);
+    }
+    pSet->iSubset = iPrevSubset;
+}
+
+
+/**
+ * Indicates that the given page is unused and its mapping can be re-used.
+ *
+ * Locates the matching auto-set entry (searching backwards since hints tend to
+ * refer to recently mapped pages) and bumps its unused counter (cUnrefs),
+ * which pgmDynMapOptimizeAutoSet later uses to evict fully-unreferenced
+ * entries.
+ *
+ * @param   pVCpu           The current CPU.
+ * @param   pvHint          The page that is now unused.  This does not have to
+ *                          point at the start of the page.  NULL is ignored.
+ */
+#ifdef LOG_ENABLED
+void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint, RT_SRC_POS_DECL)
+#else
+void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint)
+#endif
+{
+    /*
+     * Ignore NULL pointers and mask off the page offset bits.
+     */
+    if (pvHint == NULL)
+        return;
+    pvHint = (void *)((uintptr_t)pvHint & ~(uintptr_t)PAGE_OFFSET_MASK);
+
+    PPGMMAPSET  pSet    = &pVCpu->pgm.s.AutoSet;
+    uint32_t    iEntry  = pSet->cEntries;
+    AssertReturnVoid(iEntry > 0);
+
+    /*
+     * Find the entry in the usual unrolled fashion.
+     *
+     * An entry only matches when it still has live references left, i.e.
+     * cRefs + cInlinedRefs exceeds the unused hints already recorded.
+     */
+#define IS_MATCHING_ENTRY(pSet, iEntry, pvHint) \
+        (   (pSet)->aEntries[(iEntry)].pvPage == (pvHint) \
+         &&   (uint32_t)(pSet)->aEntries[(iEntry)].cRefs + (pSet)->aEntries[(iEntry)].cInlinedRefs \
+            > (pSet)->aEntries[(iEntry)].cUnrefs )
+    if (     iEntry >= 1 && IS_MATCHING_ENTRY(pSet, iEntry - 1, pvHint))
+        iEntry = iEntry - 1;
+    else if (iEntry >= 2 && IS_MATCHING_ENTRY(pSet, iEntry - 2, pvHint))
+        iEntry = iEntry - 2;
+    else if (iEntry >= 3 && IS_MATCHING_ENTRY(pSet, iEntry - 3, pvHint))
+        iEntry = iEntry - 3;
+    else if (iEntry >= 4 && IS_MATCHING_ENTRY(pSet, iEntry - 4, pvHint))
+        iEntry = iEntry - 4;
+    else if (iEntry >= 5 && IS_MATCHING_ENTRY(pSet, iEntry - 5, pvHint))
+        iEntry = iEntry - 5;
+    else if (iEntry >= 6 && IS_MATCHING_ENTRY(pSet, iEntry - 6, pvHint))
+        iEntry = iEntry - 6;
+    else if (iEntry >= 7 && IS_MATCHING_ENTRY(pSet, iEntry - 7, pvHint))
+        iEntry = iEntry - 7;
+    else
+    {
+        /*
+         * Loop till we find it.
+         */
+        bool fFound = false;
+        if (iEntry > 7)
+        {
+            iEntry -= 7;
+            while (iEntry-- > 0)
+                if (IS_MATCHING_ENTRY(pSet, iEntry, pvHint))
+                {
+                    fFound = true;
+                    break;
+                }
+        }
+        AssertMsgReturnVoid(fFound,
+                            ("pvHint=%p cEntries=%#x iSubset=%#x\n"
+                             "aEntries[0] = {%#x, %#x, %#x, %#x, %p}\n"
+                             "aEntries[1] = {%#x, %#x, %#x, %#x, %p}\n"
+                             "aEntries[2] = {%#x, %#x, %#x, %#x, %p}\n"
+                             "aEntries[3] = {%#x, %#x, %#x, %#x, %p}\n"
+                             "aEntries[4] = {%#x, %#x, %#x, %#x, %p}\n"
+                             "aEntries[5] = {%#x, %#x, %#x, %#x, %p}\n"
+                             ,
+                             pvHint, pSet->cEntries, pSet->iSubset,
+                             pSet->aEntries[0].iPage, pSet->aEntries[0].cRefs, pSet->aEntries[0].cInlinedRefs, pSet->aEntries[0].cUnrefs, pSet->aEntries[0].pvPage,
+                             pSet->aEntries[1].iPage, pSet->aEntries[1].cRefs, pSet->aEntries[1].cInlinedRefs, pSet->aEntries[1].cUnrefs, pSet->aEntries[1].pvPage,
+                             pSet->aEntries[2].iPage, pSet->aEntries[2].cRefs, pSet->aEntries[2].cInlinedRefs, pSet->aEntries[2].cUnrefs, pSet->aEntries[2].pvPage,
+                             pSet->aEntries[3].iPage, pSet->aEntries[3].cRefs, pSet->aEntries[3].cInlinedRefs, pSet->aEntries[3].cUnrefs, pSet->aEntries[3].pvPage,
+                             pSet->aEntries[4].iPage, pSet->aEntries[4].cRefs, pSet->aEntries[4].cInlinedRefs, pSet->aEntries[4].cUnrefs, pSet->aEntries[4].pvPage,
+                             pSet->aEntries[5].iPage, pSet->aEntries[5].cRefs, pSet->aEntries[5].cInlinedRefs, pSet->aEntries[5].cUnrefs, pSet->aEntries[5].pvPage));
+    }
+#undef IS_MATCHING_ENTRY
+
+    /*
+     * Update it.
+     */
+    uint32_t const  cTotalRefs = (uint32_t)pSet->aEntries[iEntry].cRefs + pSet->aEntries[iEntry].cInlinedRefs;
+    uint32_t const  cUnrefs    = pSet->aEntries[iEntry].cUnrefs;
+    LogFlow(("pgmRZDynMapUnusedHint: pvHint=%p #%u cRefs=%d cInlinedRefs=%d cUnrefs=%d (+1) cTotalRefs=%d %s(%d) %s\n",
+             pvHint, iEntry, pSet->aEntries[iEntry].cRefs, pSet->aEntries[iEntry].cInlinedRefs, cUnrefs, cTotalRefs, pszFile, iLine, pszFunction));
+    AssertReturnVoid(cTotalRefs > cUnrefs);
+
+    if (RT_LIKELY(cUnrefs < UINT16_MAX - 1))
+        pSet->aEntries[iEntry].cUnrefs++;
+    else if (pSet->aEntries[iEntry].cInlinedRefs)
+    {
+        /* cUnrefs is about to overflow: cancel matched inlined references
+           against unused hints to make room, then record this hint. */
+        uint32_t cSub = RT_MIN(pSet->aEntries[iEntry].cInlinedRefs, pSet->aEntries[iEntry].cUnrefs);
+        pSet->aEntries[iEntry].cInlinedRefs -= cSub;
+        pSet->aEntries[iEntry].cUnrefs      -= cSub;
+        pSet->aEntries[iEntry].cUnrefs++;
+    }
+    else
+        Log(("pgmRZDynMapUnusedHint: pvHint=%p ignored because of overflow! %s(%d) %s\n", pvHint, pszFile, iLine, pszFunction));
+}
+
+
+/**
+ * Common worker code for pgmRZDynMapHCPageInlined, pgmRZDynMapHCPageV2Inlined
+ * and pgmR0DynMapGCPageOffInlined.
+ *
+ * @returns VINF_SUCCESS, bails out to ring-3 on failure.
+ * @param   pSet        The set.
+ * @param   HCPhys      The physical address of the page.
+ * @param   ppv         Where to store the address of the mapping on success.
+ *
+ * @remarks This is a very hot path.
+ */
+int pgmRZDynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
+{
+    AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags()));
+    PVMCPU pVCpu = PGMRZDYNMAP_SET_2_VMCPU(pSet);
+    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a);
+
+    /*
+     * Map it.
+     */
+    void           *pvPage;
+    PPGMRZDYNMAP    pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
+    uint32_t        iPage = pgmR0DynMapPage(pThis, HCPhys, pSet->iCpu, pVCpu, &pvPage);
+    if (RT_UNLIKELY(iPage == UINT32_MAX))
+    {
+        /*
+         * We're out of mapping space, optimize our set to try remedy the
+         * situation.  (Only works if there are unreference hints.)
+         */
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize);
+        pgmDynMapOptimizeAutoSet(pSet);
+
+        iPage = pgmR0DynMapPage(pThis, HCPhys, pSet->iCpu, pVCpu, &pvPage);
+        if (RT_UNLIKELY(iPage == UINT32_MAX))
+        {
+            RTAssertMsg2Weak("pgmRZDynMapHCPageCommon: cLoad=%u/%u cPages=%u cGuardPages=%u\n",
+                             pThis->cLoad, pThis->cMaxLoad, pThis->cPages, pThis->cGuardPages);
+            if (!g_fPGMR0DynMapTestRunning)
+                VMMRZCallRing3NoCpu(PGMRZDYNMAP_SET_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0);
+            *ppv = NULL;
+            STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a);
+            return VERR_PGM_DYNMAP_FAILED;
+        }
+    }
+
+    /*
+     * Add the page to the auto reference set.
+     *
+     * The typical usage pattern means that the same pages will be mapped
+     * several times in the same set. We can catch most of these
+     * remappings by looking a few pages back into the set. (The searching
+     * and set optimizing path will hardly ever be used when doing this.)
+     */
+    AssertCompile(RT_ELEMENTS(pSet->aEntries) >= 8);
+    int32_t i = pSet->cEntries;
+    if (i-- < 5)
+    {
+        /* Fewer than 5 entries: just append, no look-back needed. */
+        unsigned iEntry = pSet->cEntries++;
+        pSet->aEntries[iEntry].cRefs        = 1;
+        pSet->aEntries[iEntry].cUnrefs      = 0;
+        pSet->aEntries[iEntry].cInlinedRefs = 0;
+        pSet->aEntries[iEntry].iPage        = iPage;
+        pSet->aEntries[iEntry].pvPage       = pvPage;
+        pSet->aEntries[iEntry].HCPhys       = HCPhys;
+        pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
+        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/0/0 iPage=%#x  [a] %s(%d) %s\n",
+                 pSet, HCPhys, iEntry, iEntry + 1, pvPage, 1, iPage, pszFile, iLine, pszFunction));
+    }
+    /* Any of the last 5 pages? */
+    else if (   pSet->aEntries[i - 0].iPage == iPage
+             && pSet->aEntries[i - 0].cRefs < UINT16_MAX - 1)
+    {
+        pSet->aEntries[i - 0].cRefs++;
+        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [0] %s(%d) %s\n", pSet, HCPhys, i - 0, pSet->cEntries, pvPage, pSet->aEntries[i - 0].cRefs, pSet->aEntries[i - 0].cInlinedRefs, pSet->aEntries[i - 0].cUnrefs, iPage, pszFile, iLine, pszFunction));
+    }
+    else if (   pSet->aEntries[i - 1].iPage == iPage
+             && pSet->aEntries[i - 1].cRefs < UINT16_MAX - 1)
+    {
+        pSet->aEntries[i - 1].cRefs++;
+        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [1] %s(%d) %s\n", pSet, HCPhys, i - 1, pSet->cEntries, pvPage, pSet->aEntries[i - 1].cRefs, pSet->aEntries[i - 1].cInlinedRefs, pSet->aEntries[i - 1].cUnrefs, iPage, pszFile, iLine, pszFunction));
+    }
+    else if (   pSet->aEntries[i - 2].iPage == iPage
+             && pSet->aEntries[i - 2].cRefs < UINT16_MAX - 1)
+    {
+        pSet->aEntries[i - 2].cRefs++;
+        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [2] %s(%d) %s\n", pSet, HCPhys, i - 2, pSet->cEntries, pvPage, pSet->aEntries[i - 2].cRefs, pSet->aEntries[i - 2].cInlinedRefs, pSet->aEntries[i - 2].cUnrefs, iPage, pszFile, iLine, pszFunction));
+    }
+    else if (   pSet->aEntries[i - 3].iPage == iPage
+             && pSet->aEntries[i - 3].cRefs < UINT16_MAX - 1)
+    {
+        pSet->aEntries[i - 3].cRefs++;
+        /* Log tag corrected from "[4]" to "[3]"; it duplicated the i-4 branch's tag. */
+        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [3] %s(%d) %s\n", pSet, HCPhys, i - 3, pSet->cEntries, pvPage, pSet->aEntries[i - 3].cRefs, pSet->aEntries[i - 3].cInlinedRefs, pSet->aEntries[i - 3].cUnrefs, iPage, pszFile, iLine, pszFunction));
+    }
+    else if (   pSet->aEntries[i - 4].iPage == iPage
+             && pSet->aEntries[i - 4].cRefs < UINT16_MAX - 1)
+    {
+        pSet->aEntries[i - 4].cRefs++;
+        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [4] %s(%d) %s\n", pSet, HCPhys, i - 4, pSet->cEntries, pvPage, pSet->aEntries[i - 4].cRefs, pSet->aEntries[i - 4].cInlinedRefs, pSet->aEntries[i - 4].cUnrefs, iPage, pszFile, iLine, pszFunction));
+    }
+    /* Don't bother searching unless we're above a 60% load. */
+    else if (RT_LIKELY(i <= (int32_t)RT_ELEMENTS(pSet->aEntries) * 60 / 100))
+    {
+        unsigned iEntry = pSet->cEntries++;
+        pSet->aEntries[iEntry].cRefs        = 1;
+        pSet->aEntries[iEntry].cUnrefs      = 0;
+        pSet->aEntries[iEntry].cInlinedRefs = 0;
+        pSet->aEntries[iEntry].iPage        = iPage;
+        pSet->aEntries[iEntry].pvPage       = pvPage;
+        pSet->aEntries[iEntry].HCPhys       = HCPhys;
+        pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
+        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=1/0/0 iPage=%#x [b] %s(%d) %s\n", pSet, HCPhys, iEntry, pSet->cEntries, pvPage, iPage, pszFile, iLine, pszFunction));
+    }
+    else
+    {
+        /* Search the rest of the set. */
+        Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
+        i -= 4;
+        while (i-- > 0)
+            if (    pSet->aEntries[i].iPage == iPage
+                &&  pSet->aEntries[i].cRefs < UINT16_MAX - 1)
+            {
+                pSet->aEntries[i].cRefs++;
+                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchHits);
+                LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [c] %s(%d) %s\n", pSet, HCPhys, i, pSet->cEntries, pvPage, pSet->aEntries[i].cRefs, pSet->aEntries[i].cInlinedRefs, pSet->aEntries[i].cUnrefs, iPage, pszFile, iLine, pszFunction));
+                break;
+            }
+        if (i < 0)
+        {
+            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchMisses);
+            /* Not found anywhere: try make room by flushing an open subset
+               and/or optimizing the set before appending. */
+            if (pSet->iSubset < pSet->cEntries)
+            {
+                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchFlushes);
+                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(pSet->cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
+                AssertMsg(pSet->cEntries < PGMMAPSET_MAX_FILL, ("%u\n", pSet->cEntries));
+                pgmDynMapFlushSubset(pSet);
+            }
+
+            if (RT_UNLIKELY(pSet->cEntries >= RT_ELEMENTS(pSet->aEntries)))
+            {
+                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize);
+                pgmDynMapOptimizeAutoSet(pSet);
+            }
+
+            if (RT_LIKELY(pSet->cEntries < RT_ELEMENTS(pSet->aEntries)))
+            {
+                unsigned iEntry = pSet->cEntries++;
+                pSet->aEntries[iEntry].cRefs        = 1;
+                pSet->aEntries[iEntry].cUnrefs      = 0;
+                pSet->aEntries[iEntry].cInlinedRefs = 0;
+                pSet->aEntries[iEntry].iPage        = iPage;
+                pSet->aEntries[iEntry].pvPage       = pvPage;
+                pSet->aEntries[iEntry].HCPhys       = HCPhys;
+                pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
+                LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=1/0/0 iPage=%#x [d] %s(%d) %s\n", pSet, HCPhys, iEntry, pSet->cEntries, pvPage, iPage, pszFile, iLine, pszFunction));
+            }
+            else
+            {
+                /* We're screwed. */
+                pgmRZDynMapReleasePage(pThis, iPage, 1);
+
+                RTAssertMsg2Weak("pgmRZDynMapHCPageCommon: set is full!\n");
+                if (!g_fPGMR0DynMapTestRunning)
+                    VMMRZCallRing3NoCpu(PGMRZDYNMAP_SET_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0);
+                *ppv = NULL;
+                STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a);
+                return VERR_PGM_DYNMAP_FULL_SET;
+            }
+        }
+    }
+
+    *ppv = pvPage;
+    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a);
+    return VINF_SUCCESS;
+}
+
+
+#if 0 /*def DEBUG*/
+/** For pgmR0DynMapTest3PerCpu. */
+typedef struct PGMR0DYNMAPTEST
+{
+    /** The value every CPU is expected to read from the test word. */
+    uint32_t            u32Expect;
+    /** Pointer to the test word in the dynamic mapping. */
+    uint32_t           *pu32;
+    /** Number of CPUs that read a mismatching value (atomically updated). */
+    uint32_t volatile   cFailures;
+} PGMR0DYNMAPTEST;
+typedef PGMR0DYNMAPTEST *PPGMR0DYNMAPTEST;
+
+/**
+ * Checks that the content of the page is the same on all CPUs, i.e. that there
+ * are no CPU specific PTs or similar nasty stuff involved.
+ *
+ * @param   idCpu           The current CPU.
+ * @param   pvUser1         Pointer to a PGMR0DYNMAPTEST structure.
+ * @param   pvUser2         Unused, ignored.
+ */
+static DECLCALLBACK(void) pgmR0DynMapTest3PerCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    PPGMR0DYNMAPTEST    pTest = (PPGMR0DYNMAPTEST)pvUser1;
+    /* Drop any stale TLB entry on this CPU before reading the test word. */
+    ASMInvalidatePage(pTest->pu32);
+    if (*pTest->pu32 != pTest->u32Expect)
+        ASMAtomicIncU32(&pTest->cFailures);
+    NOREF(pvUser2); NOREF(idCpu);
+}
+
+
+/**
+ * Performs some basic tests in debug builds.
+ */
+static int pgmR0DynMapTest(PVM pVM)
+{
+    LogRel(("pgmR0DynMapTest: ****** START ******\n"));
+    PPGMMAPSET      pSet  = &pVM->aCpus[0].pgm.s.AutoSet;
+    PPGMRZDYNMAP    pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
+    uint32_t        i;
+
+    /*
+     * Assert internal integrity first.
+     */
+    LogRel(("Test #0\n"));
+    int rc = PGMR0DynMapAssertIntegrity();
+    if (RT_FAILURE(rc))
+        return rc;
+
+    void           *pvR0DynMapUsedSaved = pVM->pgm.s.pvR0DynMapUsed;
+    pVM->pgm.s.pvR0DynMapUsed = pThis;
+    g_fPGMR0DynMapTestRunning = true;
+
+    /*
+     * Simple test, map CR3 twice and check that we're getting the
+     * same mapping address back.
+     */
+    LogRel(("Test #1\n"));
+    ASMIntDisable();
+    PGMRZDynMapStartAutoSet(&pVM->aCpus[0]);
+
+    uint64_t cr3 = ASMGetCR3() & ~(uint64_t)PAGE_OFFSET_MASK;
+    void    *pv  = (void *)(intptr_t)-1;
+    void    *pv2 = (void *)(intptr_t)-2;
+    rc           = pgmRZDynMapHCPageCommon(pVM, cr3, &pv  RTLOG_COMMA_SRC_POS);
+    int      rc2 = pgmRZDynMapHCPageCommon(pVM, cr3, &pv2 RTLOG_COMMA_SRC_POS);
+    ASMIntEnable();
+    if (    RT_SUCCESS(rc2)
+        &&  RT_SUCCESS(rc)
+        &&  pv == pv2)
+    {
+        LogRel(("Load=%u/%u/%u Set=%u/%u\n", pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
+        rc = PGMR0DynMapAssertIntegrity();
+
+        /*
+         * Check that the simple set overflow code works by filling it
+         * with more CR3 mappings.
+         */
+        LogRel(("Test #2\n"));
+        ASMIntDisable();
+        PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
+        for (i = 0 ; i < UINT16_MAX*2 - 1 && RT_SUCCESS(rc) && pv2 == pv; i++)
+        {
+            pv2 = (void *)(intptr_t)-4;
+            rc = pgmRZDynMapHCPageCommon(pVM, cr3, &pv2 RTLOG_COMMA_SRC_POS);
+        }
+        ASMIntEnable();
+        if (RT_FAILURE(rc) || pv != pv2)
+        {
+            LogRel(("failed(%d): rc=%Rrc; pv=%p pv2=%p i=%d\n", __LINE__, rc, pv, pv2, i));
+            if (RT_SUCCESS(rc)) rc = VERR_INTERNAL_ERROR;
+        }
+        else if (pSet->cEntries != 5)
+        {
+            LogRel(("failed(%d): cEntries=%d expected %d\n", __LINE__, pSet->cEntries, 5));
+            rc = VERR_INTERNAL_ERROR;
+        }
+        else if (   pSet->aEntries[4].cRefs != UINT16_MAX - 1
+                 || pSet->aEntries[3].cRefs != UINT16_MAX - 1
+                 || pSet->aEntries[2].cRefs != 1
+                 || pSet->aEntries[1].cRefs != 1
+                 || pSet->aEntries[0].cRefs != 1)
+        {
+            LogRel(("failed(%d): bad set dist: ", __LINE__));
+            for (i = 0; i < pSet->cEntries; i++)
+                LogRel(("[%d]=%d, ", i, pSet->aEntries[i].cRefs));
+            LogRel(("\n"));
+            rc = VERR_INTERNAL_ERROR;
+        }
+        if (RT_SUCCESS(rc))
+            rc = PGMR0DynMapAssertIntegrity();
+        if (RT_SUCCESS(rc))
+        {
+            /*
+             * Trigger an set optimization run (exactly).
+             */
+            LogRel(("Test #3\n"));
+            ASMIntDisable();
+            PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
+            pv2 = NULL;
+            for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) - 5 && RT_SUCCESS(rc) && pv2 != pv; i++)
+            {
+                pv2 = (void *)(intptr_t)(-5 - i);
+                rc = pgmRZDynMapHCPageCommon(pVM, cr3 + PAGE_SIZE * (i + 5), &pv2 RTLOG_COMMA_SRC_POS);
+            }
+            ASMIntEnable();
+            if (RT_FAILURE(rc) || pv == pv2)
+            {
+                LogRel(("failed(%d): rc=%Rrc; pv=%p pv2=%p i=%d\n", __LINE__, rc, pv, pv2, i));
+                if (RT_SUCCESS(rc)) rc = VERR_INTERNAL_ERROR;
+            }
+            else if (pSet->cEntries != RT_ELEMENTS(pSet->aEntries))
+            {
+                LogRel(("failed(%d): cEntries=%d expected %d\n", __LINE__, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
+                rc = VERR_INTERNAL_ERROR;
+            }
+            LogRel(("Load=%u/%u/%u Set=%u/%u\n", pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cGuardPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
+            if (RT_SUCCESS(rc))
+                rc = PGMR0DynMapAssertIntegrity();
+            if (RT_SUCCESS(rc))
+            {
+                /*
+                 * Trigger an overflow error.
+                 */
+                LogRel(("Test #4\n"));
+                ASMIntDisable();
+                PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
+                for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) + 2; i++)
+                {
+                    rc = pgmRZDynMapHCPageCommon(pVM, cr3 - PAGE_SIZE * (i + 5), &pv2 RTLOG_COMMA_SRC_POS);
+                    if (RT_SUCCESS(rc))
+                        rc = PGMR0DynMapAssertIntegrity();
+                    if (RT_FAILURE(rc))
+                        break;
+                }
+                ASMIntEnable();
+                if (rc == VERR_PGM_DYNMAP_FULL_SET)
+                {
+                    /* flush the set. */
+                    LogRel(("Test #5\n"));
+                    ASMIntDisable();
+                    PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
+                    PGMRZDynMapReleaseAutoSet(&pVM->aCpus[0]);
+                    PGMRZDynMapStartAutoSet(&pVM->aCpus[0]);
+                    ASMIntEnable();
+
+                    rc = PGMR0DynMapAssertIntegrity();
+                }
+                else
+                {
+                    LogRel(("failed(%d): rc=%Rrc, wanted %d ; pv2=%p Set=%u/%u; i=%d\n", __LINE__,
+                            rc, VERR_PGM_DYNMAP_FULL_SET, pv2, pSet->cEntries, RT_ELEMENTS(pSet->aEntries), i));
+                    if (RT_SUCCESS(rc)) rc = VERR_INTERNAL_ERROR;
+                }
+            }
+        }
+    }
+    else
+    {
+        LogRel(("failed(%d): rc=%Rrc rc2=%Rrc; pv=%p pv2=%p\n", __LINE__, rc, rc2, pv, pv2));
+        if (RT_SUCCESS(rc))
+            rc = rc2;
+    }
+
+    /*
+     * Check that everyone sees the same stuff.
+     */
+    if (RT_SUCCESS(rc))
+    {
+        LogRel(("Test #6\n"));
+        ASMIntDisable();
+        PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
+        RTHCPHYS  HCPhysPT = RTR0MemObjGetPagePhysAddr(pThis->pSegHead->ahMemObjPTs[0], 0);
+        rc  = pgmRZDynMapHCPageCommon(pVM, HCPhysPT, &pv RTLOG_COMMA_SRC_POS);
+        if (RT_SUCCESS(rc))
+        {
+            PGMR0DYNMAPTEST Test;
+            uint32_t       *pu32Real = &pThis->paPages[pThis->pSegHead->iPage].uPte.pLegacy->u;
+            Test.pu32       = (uint32_t *)((uintptr_t)pv | ((uintptr_t)pu32Real & PAGE_OFFSET_MASK));
+            Test.u32Expect  = *pu32Real;
+            ASMAtomicWriteU32(&Test.cFailures, 0);
+            ASMIntEnable();
+
+            rc = RTMpOnAll(pgmR0DynMapTest3PerCpu, &Test, NULL);
+            if (RT_FAILURE(rc))
+                LogRel(("failed(%d): RTMpOnAll rc=%Rrc\n", __LINE__, rc));
+            else if (Test.cFailures)
+            {
+                LogRel(("failed(%d): cFailures=%d pu32Real=%p pu32=%p u32Expect=%#x *pu32=%#x\n", __LINE__,
+                        Test.cFailures, pu32Real, Test.pu32, Test.u32Expect, *Test.pu32));
+                rc = VERR_INTERNAL_ERROR;
+            }
+            else
+                LogRel(("pu32Real=%p pu32=%p u32Expect=%#x *pu32=%#x\n",
+                        pu32Real, Test.pu32, Test.u32Expect, *Test.pu32));
+        }
+        else
+        {
+            ASMIntEnable();
+            LogRel(("failed(%d): rc=%Rrc\n", __LINE__, rc));
+        }
+    }
+
+    /*
+     * Clean up.
+     */
+    LogRel(("Cleanup.\n"));
+    ASMIntDisable();
+    PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
+    PGMRZDynMapFlushAutoSet(&pVM->aCpus[0]);
+    PGMRZDynMapReleaseAutoSet(&pVM->aCpus[0]);
+    ASMIntEnable();
+
+    if (RT_SUCCESS(rc))
+        rc = PGMR0DynMapAssertIntegrity();
+    else
+        PGMR0DynMapAssertIntegrity();
+
+    g_fPGMR0DynMapTestRunning = false;
+    LogRel(("Result: rc=%Rrc Load=%u/%u/%u Set=%#x/%u\n", rc,
+            pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cGuardPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
+    pVM->pgm.s.pvR0DynMapUsed = pvR0DynMapUsedSaved;
+    LogRel(("pgmR0DynMapTest: ****** END ******\n"));
+    return rc;
+}
+#endif /* DEBUG */
+
Index: /trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp	(revision 31402)
@@ -461,5 +461,5 @@
     GEN_CHECK_OFF(PGMCPU, offVCpu);
     GEN_CHECK_OFF(PGMCPU, offPGM);
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAW_MODE)
     GEN_CHECK_OFF(PGMCPU, AutoSet);
 #endif
@@ -568,6 +568,5 @@
     GEN_CHECK_OFF(PGM, HCPhysInterPaePML4);
     GEN_CHECK_OFF(PGM, pbDynPageMapBaseGC);
-    GEN_CHECK_OFF(PGM, iDynPageMapLast);
-    GEN_CHECK_OFF(PGM, aHCPhysDynPageMapCache);
+    GEN_CHECK_OFF(PGM, pRCDynMap);
     GEN_CHECK_OFF(PGM, pvR0DynMapUsed);
     GEN_CHECK_OFF(PGM, GCPhys4MBPSEMask);
@@ -575,6 +574,4 @@
     GEN_CHECK_OFF(PGMCPU, fA20Enabled);
     GEN_CHECK_OFF(PGMCPU, fSyncFlags);
-    GEN_CHECK_OFF(PGM, aHCPhysDynPageMapCache);
-    GEN_CHECK_OFF(PGM, aLockedDynPageMapCache);
     GEN_CHECK_OFF(PGM, CritSect);
     GEN_CHECK_OFF(PGM, pPoolR3);
@@ -788,4 +785,26 @@
     GEN_CHECK_OFF(PGMPOOL, aPages[1]);
     GEN_CHECK_OFF(PGMPOOL, aPages[PGMPOOL_IDX_FIRST - 1]);
+    GEN_CHECK_SIZE(PGMRCDYNMAP);
+    GEN_CHECK_OFF(PGMRCDYNMAP, u32Magic);
+    GEN_CHECK_OFF(PGMRCDYNMAP, paPages);
+    GEN_CHECK_OFF(PGMRCDYNMAP, cPages);
+    GEN_CHECK_OFF(PGMRCDYNMAP, fLegacyMode);
+    GEN_CHECK_OFF(PGMRCDYNMAP, cLoad);
+    GEN_CHECK_OFF(PGMRCDYNMAP, cMaxLoad);
+    GEN_CHECK_OFF(PGMRCDYNMAP, cGuardPages);
+    GEN_CHECK_OFF(PGMRCDYNMAP, cUsers);
+    GEN_CHECK_SIZE(PGMRCDYNMAPENTRY);
+    GEN_CHECK_OFF(PGMRCDYNMAPENTRY, HCPhys);
+    GEN_CHECK_OFF(PGMRCDYNMAPENTRY, pvPage);
+    GEN_CHECK_OFF(PGMRCDYNMAPENTRY, cRefs);
+    GEN_CHECK_OFF(PGMRCDYNMAPENTRY, uPte.pLegacy);
+    GEN_CHECK_OFF(PGMRCDYNMAPENTRY, uPte.pPae);
+    GEN_CHECK_OFF(PGMRCDYNMAPENTRY, uPte.pv);
+    GEN_CHECK_OFF(PGMMAPSETENTRY, pvPage);
+    GEN_CHECK_OFF(PGMMAPSETENTRY, iPage);
+    GEN_CHECK_OFF(PGMMAPSETENTRY, cRefs);
+    GEN_CHECK_OFF(PGMMAPSETENTRY, cInlinedRefs);
+    GEN_CHECK_OFF(PGMMAPSETENTRY, cUnrefs);
+    GEN_CHECK_OFF(PGMMAPSETENTRY, HCPhys);
 
     GEN_CHECK_SIZE(REM);
Index: /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp	(revision 31401)
+++ /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp	(revision 31402)
@@ -332,5 +332,5 @@
 
     /* pgm */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE)  || defined(VBOX_WITH_RAW_MODE)
     CHECK_MEMBER_ALIGNMENT(PGMCPU, AutoSet, 8);
 #endif
