Index: /trunk/src/VBox/VMM/PGMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/PGMInternal.h	(revision 19871)
+++ /trunk/src/VBox/VMM/PGMInternal.h	(revision 19872)
@@ -2939,5 +2939,4 @@
 void            pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
 int             pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
-void            pgmPoolFlushAll(PVM pVM);
 void            pgmPoolClearAll(PVM pVM);
 PPGMPOOLPAGE    pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys);
Index: /trunk/src/VBox/VMM/PGMPool.cpp
===================================================================
--- /trunk/src/VBox/VMM/PGMPool.cpp	(revision 19871)
+++ /trunk/src/VBox/VMM/PGMPool.cpp	(revision 19872)
@@ -404,16 +404,4 @@
 
 /**
- * Reset notification.
- *
- * This will flush the pool.
- * @param   pVM     The VM handle.
- */
-void pgmR3PoolReset(PVM pVM)
-{
-    pgmPoolFlushAll(pVM);
-}
-
-
-/**
  * Grows the shadow page pool.
  *
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 19871)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 19872)
@@ -3807,6 +3807,378 @@
     PGMPOOL_UNLOCK_PTR(pPool->CTX_SUFF(pVM), pvShw);
 }
-
 #endif /* PGMPOOL_WITH_USER_TRACKING */
+
+/**
+ * Flushes a pool page.
+ *
+ * This moves the page to the free list after removing all user references to it.
+ *
+ * @returns VBox status code.
+ * @retval  VINF_SUCCESS on success.
+ * @param   pPool       The pool.
+ * @param   pPage       The pool page to flush.
+ */
+int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
+{
+    PVM pVM = pPool->CTX_SUFF(pVM);
+
+    int rc = VINF_SUCCESS;
+    STAM_PROFILE_START(&pPool->StatFlushPage, f);
+    LogFlow(("pgmPoolFlushPage: pPage=%p:{.Key=%RHp, .idx=%d, .enmKind=%s, .GCPhys=%RGp}\n",
+             pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys));
+
+    /*
+     * Quietly reject any attempts at flushing any of the special root pages.
+     */
+    if (pPage->idx < PGMPOOL_IDX_FIRST)
+    {
+        AssertFailed(); /* can no longer happen */
+        Log(("pgmPoolFlushPage: special root page, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
+        return VINF_SUCCESS;
+    }
+
+    pgmLock(pVM);
+
+    /*
+     * Quietly reject any attempts at flushing the currently active shadow CR3 mapping
+     */
+    if (pgmPoolIsPageLocked(&pVM->pgm.s, pPage))
+    {
+        AssertMsg(   pPage->enmKind == PGMPOOLKIND_64BIT_PML4
+                  || pPage->enmKind == PGMPOOLKIND_PAE_PDPT
+                  || pPage->enmKind == PGMPOOLKIND_PAE_PDPT_FOR_32BIT
+                  || pPage->enmKind == PGMPOOLKIND_32BIT_PD
+                  || pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD
+                  || pPage->enmKind == PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD
+                  || pPage->enmKind == PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD
+                  || pPage->enmKind == PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD
+                  || pPage->enmKind == PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
+                  ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(VMMGetCpu(pVM)), pPage->Core.Key, pPage->enmKind));
+        Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
+        pgmUnlock(pVM);
+        return VINF_SUCCESS;
+    }
+
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    /* Start a subset so we won't run out of mapping space. */
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+    uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
+#endif
+
+    /*
+     * Mark the page as being in need of a ASMMemZeroPage().
+     */
+    pPage->fZeroed = false;
+
+#ifdef PGMPOOL_WITH_USER_TRACKING
+    /*
+     * Clear the page.
+     */
+    pgmPoolTrackClearPageUsers(pPool, pPage);
+    STAM_PROFILE_START(&pPool->StatTrackDeref,a);
+    pgmPoolTrackDeref(pPool, pPage);
+    STAM_PROFILE_STOP(&pPool->StatTrackDeref,a);
+#endif
+
+#ifdef PGMPOOL_WITH_CACHE
+    /*
+     * Flush it from the cache.
+     */
+    pgmPoolCacheFlushPage(pPool, pPage);
+#endif /* PGMPOOL_WITH_CACHE */
+
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    /* Heavy stuff done. */
+    PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
+#endif
+
+#ifdef PGMPOOL_WITH_MONITORING
+    /*
+     * Deregistering the monitoring.
+     */
+    if (pPage->fMonitored)
+        rc = pgmPoolMonitorFlush(pPool, pPage);
+#endif
+
+    /*
+     * Free the page.
+     */
+    Assert(pPage->iNext == NIL_PGMPOOL_IDX);
+    pPage->iNext = pPool->iFreeHead;
+    pPool->iFreeHead = pPage->idx;
+    pPage->enmKind = PGMPOOLKIND_FREE;
+    pPage->GCPhys = NIL_RTGCPHYS;
+    pPage->fReusedFlushPending = false;
+
+    pPool->cUsedPages--;
+    pgmUnlock(pVM);
+    STAM_PROFILE_STOP(&pPool->StatFlushPage, f);
+    return rc;
+}
+
+
+/**
+ * Frees a usage of a pool page.
+ *
+ * The caller is responsible for updating the user table so that it no longer
+ * references the shadow page.
+ *
+ * @param   pPool       The pool.
+ * @param   pPage       The pool page.
+ * @param   iUser       The shadow page pool index of the user table.
+ * @param   iUserTable  The index into the user table (shadowed).
+ */
+void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
+{
+    PVM pVM = pPool->CTX_SUFF(pVM);
+
+    STAM_PROFILE_START(&pPool->StatFree, a);
+    LogFlow(("pgmPoolFreeByPage: pPage=%p:{.Key=%RHp, .idx=%d, enmKind=%s} iUser=%#x iUserTable=%#x\n",
+             pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), iUser, iUserTable));
+    Assert(pPage->idx >= PGMPOOL_IDX_FIRST);
+    pgmLock(pVM);
+#ifdef PGMPOOL_WITH_USER_TRACKING
+    pgmPoolTrackFreeUser(pPool, pPage, iUser, iUserTable);
+#endif
+#ifdef PGMPOOL_WITH_CACHE
+    if (!pPage->fCached)
+#endif
+        pgmPoolFlushPage(pPool, pPage);
+    pgmUnlock(pVM);
+    STAM_PROFILE_STOP(&pPool->StatFree, a);
+}
+
+
+/**
+ * Makes one or more pages free, expanding the pool if possible.
+ *
+ * @returns VBox status code.
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_PGM_POOL_FLUSHED if the pool was flushed.
+ *
+ * @param   pPool       The pool.
+ * @param   enmKind     Page table kind
+ * @param   iUser       The user of the page.
+ */
+static int pgmPoolMakeMoreFreePages(PPGMPOOL pPool, PGMPOOLKIND enmKind, uint16_t iUser)
+{
+    LogFlow(("pgmPoolMakeMoreFreePages: iUser=%#x\n", iUser));
+
+    /*
+     * If the pool isn't full grown yet, expand it.
+     */
+    if (    pPool->cCurPages < pPool->cMaxPages
+#if defined(IN_RC)
+        /* Hack alert: we can't deal with jumps to ring 3 when called from MapCR3 and allocating pages for PAE PDs. */
+        &&  enmKind != PGMPOOLKIND_PAE_PD_FOR_PAE_PD
+        &&  (enmKind < PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD || enmKind > PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD)
+#endif
+        )
+    {
+        STAM_PROFILE_ADV_SUSPEND(&pPool->StatAlloc, a);
+#ifdef IN_RING3
+        int rc = PGMR3PoolGrow(pPool->pVMR3);
+#else
+        int rc = CTXALLMID(VMM, CallHost)(pPool->CTX_SUFF(pVM), VMMCALLHOST_PGM_POOL_GROW, 0);
+#endif
+        if (RT_FAILURE(rc))
+            return rc;
+        STAM_PROFILE_ADV_RESUME(&pPool->StatAlloc, a);
+        if (pPool->iFreeHead != NIL_PGMPOOL_IDX)
+            return VINF_SUCCESS;
+    }
+
+#ifdef PGMPOOL_WITH_CACHE
+    /*
+     * Free one cached page.
+     */
+    return pgmPoolCacheFreeOne(pPool, iUser);
+#else
+    /*
+     * Flush the pool.
+     *
+     * If we have tracking enabled, it should be possible to come up with
+     * a cheap replacement strategy...
+     */
+    /* @todo This path no longer works (CR3 root pages will be flushed)!! */
+    AssertCompileFailed();
+    Assert(!CPUMIsGuestInLongMode(pVM));
+    pgmPoolFlushAllInt(pPool);
+    return VERR_PGM_POOL_FLUSHED;
+#endif
+}
+
+
+/**
+ * Allocates a page from the pool.
+ *
+ * This page may actually be a cached page and not in need of any processing
+ * on the callers part.
+ *
+ * @returns VBox status code.
+ * @retval  VINF_SUCCESS if a NEW page was allocated.
+ * @retval  VINF_PGM_CACHED_PAGE if a CACHED page was returned.
+ * @retval  VERR_PGM_POOL_FLUSHED if the pool was flushed.
+ * @param   pVM         The VM handle.
+ * @param   GCPhys      The GC physical address of the page we're gonna shadow.
+ *                      For 4MB and 2MB PD entries, it's the first address the
+ *                      shadow PT is covering.
+ * @param   enmKind     The kind of mapping.
+ * @param   iUser       The shadow page pool index of the user table.
+ * @param   iUserTable  The index into the user table (shadowed).
+ * @param   ppPage      Where to store the pointer to the page. NULL is stored here on failure.
+ */
+int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage)
+{
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+    STAM_PROFILE_ADV_START(&pPool->StatAlloc, a);
+    LogFlow(("pgmPoolAlloc: GCPhys=%RGp enmKind=%s iUser=%#x iUserTable=%#x\n", GCPhys, pgmPoolPoolKindToStr(enmKind), iUser, iUserTable));
+    *ppPage = NULL;
+    /** @todo CSAM/PGMPrefetchPage messes up here during CSAMR3CheckGates
+     *  (TRPMR3SyncIDT) because of FF priority. Try fix that?
+     *  Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_GLOBAL_SYNC_CLEAR_PGM_POOL)); */
+
+    pgmLock(pVM);
+
+#ifdef PGMPOOL_WITH_CACHE
+    if (pPool->fCacheEnabled)
+    {
+        int rc2 = pgmPoolCacheAlloc(pPool, GCPhys, enmKind, iUser, iUserTable, ppPage);
+        if (RT_SUCCESS(rc2))
+        {
+            pgmUnlock(pVM);
+            STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
+            LogFlow(("pgmPoolAlloc: cached returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d}\n", rc2, *ppPage, (*ppPage)->Core.Key, (*ppPage)->idx));
+            return rc2;
+        }
+    }
+#endif
+
+    /*
+     * Allocate a new one.
+     */
+    int         rc = VINF_SUCCESS;
+    uint16_t    iNew = pPool->iFreeHead;
+    if (iNew == NIL_PGMPOOL_IDX)
+    {
+        rc = pgmPoolMakeMoreFreePages(pPool, enmKind, iUser);
+        if (RT_FAILURE(rc))
+        {
+            pgmUnlock(pVM);
+            Log(("pgmPoolAlloc: returns %Rrc (Free)\n", rc));
+            STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
+            return rc;
+        }
+        iNew = pPool->iFreeHead;
+        AssertReleaseReturn(iNew != NIL_PGMPOOL_IDX, VERR_INTERNAL_ERROR);
+    }
+
+    /* unlink the free head */
+    PPGMPOOLPAGE pPage = &pPool->aPages[iNew];
+    pPool->iFreeHead = pPage->iNext;
+    pPage->iNext = NIL_PGMPOOL_IDX;
+
+    /*
+     * Initialize it.
+     */
+    pPool->cUsedPages++;                /* physical handler registration / pgmPoolTrackFlushGCPhysPTsSlow requirement. */
+    pPage->enmKind = enmKind;
+    pPage->GCPhys = GCPhys;
+    pPage->fSeenNonGlobal = false;      /* Set this to 'true' to disable this feature. */
+    pPage->fMonitored = false;
+    pPage->fCached = false;
+    pPage->fReusedFlushPending = false;
+#ifdef PGMPOOL_WITH_MONITORING
+    pPage->cModifications = 0;
+    pPage->iModifiedNext = NIL_PGMPOOL_IDX;
+    pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
+#else
+    pPage->fCR3Mix = false;
+#endif
+#ifdef PGMPOOL_WITH_USER_TRACKING
+    pPage->cPresent = 0;
+    pPage->iFirstPresent = ~0;
+
+    /*
+     * Insert into the tracking and cache. If this fails, free the page.
+     */
+    int rc3 = pgmPoolTrackInsert(pPool, pPage, GCPhys, iUser, iUserTable);
+    if (RT_FAILURE(rc3))
+    {
+        pPool->cUsedPages--;
+        pPage->enmKind = PGMPOOLKIND_FREE;
+        pPage->GCPhys = NIL_RTGCPHYS;
+        pPage->iNext = pPool->iFreeHead;
+        pPool->iFreeHead = pPage->idx;
+        pgmUnlock(pVM);
+        STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
+        Log(("pgmPoolAlloc: returns %Rrc (Insert)\n", rc3));
+        return rc3;
+    }
+#endif /* PGMPOOL_WITH_USER_TRACKING */
+
+    /*
+     * Commit the allocation, clear the page and return.
+     */
+#ifdef VBOX_WITH_STATISTICS
+    if (pPool->cUsedPages > pPool->cUsedPagesHigh)
+        pPool->cUsedPagesHigh = pPool->cUsedPages;
+#endif
+
+    if (!pPage->fZeroed)
+    {
+        STAM_PROFILE_START(&pPool->StatZeroPage, z);
+        void *pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
+        ASMMemZeroPage(pv);
+        STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
+    }
+
+    *ppPage = pPage;
+    pgmUnlock(pVM);
+    LogFlow(("pgmPoolAlloc: returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d, .fCached=%RTbool, .fMonitored=%RTbool}\n",
+             rc, pPage, pPage->Core.Key, pPage->idx, pPage->fCached, pPage->fMonitored));
+    STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
+    return rc;
+}
+
+
+/**
+ * Frees a usage of a pool page.
+ *
+ * @param   pVM         The VM handle.
+ * @param   HCPhys      The HC physical address of the shadow page.
+ * @param   iUser       The shadow page pool index of the user table.
+ * @param   iUserTable  The index into the user table (shadowed).
+ */
+void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable)
+{
+    LogFlow(("pgmPoolFree: HCPhys=%RHp iUser=%#x iUserTable=%#x\n", HCPhys, iUser, iUserTable));
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+    pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, HCPhys), iUser, iUserTable);
+}
+
+/**
+ * Internal worker for finding an 'in-use' shadow page given by its physical address.
+ *
+ * @returns Pointer to the shadow page structure.
+ * @param   pPool       The pool.
+ * @param   HCPhys      The HC physical address of the shadow page.
+ */
+PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
+{
+    PVM pVM = pPool->CTX_SUFF(pVM);
+
+    /*
+     * Look up the page.
+     */
+    pgmLock(pVM);
+    PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
+    pgmUnlock(pVM);
+
+    AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%RHp pPage=%p idx=%d\n", HCPhys, pPage, (pPage) ? pPage->idx : 0));
+    return pPage;
+}
+
+
 #ifdef IN_RING3
 /**
@@ -3817,10 +4189,8 @@
  *
  * @param   pPool       The pool.
- *
- * @remark Only used during reset now, we might want to rename and/or move it.
- */
-static void pgmPoolFlushAllInt(PPGMPOOL pPool)
-{
-    PVM pVM = pPool->CTX_SUFF(pVM);
+ */
+void pgmR3PoolReset(PVM pVM)
+{
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
 
     Assert(PGMIsLockOwner(pVM));
@@ -3841,6 +4211,4 @@
      * including the root page.
      */
-    /** @todo Need to synchronize this across all VCPUs! */
-    Assert(pVM->cCPUs == 1);
     for (unsigned i=0;i<pVM->cCPUs;i++)
     {
@@ -3998,393 +4366,4 @@
 
     STAM_PROFILE_STOP(&pPool->StatFlushAllInt, a);
-}
-
-#endif /* IN_RING3 */
-
-/**
- * Flushes a pool page.
- *
- * This moves the page to the free list after removing all user references to it.
- *
- * @returns VBox status code.
- * @retval  VINF_SUCCESS on success.
- * @param   pPool       The pool.
- * @param   HCPhys      The HC physical address of the shadow page.
- */
-int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
-{
-    PVM pVM = pPool->CTX_SUFF(pVM);
-
-    int rc = VINF_SUCCESS;
-    STAM_PROFILE_START(&pPool->StatFlushPage, f);
-    LogFlow(("pgmPoolFlushPage: pPage=%p:{.Key=%RHp, .idx=%d, .enmKind=%s, .GCPhys=%RGp}\n",
-             pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys));
-
-    /*
-     * Quietly reject any attempts at flushing any of the special root pages.
-     */
-    if (pPage->idx < PGMPOOL_IDX_FIRST)
-    {
-        AssertFailed(); /* can no longer happen */
-        Log(("pgmPoolFlushPage: special root page, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
-        return VINF_SUCCESS;
-    }
-
-    pgmLock(pVM);
-
-    /*
-     * Quietly reject any attempts at flushing the currently active shadow CR3 mapping
-     */
-    if (pgmPoolIsPageLocked(&pVM->pgm.s, pPage))
-    {
-        AssertMsg(   pPage->enmKind == PGMPOOLKIND_64BIT_PML4
-                  || pPage->enmKind == PGMPOOLKIND_PAE_PDPT
-                  || pPage->enmKind == PGMPOOLKIND_PAE_PDPT_FOR_32BIT
-                  || pPage->enmKind == PGMPOOLKIND_32BIT_PD
-                  || pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD
-                  || pPage->enmKind == PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD
-                  || pPage->enmKind == PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD
-                  || pPage->enmKind == PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD
-                  || pPage->enmKind == PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
-                  ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(VMMGetCpu(pVM)), pPage->Core.Key, pPage->enmKind));
-        Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
-        pgmUnlock(pVM);
-        return VINF_SUCCESS;
-    }
-
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    /* Start a subset so we won't run out of mapping space. */
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-    uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
-#endif
-
-    /*
-     * Mark the page as being in need of a ASMMemZeroPage().
-     */
-    pPage->fZeroed = false;
-
-#ifdef PGMPOOL_WITH_USER_TRACKING
-    /*
-     * Clear the page.
-     */
-    pgmPoolTrackClearPageUsers(pPool, pPage);
-    STAM_PROFILE_START(&pPool->StatTrackDeref,a);
-    pgmPoolTrackDeref(pPool, pPage);
-    STAM_PROFILE_STOP(&pPool->StatTrackDeref,a);
-#endif
-
-#ifdef PGMPOOL_WITH_CACHE
-    /*
-     * Flush it from the cache.
-     */
-    pgmPoolCacheFlushPage(pPool, pPage);
-#endif /* PGMPOOL_WITH_CACHE */
-
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    /* Heavy stuff done. */
-    PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
-#endif
-
-#ifdef PGMPOOL_WITH_MONITORING
-    /*
-     * Deregistering the monitoring.
-     */
-    if (pPage->fMonitored)
-        rc = pgmPoolMonitorFlush(pPool, pPage);
-#endif
-
-    /*
-     * Free the page.
-     */
-    Assert(pPage->iNext == NIL_PGMPOOL_IDX);
-    pPage->iNext = pPool->iFreeHead;
-    pPool->iFreeHead = pPage->idx;
-    pPage->enmKind = PGMPOOLKIND_FREE;
-    pPage->GCPhys = NIL_RTGCPHYS;
-    pPage->fReusedFlushPending = false;
-
-    pPool->cUsedPages--;
-    pgmUnlock(pVM);
-    STAM_PROFILE_STOP(&pPool->StatFlushPage, f);
-    return rc;
-}
-
-
-/**
- * Frees a usage of a pool page.
- *
- * The caller is responsible to updating the user table so that it no longer
- * references the shadow page.
- *
- * @param   pPool       The pool.
- * @param   HCPhys      The HC physical address of the shadow page.
- * @param   iUser       The shadow page pool index of the user table.
- * @param   iUserTable  The index into the user table (shadowed).
- */
-void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
-{
-    PVM pVM = pPool->CTX_SUFF(pVM);
-
-    STAM_PROFILE_START(&pPool->StatFree, a);
-    LogFlow(("pgmPoolFreeByPage: pPage=%p:{.Key=%RHp, .idx=%d, enmKind=%s} iUser=%#x iUserTable=%#x\n",
-             pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), iUser, iUserTable));
-    Assert(pPage->idx >= PGMPOOL_IDX_FIRST);
-    pgmLock(pVM);
-#ifdef PGMPOOL_WITH_USER_TRACKING
-    pgmPoolTrackFreeUser(pPool, pPage, iUser, iUserTable);
-#endif
-#ifdef PGMPOOL_WITH_CACHE
-    if (!pPage->fCached)
-#endif
-        pgmPoolFlushPage(pPool, pPage);
-    pgmUnlock(pVM);
-    STAM_PROFILE_STOP(&pPool->StatFree, a);
-}
-
-
-/**
- * Makes one or more free page free.
- *
- * @returns VBox status code.
- * @retval  VINF_SUCCESS on success.
- * @retval  VERR_PGM_POOL_FLUSHED if the pool was flushed.
- *
- * @param   pPool       The pool.
- * @param   enmKind     Page table kind
- * @param   iUser       The user of the page.
- */
-static int pgmPoolMakeMoreFreePages(PPGMPOOL pPool, PGMPOOLKIND enmKind, uint16_t iUser)
-{
-    LogFlow(("pgmPoolMakeMoreFreePages: iUser=%#x\n", iUser));
-
-    /*
-     * If the pool isn't full grown yet, expand it.
-     */
-    if (    pPool->cCurPages < pPool->cMaxPages
-#if defined(IN_RC)
-        /* Hack alert: we can't deal with jumps to ring 3 when called from MapCR3 and allocating pages for PAE PDs. */
-        &&  enmKind != PGMPOOLKIND_PAE_PD_FOR_PAE_PD
-        &&  (enmKind < PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD || enmKind > PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD)
-#endif
-        )
-    {
-        STAM_PROFILE_ADV_SUSPEND(&pPool->StatAlloc, a);
-#ifdef IN_RING3
-        int rc = PGMR3PoolGrow(pPool->pVMR3);
-#else
-        int rc = CTXALLMID(VMM, CallHost)(pPool->CTX_SUFF(pVM), VMMCALLHOST_PGM_POOL_GROW, 0);
-#endif
-        if (RT_FAILURE(rc))
-            return rc;
-        STAM_PROFILE_ADV_RESUME(&pPool->StatAlloc, a);
-        if (pPool->iFreeHead != NIL_PGMPOOL_IDX)
-            return VINF_SUCCESS;
-    }
-
-#ifdef PGMPOOL_WITH_CACHE
-    /*
-     * Free one cached page.
-     */
-    return pgmPoolCacheFreeOne(pPool, iUser);
-#else
-    /*
-     * Flush the pool.
-     *
-     * If we have tracking enabled, it should be possible to come up with
-     * a cheap replacement strategy...
-     */
-    /* @todo This path no longer works (CR3 root pages will be flushed)!! */
-    AssertCompileFailed();
-    Assert(!CPUMIsGuestInLongMode(pVM));
-    pgmPoolFlushAllInt(pPool);
-    return VERR_PGM_POOL_FLUSHED;
-#endif
-}
-
-
-/**
- * Allocates a page from the pool.
- *
- * This page may actually be a cached page and not in need of any processing
- * on the callers part.
- *
- * @returns VBox status code.
- * @retval  VINF_SUCCESS if a NEW page was allocated.
- * @retval  VINF_PGM_CACHED_PAGE if a CACHED page was returned.
- * @retval  VERR_PGM_POOL_FLUSHED if the pool was flushed.
- * @param   pVM         The VM handle.
- * @param   GCPhys      The GC physical address of the page we're gonna shadow.
- *                      For 4MB and 2MB PD entries, it's the first address the
- *                      shadow PT is covering.
- * @param   enmKind     The kind of mapping.
- * @param   iUser       The shadow page pool index of the user table.
- * @param   iUserTable  The index into the user table (shadowed).
- * @param   ppPage      Where to store the pointer to the page. NULL is stored here on failure.
- */
-int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage)
-{
-    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
-    STAM_PROFILE_ADV_START(&pPool->StatAlloc, a);
-    LogFlow(("pgmPoolAlloc: GCPhys=%RGp enmKind=%s iUser=%#x iUserTable=%#x\n", GCPhys, pgmPoolPoolKindToStr(enmKind), iUser, iUserTable));
-    *ppPage = NULL;
-    /** @todo CSAM/PGMPrefetchPage messes up here during CSAMR3CheckGates
-     *  (TRPMR3SyncIDT) because of FF priority. Try fix that?
-     *  Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_GLOBAL_SYNC_CLEAR_PGM_POOL)); */
-
-    pgmLock(pVM);
-
-#ifdef PGMPOOL_WITH_CACHE
-    if (pPool->fCacheEnabled)
-    {
-        int rc2 = pgmPoolCacheAlloc(pPool, GCPhys, enmKind, iUser, iUserTable, ppPage);
-        if (RT_SUCCESS(rc2))
-        {
-            pgmUnlock(pVM);
-            STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
-            LogFlow(("pgmPoolAlloc: cached returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d}\n", rc2, *ppPage, (*ppPage)->Core.Key, (*ppPage)->idx));
-            return rc2;
-        }
-    }
-#endif
-
-    /*
-     * Allocate a new one.
-     */
-    int         rc = VINF_SUCCESS;
-    uint16_t    iNew = pPool->iFreeHead;
-    if (iNew == NIL_PGMPOOL_IDX)
-    {
-        rc = pgmPoolMakeMoreFreePages(pPool, enmKind, iUser);
-        if (RT_FAILURE(rc))
-        {
-            pgmUnlock(pVM);
-            Log(("pgmPoolAlloc: returns %Rrc (Free)\n", rc));
-            STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
-            return rc;
-        }
-        iNew = pPool->iFreeHead;
-        AssertReleaseReturn(iNew != NIL_PGMPOOL_IDX, VERR_INTERNAL_ERROR);
-    }
-
-    /* unlink the free head */
-    PPGMPOOLPAGE pPage = &pPool->aPages[iNew];
-    pPool->iFreeHead = pPage->iNext;
-    pPage->iNext = NIL_PGMPOOL_IDX;
-
-    /*
-     * Initialize it.
-     */
-    pPool->cUsedPages++;                /* physical handler registration / pgmPoolTrackFlushGCPhysPTsSlow requirement. */
-    pPage->enmKind = enmKind;
-    pPage->GCPhys = GCPhys;
-    pPage->fSeenNonGlobal = false;      /* Set this to 'true' to disable this feature. */
-    pPage->fMonitored = false;
-    pPage->fCached = false;
-    pPage->fReusedFlushPending = false;
-#ifdef PGMPOOL_WITH_MONITORING
-    pPage->cModifications = 0;
-    pPage->iModifiedNext = NIL_PGMPOOL_IDX;
-    pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
-#else
-    pPage->fCR3Mix = false;
-#endif
-#ifdef PGMPOOL_WITH_USER_TRACKING
-    pPage->cPresent = 0;
-    pPage->iFirstPresent = ~0;
-
-    /*
-     * Insert into the tracking and cache. If this fails, free the page.
-     */
-    int rc3 = pgmPoolTrackInsert(pPool, pPage, GCPhys, iUser, iUserTable);
-    if (RT_FAILURE(rc3))
-    {
-        pPool->cUsedPages--;
-        pPage->enmKind = PGMPOOLKIND_FREE;
-        pPage->GCPhys = NIL_RTGCPHYS;
-        pPage->iNext = pPool->iFreeHead;
-        pPool->iFreeHead = pPage->idx;
-        pgmUnlock(pVM);
-        STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
-        Log(("pgmPoolAlloc: returns %Rrc (Insert)\n", rc3));
-        return rc3;
-    }
-#endif /* PGMPOOL_WITH_USER_TRACKING */
-
-    /*
-     * Commit the allocation, clear the page and return.
-     */
-#ifdef VBOX_WITH_STATISTICS
-    if (pPool->cUsedPages > pPool->cUsedPagesHigh)
-        pPool->cUsedPagesHigh = pPool->cUsedPages;
-#endif
-
-    if (!pPage->fZeroed)
-    {
-        STAM_PROFILE_START(&pPool->StatZeroPage, z);
-        void *pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
-        ASMMemZeroPage(pv);
-        STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
-    }
-
-    *ppPage = pPage;
-    pgmUnlock(pVM);
-    LogFlow(("pgmPoolAlloc: returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d, .fCached=%RTbool, .fMonitored=%RTbool}\n",
-             rc, pPage, pPage->Core.Key, pPage->idx, pPage->fCached, pPage->fMonitored));
-    STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
-    return rc;
-}
-
-
-/**
- * Frees a usage of a pool page.
- *
- * @param   pVM         The VM handle.
- * @param   HCPhys      The HC physical address of the shadow page.
- * @param   iUser       The shadow page pool index of the user table.
- * @param   iUserTable  The index into the user table (shadowed).
- */
-void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable)
-{
-    LogFlow(("pgmPoolFree: HCPhys=%RHp iUser=%#x iUserTable=%#x\n", HCPhys, iUser, iUserTable));
-    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
-    pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, HCPhys), iUser, iUserTable);
-}
-
-/**
- * Internal worker for finding a 'in-use' shadow page give by it's physical address.
- *
- * @returns Pointer to the shadow page structure.
- * @param   pPool       The pool.
- * @param   HCPhys      The HC physical address of the shadow page.
- */
-PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
-{
-    PVM pVM = pPool->CTX_SUFF(pVM);
-
-    /*
-     * Look up the page.
-     */
-    pgmLock(pVM);
-    PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
-    pgmUnlock(pVM);
-
-    AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%RHp pPage=%p idx=%d\n", HCPhys, pPage, (pPage) ? pPage->idx : 0));
-    return pPage;
-}
-
-
-#ifdef IN_RING3
-/**
- * Flushes the entire cache.
- *
- * It will assert a global CR3 flush (FF) and assumes the caller is aware of this
- * and execute this CR3 flush.
- *
- * @param   pPool       The pool.
- */
-void pgmPoolFlushAll(PVM pVM)
-{
-    LogFlow(("pgmPoolFlushAll:\n"));
-    pgmPoolFlushAllInt(pVM->pgm.s.CTX_SUFF(pPool));
 }
 #endif /* IN_RING3 */
