Index: /trunk/include/VBox/pgm.h
===================================================================
--- /trunk/include/VBox/pgm.h	(revision 22694)
+++ /trunk/include/VBox/pgm.h	(revision 22695)
@@ -349,4 +349,5 @@
 VMMDECL(bool)       PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys);
 VMMDECL(bool)       PGMHandlerVirtualIsRegistered(PVM pVM, RTGCPTR GCPtr);
+VMMDECL(void)       PGMPoolFlushPage(PVM pVM, RTGCPHYS GCPhys);
 VMMDECL(bool)       PGMPhysIsA20Enabled(PVMCPU pVCpu);
 VMMDECL(bool)       PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys);
Index: /trunk/src/VBox/VMM/PGMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/PGMInternal.h	(revision 22694)
+++ /trunk/src/VBox/VMM/PGMInternal.h	(revision 22695)
@@ -1795,4 +1795,6 @@
     /** Profiling pgmPoolFree(). */
     STAMPROFILE                 StatFree;
+    /** Counting explicit flushes by PGMPoolFlushPage(). */
+    STAMCOUNTER                 StatForceFlushPage;
     /** Profiling time spent zeroing pages. */
     STAMPROFILE                 StatZeroPage;
Index: /trunk/src/VBox/VMM/PGMPhys.cpp
===================================================================
--- /trunk/src/VBox/VMM/PGMPhys.cpp	(revision 22694)
+++ /trunk/src/VBox/VMM/PGMPhys.cpp	(revision 22695)
@@ -369,12 +369,17 @@
         AssertFatalRC(rc2);
         PPGMPAGE pPage = pTlbe->pPage;
-#if 1
         if (PGM_PAGE_IS_MMIO(pPage))
-#else
-        if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
-#endif
         {
             PGMPhysReleasePageMappingLock(pVM, pLock);
             rc = VERR_PGM_PHYS_PAGE_RESERVED;
+        }
+        else
+        if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
+        {
+            /* We *must* flush any corresponding pgm pool page here, otherwise we'll
+             * not be informed about writes and keep bogus gst->shw mappings around.
+             */
+            PGMPoolFlushPage(pVM, *pGCPhys);
+            Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
         }
     }
@@ -428,11 +433,15 @@
     {
         PPGMPAGE pPage = pTlbe->pPage;
-#if 1
         if (PGM_PAGE_IS_MMIO(pPage))
             rc = VERR_PGM_PHYS_PAGE_RESERVED;
-#else
+        else
         if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
-            rc = VERR_PGM_PHYS_PAGE_RESERVED;
-#endif
+        {
+            /* We *must* flush any corresponding pgm pool page here, otherwise we'll
+             * not be informed about writes and keep bogus gst->shw mappings around.
+             */
+            PGMPoolFlushPage(pVM, GCPhys);
+            Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
+        }
         else
         {
Index: /trunk/src/VBox/VMM/PGMPool.cpp
===================================================================
--- /trunk/src/VBox/VMM/PGMPool.cpp	(revision 22694)
+++ /trunk/src/VBox/VMM/PGMPool.cpp	(revision 22695)
@@ -325,4 +325,5 @@
     STAM_REG(pVM, &pPool->StatFlushPage,                STAMTYPE_PROFILE,   "/PGM/Pool/FlushPage",      STAMUNIT_TICKS_PER_CALL,    "Profiling of pgmPoolFlushPage.");
     STAM_REG(pVM, &pPool->StatFree,                     STAMTYPE_PROFILE,   "/PGM/Pool/Free",           STAMUNIT_TICKS_PER_CALL,    "Profiling of pgmPoolFree.");
+    STAM_REG(pVM, &pPool->StatForceFlushPage,           STAMTYPE_COUNTER,   "/PGM/Pool/FlushForce",     STAMUNIT_OCCURENCES,        "Counting explicit flushes by PGMPoolFlushPage().");
     STAM_REG(pVM, &pPool->StatZeroPage,                 STAMTYPE_PROFILE,   "/PGM/Pool/ZeroPage",       STAMUNIT_TICKS_PER_CALL,    "Profiling time spent zeroing pages. Overlaps with Alloc.");
 # ifdef PGMPOOL_WITH_USER_TRACKING
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 22694)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 22695)
@@ -1380,4 +1380,5 @@
 DECLINLINE(void) pgmPoolTrackCheckPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT)
 {
+    unsigned cErrors = 0;
     for (unsigned i = 0; i < RT_ELEMENTS(pShwPT->a); i++)
     {
@@ -1386,5 +1387,55 @@
             RTHCPHYS HCPhys = -1;
             int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, &HCPhys);
-            AssertMsg(rc == VINF_SUCCESS && (pShwPT->a[i].u & X86_PTE_PAE_PG_MASK) == HCPhys, ("rc=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, pGstPT->a[i].u, pShwPT->a[i].u, HCPhys));
+            if (    rc != VINF_SUCCESS
+                ||  (pShwPT->a[i].u & X86_PTE_PAE_PG_MASK) != HCPhys)
+            {
+                RTHCPHYS HCPhysPT = -1;
+                Log(("rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, i, pGstPT->a[i].u, pShwPT->a[i].u, HCPhys));
+                cErrors++;
+
+                int rc2 = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pPage->GCPhys, &HCPhysPT);
+                AssertRC(rc2);
+
+                for (unsigned i = 0; i < pPool->cCurPages; i++)
+                {
+                    PPGMPOOLPAGE pTempPage = &pPool->aPages[i];
+
+                    if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
+                    {
+                        PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pTempPage);
+
+                        for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
+                        {
+                            if (    pShwPT2->a[j].n.u1Present
+                                &&  pShwPT2->a[j].n.u1Write
+                                &&  ((pShwPT2->a[j].u & X86_PTE_PAE_PG_MASK) == HCPhysPT))
+                            {
+                                Log(("GCPhys=%RGp idx=%d %RX64 vs %RX64\n", pTempPage->GCPhys, j, pShwPT->a[j].u, pShwPT2->a[j].u));
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+    Assert(!cErrors);
+}
+
+void pgmPoolTrackCheckAllPTPaePae(PVM pVM)
+{
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+
+    for (unsigned i = 0; i < pPool->cCurPages; i++)
+    {
+        PPGMPOOLPAGE pPage = &pPool->aPages[i];
+
+        if (    pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT
+            &&  !pPage->fDirty)
+        {
+            void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
+            void *pvGst;
+            int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
+
+            pgmPoolTrackCheckPTPaePae(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PTPAE)pvGst);
         }
     }
@@ -4669,4 +4720,80 @@
 }
 
+/**
+ * Flush the specified page if present
+ *
+ * @param   pVM     The VM handle.
+ * @param   GCPhys  Guest physical address of the page to flush
+ */
+VMMDECL(void) PGMPoolFlushPage(PVM pVM, RTGCPHYS GCPhys)
+{
+#ifdef PGMPOOL_WITH_CACHE
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+
+    /*
+     * Look up the GCPhys in the hash.
+     */
+    GCPhys = GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1);
+    unsigned i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
+    if (i == NIL_PGMPOOL_IDX)
+        return;
+
+    do
+    {
+        PPGMPOOLPAGE pPage = &pPool->aPages[i];
+        if (pPage->GCPhys - GCPhys < PAGE_SIZE)
+        {
+            switch (pPage->enmKind)
+            {
+                case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
+                case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
+                case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
+                case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
+                case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
+                case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
+                case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
+                case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
+                case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
+                case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
+                case PGMPOOLKIND_64BIT_PML4:
+                case PGMPOOLKIND_32BIT_PD:
+                case PGMPOOLKIND_PAE_PDPT:
+                {
+                    Log(("PGMPoolFlushPage: found pgm pool pages for %RGp\n", GCPhys));
+                    STAM_COUNTER_INC(&pPool->StatForceFlushPage);
+                    Assert(!pgmPoolIsPageLocked(&pVM->pgm.s, pPage));
+                    pgmPoolMonitorChainFlush(pPool, pPage);
+                    return;
+                }
+
+                /* ignore, no monitoring. */
+                case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
+                case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
+                case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
+                case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
+                case PGMPOOLKIND_PAE_PT_FOR_PHYS:
+                case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
+                case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
+                case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
+                case PGMPOOLKIND_EPT_PD_FOR_PHYS:
+                case PGMPOOLKIND_EPT_PT_FOR_PHYS:
+                case PGMPOOLKIND_ROOT_NESTED:
+                case PGMPOOLKIND_PAE_PD_PHYS:
+                case PGMPOOLKIND_PAE_PDPT_PHYS:
+                case PGMPOOLKIND_32BIT_PD_PHYS:
+                case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
+                    break;
+
+                default:
+                    AssertFatalMsgFailed(("enmKind=%d idx=%d\n", pPage->enmKind, pPage->idx));
+            }
+        }
+
+        /* next */
+        i = pPage->iNext;
+    } while (i != NIL_PGMPOOL_IDX);
+#endif
+    return;
+}
 
 #ifdef IN_RING3
