Index: /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 36008)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 36009)
@@ -2306,5 +2306,5 @@
 VMMDECL(void) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
 {
-      pVM->fUseLargePages = fUseLargePages;
+    pVM->fUseLargePages = fUseLargePages;
 }
 
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 36008)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 36009)
@@ -3074,12 +3074,10 @@
     if (BTH_IS_NP_ACTIVE(pVM))
     {
+        /* Check if we allocated a big page before for this 2 MB range. */
         PPGMPAGE pPage;
-
-        /* Check if we allocated a big page before for this 2 MB range. */
         rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPtrPage & X86_PDE2M_PAE_PG_MASK, &pPage);
         if (RT_SUCCESS(rc))
         {
             RTHCPHYS HCPhys = NIL_RTHCPHYS;
-
             if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
             {
@@ -3091,5 +3089,5 @@
             {
                 /* Recheck the entire 2 MB range to see if we can use it again as a large page. */
-                rc = pgmPhysIsValidLargePage(pVM, GCPtrPage, pPage);
+                rc = pgmPhysRecheckLargePage(pVM, GCPtrPage, pPage);
                 if (RT_SUCCESS(rc))
                 {
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 36008)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 36009)
@@ -383,4 +383,7 @@
 
 # ifdef PGM_WITH_LARGE_PAGES
+    /*
+     * Try allocate a large page if applicable.
+     */
     if (    PGMIsUsingLargePages(pVM)
         &&  PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
@@ -499,4 +502,5 @@
 
 #ifdef PGM_WITH_LARGE_PAGES
+
 /**
  * Replace a 2 MB range of zero pages with new pages that we can write to.
@@ -526,71 +530,68 @@
     Assert(PGMIsUsingLargePages(pVM));
 
-    PPGMPAGE pPage;
-    int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
+    PPGMPAGE pFirstPage;
+    int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pFirstPage);
     if (    RT_SUCCESS(rc)
-        &&  PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
-    {
-        unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pPage);
+        &&  PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
+    {
+        unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
 
         /* Don't call this function for already allocated pages. */
         Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
 
-        if  (   uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
-             && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
-        {
+        if (   uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
+            && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
+        {
+            /* Lazy approach: check all pages in the 2 MB range.
+             * The whole range must be ram and unallocated. */
+            GCPhys = GCPhysBase;
             unsigned iPage;
-
-            GCPhys = GCPhysBase;
-
-            /* Lazy approach: check all pages in the 2 MB range.
-             * The whole range must be ram and unallocated
-             */
             for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
             {
-                rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
+                PPGMPAGE pSubPage;
+                rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pSubPage);
                 if  (   RT_FAILURE(rc)
-                     || PGM_PAGE_GET_TYPE(pPage)  != PGMPAGETYPE_RAM        /* Anything other than ram implies monitoring. */
-                     || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ZERO)   /* allocated, monitored or shared means we can't use a large page here */
+                     || PGM_PAGE_GET_TYPE(pSubPage)  != PGMPAGETYPE_RAM      /* Anything other than ram implies monitoring. */
+                     || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
                 {
-                    LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_STATE(pPage), rc));
+                    LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
                     break;
                 }
-                Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
+                Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
                 GCPhys += PAGE_SIZE;
             }
-            /* Fetch the start page of the 2 MB range again. */
-            rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
-            AssertRC(rc);   /* can't fail */
-
             if (iPage != _2M/PAGE_SIZE)
             {
                 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
                 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
-                PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
+                PGM_PAGE_SET_PDE_TYPE(pFirstPage, PGM_PAGE_PDE_TYPE_PT);
                 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
             }
-            else
+
+            /*
+             * Do the allocation.
+             */
+# ifdef IN_RING3
+            rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
+# else
+            rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
+# endif
+            if (RT_SUCCESS(rc))
             {
-# ifdef IN_RING3
-                rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
-# else
-                rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
-# endif
-                if (RT_SUCCESS(rc))
-                {
-                    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
-                    STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageAlloc);
-                    return VINF_SUCCESS;
-                }
-                LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
-
-                /* If we fail once, it most likely means the host's memory is too fragmented; don't bother trying again. */
-                PGMSetLargePageUsage(pVM, false);
-                return rc;
+                Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
+                pVM->pgm.s.cLargePages++;
+                return VINF_SUCCESS;
             }
+
+            /* If we fail once, it most likely means the host's memory is too
+               fragmented; don't bother trying again. */
+            LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
+            PGMSetLargePageUsage(pVM, false);
+            return rc;
         }
     }
     return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
 }
+
 
 /**
@@ -605,8 +606,6 @@
  * @param   pLargePage  Page structure of the base page
  */
-int pgmPhysIsValidLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
-{
-    unsigned i;
-
+int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
+{
     STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
 
@@ -619,5 +618,5 @@
         ||  PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
     {
-        LogFlow(("pgmPhysIsValidLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
+        LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
         return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
     }
@@ -625,4 +624,5 @@
     STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
     /* Check all remaining pages in the 2 MB range. */
+    unsigned i;
     GCPhys += PAGE_SIZE;
     for (i = 1; i < _2M/PAGE_SIZE; i++)
@@ -637,5 +637,5 @@
             ||  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
         {
-            LogFlow(("pgmPhysIsValidLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
+            LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
             break;
         }
@@ -648,5 +648,6 @@
     {
         PGM_PAGE_SET_PDE_TYPE(pLargePage, PGM_PAGE_PDE_TYPE_PDE);
-        Log(("pgmPhysIsValidLargePage: page %RGp can be reused!\n", GCPhys - _2M));
+        pVM->pgm.s.cLargePagesDisabled--;
+        Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
         return VINF_SUCCESS;
     }
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 36008)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 36009)
@@ -3444,27 +3444,27 @@
     if (PGM_PAGE_GET_PDE_TYPE(pPhysPage) == PGM_PAGE_PDE_TYPE_PDE)
     {
-        PPGMPAGE pPhysBase;
         RTGCPHYS GCPhysBase = GCPhysPage & X86_PDE2M_PAE_PG_MASK;
-
         GCPhysPage &= X86_PDE_PAE_PG_MASK;
 
         /* Fetch the large page base. */
+        PPGMPAGE pLargePage;
         if (GCPhysBase != GCPhysPage)
         {
-            pPhysBase = pgmPhysGetPage(&pVM->pgm.s, GCPhysBase);
-            AssertFatal(pPhysBase);
+            pLargePage = pgmPhysGetPage(&pVM->pgm.s, GCPhysBase);
+            AssertFatal(pLargePage);
         }
         else
-            pPhysBase = pPhysPage;
+            pLargePage = pPhysPage;
 
         Log(("pgmPoolTrackUpdateGCPhys: update large page PDE for %RGp (%RGp)\n", GCPhysBase, GCPhysPage));
 
-        if (PGM_PAGE_GET_PDE_TYPE(pPhysBase) == PGM_PAGE_PDE_TYPE_PDE)
+        if (PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE)
         {
             /* Mark the large page as disabled as we need to break it up to change a single page in the 2 MB range. */
-            PGM_PAGE_SET_PDE_TYPE(pPhysBase, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
+            PGM_PAGE_SET_PDE_TYPE(pLargePage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
+            pVM->pgm.s.cLargePagesDisabled++;
 
             /* Update the base as that *only* that one has a reference and there's only one PDE to clear. */
-            rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysBase, pPhysBase, fFlushPTEs, pfFlushTLBs);
+            rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysBase, pLargePage, fFlushPTEs, pfFlushTLBs);
 
             *pfFlushTLBs = true;
Index: /trunk/src/VBox/VMM/VMMR3/PGM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGM.cpp	(revision 36008)
+++ /trunk/src/VBox/VMM/VMMR3/PGM.cpp	(revision 36009)
@@ -1608,4 +1608,6 @@
     STAM_REL_REG(pVM, &pPGM->cBalloonedPages,                    STAMTYPE_U32,     "/PGM/Page/cBalloonedPages",          STAMUNIT_COUNT,     "The number of ballooned pages.");
     STAM_REL_REG(pVM, &pPGM->cHandyPages,                        STAMTYPE_U32,     "/PGM/Page/cHandyPages",              STAMUNIT_COUNT,     "The number of handy pages (not included in cAllPages).");
+    STAM_REL_REG(pVM, &pPGM->cLargePages,                        STAMTYPE_U32,     "/PGM/Page/cLargePages",              STAMUNIT_COUNT,     "The number of large pages allocated (includes disabled).");
+    STAM_REL_REG(pVM, &pPGM->cLargePagesDisabled,                STAMTYPE_U32,     "/PGM/Page/cLargePagesDisabled",      STAMUNIT_COUNT,     "The number of disabled large pages.");
     STAM_REL_REG(pVM, &pPGM->cRelocations,                       STAMTYPE_COUNTER, "/PGM/cRelocations",                  STAMUNIT_OCCURENCES,"Number of hypervisor relocations.");
     STAM_REL_REG(pVM, &pPGM->ChunkR3Map.c,                       STAMTYPE_U32,     "/PGM/ChunkR3Map/c",                  STAMUNIT_COUNT,     "Number of mapped chunks.");
@@ -1614,5 +1616,4 @@
     STAM_REL_REG(pVM, &pPGM->cUnmappedChunks,                    STAMTYPE_U32,     "/PGM/ChunkR3Map/Unmapped",           STAMUNIT_COUNT,     "Number of times we unmapped a chunk.");
 
-    STAM_REL_REG(pVM, &pPGM->StatLargePageAlloc,                 STAMTYPE_COUNTER, "/PGM/LargePage/Alloc",               STAMUNIT_OCCURENCES, "The number of large pages we've used.");
     STAM_REL_REG(pVM, &pPGM->StatLargePageReused,                STAMTYPE_COUNTER, "/PGM/LargePage/Reused",              STAMUNIT_OCCURENCES, "The number of times we've reused a large page.");
     STAM_REL_REG(pVM, &pPGM->StatLargePageRefused,               STAMTYPE_COUNTER, "/PGM/LargePage/Refused",             STAMUNIT_OCCURENCES, "The number of times we couldn't use a large page.");
Index: /trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp	(revision 36008)
+++ /trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp	(revision 36009)
@@ -995,29 +995,28 @@
             {
                 /*
-                    * A RAM page.
-                    */
+                 * A RAM page.
+                 */
                 switch (PGM_PAGE_GET_STATE(pPage))
                 {
-                case PGM_PAGE_STATE_ALLOCATED:
-                    /** @todo Optimize this: Don't always re-enable write
-                        * monitoring if the page is known to be very busy. */
-                    if (PGM_PAGE_IS_WRITTEN_TO(pPage))
-                    {
-                        PGM_PAGE_CLEAR_WRITTEN_TO(pPage);
-                        /* Remember this dirty page for the next (memory) sync. */
-                        PGM_PAGE_SET_FT_DIRTY(pPage);
-                    }
-
-                    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_WRITE_MONITORED);
-                    pVM->pgm.s.cMonitoredPages++;
-                    break;
-
-                case PGM_PAGE_STATE_SHARED:
-                    AssertFailed();
-                    break;
-
-                case PGM_PAGE_STATE_WRITE_MONITORED:    /* nothing to change. */
-                default:
-                    break;
+                    case PGM_PAGE_STATE_ALLOCATED:
+                        /** @todo Optimize this: Don't always re-enable write
+                         * monitoring if the page is known to be very busy. */
+                        if (PGM_PAGE_IS_WRITTEN_TO(pPage))
+                        {
+                            PGM_PAGE_CLEAR_WRITTEN_TO(pPage);
+                            /* Remember this dirty page for the next (memory) sync. */
+                            PGM_PAGE_SET_FT_DIRTY(pPage);
+                        }
+
+                        pgmPhysPageWriteMonitor(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
+                        break;
+
+                    case PGM_PAGE_STATE_SHARED:
+                        AssertFailed();
+                        break;
+
+                    case PGM_PAGE_STATE_WRITE_MONITORED:    /* nothing to change. */
+                    default:
+                        break;
                 }
             }
@@ -1662,6 +1661,8 @@
                 {
                     case PGMPAGETYPE_RAM:
-                        /* Do not replace pages part of a 2 MB continuous range with zero pages, but zero them instead. */
-                        if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
+                        /* Do not replace pages part of a 2 MB continuous range
+                           with zero pages, but zero them instead. */
+                        if (   PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE
+                            || PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
                         {
                             void *pvPage;
@@ -1670,12 +1671,10 @@
                             ASMMemZeroPage(pvPage);
                         }
-                        else
-                        if (PGM_PAGE_IS_BALLOONED(pPage))
+                        else if (PGM_PAGE_IS_BALLOONED(pPage))
                         {
                             /* Turn into a zero page; the balloon status is lost when the VM reboots. */
                             PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
                         }
-                        else
-                        if (!PGM_PAGE_IS_ZERO(pPage))
+                        else if (!PGM_PAGE_IS_ZERO(pPage))
                         {
                             rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
@@ -4140,5 +4139,8 @@
         return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
     }
-    Assert(PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE);
+
+    /** @todo What about ballooning of large pages??! */
+    Assert(   PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
+           && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED);
 
     if (    PGM_PAGE_IS_ZERO(pPage)
Index: /trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp	(revision 36008)
+++ /trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp	(revision 36009)
@@ -1359,6 +1359,6 @@
                                 }
 
-                                PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_WRITE_MONITORED);
-                                pVM->pgm.s.cMonitoredPages++;
+                                pgmPhysPageWriteMonitor(pVM, &pCur->aPages[iPage],
+                                                        pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
                                 paLSPages[iPage].fWriteMonitored        = 1;
                                 paLSPages[iPage].fWriteMonitoredJustNow = 1;
@@ -2658,5 +2658,6 @@
                         /* Free it only if it's not part of a previously
                            allocated large page (no need to clear the page). */
-                        else if (PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE)
+                        else if (   PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
+                                 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED)
                         {
                             rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys);
Index: /trunk/src/VBox/VMM/include/PGMInline.h
===================================================================
--- /trunk/src/VBox/VMM/include/PGMInline.h	(revision 36008)
+++ /trunk/src/VBox/VMM/include/PGMInline.h	(revision 36009)
@@ -563,4 +563,37 @@
 
 #endif /* !IN_RC */
+
+
+/**
+ * Enables write monitoring for an allocated page.
+ *
+ * The caller is responsible for updating the shadow page tables.
+ *
+ * @param   pVM         The VM handle.
+ * @param   pPage       The page to write monitor.
+ * @param   GCPhysPage  The address of the page.
+ */
+DECLINLINE(void) pgmPhysPageWriteMonitor(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
+{
+    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
+    Assert(PGMIsLockOwner(pVM));
+
+    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_WRITE_MONITORED);
+    pVM->pgm.s.cMonitoredPages++;
+
+    /* Large pages must be disabled. */
+    if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
+    {
+        PPGMPAGE pFirstPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
+        AssertFatal(pFirstPage);
+        if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
+        {
+            PGM_PAGE_SET_PDE_TYPE(pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
+            pVM->pgm.s.cLargePagesDisabled++;
+        }
+        else
+            Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
+    }
+}
 
 
Index: /trunk/src/VBox/VMM/include/PGMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/PGMInternal.h	(revision 36008)
+++ /trunk/src/VBox/VMM/include/PGMInternal.h	(revision 36009)
@@ -3213,4 +3213,6 @@
     uint32_t                        cMappedChunks;      /**< Number of times we mapped a chunk. */
     uint32_t                        cUnmappedChunks;    /**< Number of times we unmapped a chunk. */
+    uint32_t                        cLargePages;        /**< The number of large pages. */
+    uint32_t                        cLargePagesDisabled;/**< The number of disabled large pages. */
 /*    uint32_t                        aAlignment4[1]; */
 
@@ -3218,5 +3220,4 @@
     STAMCOUNTER                     cRelocations;
 
-    STAMCOUNTER                     StatLargePageAlloc;                 /**< The number of large pages we've allocated.*/
     STAMCOUNTER                     StatLargePageReused;                /**< The number of large pages we've reused.*/
     STAMCOUNTER                     StatLargePageRefused;               /**< The number of times we couldn't use a large page.*/
@@ -3766,5 +3767,5 @@
 int             pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
 int             pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys);
-int             pgmPhysIsValidLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage);
+int             pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage);
 int             pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
 int             pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys);
