Index: /trunk/include/VBox/err.h
===================================================================
--- /trunk/include/VBox/err.h	(revision 26624)
+++ /trunk/include/VBox/err.h	(revision 26625)
@@ -467,4 +467,6 @@
 /** PGM pool flush pending - return to ring 3. */
 #define VINF_PGM_POOL_FLUSH_PENDING             (1644)
+/** Unable to use the range for a large page. */
+#define VERR_PGM_INVALID_LARGE_PAGE_RANGE       (-1645)
 /** @} */
 
Index: /trunk/src/VBox/VMM/PGMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/PGMInternal.h	(revision 26624)
+++ /trunk/src/VBox/VMM/PGMInternal.h	(revision 26625)
@@ -3321,5 +3321,5 @@
 
 int             pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
-int             pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys);
+int             pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS *pHCPhys);
 int             pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
 int             pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys);
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 26624)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 26625)
@@ -2938,68 +2938,9 @@
 
 # if (PGM_SHW_TYPE == PGM_TYPE_EPT) && (HC_ARCH_BITS == 64) && defined(RT_OS_WINDOWS) && defined(DEBUG_sandervl)
-    PPGMPAGE pPage;
-    rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPtrPage & SHW_PDE_PG_MASK, &pPage);
-    if (    RT_SUCCESS(rc)
-        &&  PGM_PAGE_GET_TYPE(pPage)  == PGMPAGETYPE_RAM)
-    {
-        RTHCPHYS HCPhys = NIL_RTHCPHYS;
-        unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pPage);
-
-        if  (uPDEType == PGM_PAGE_PDE_TYPE_PDE)
-        {
-            /* Previously allocated 2 MB range can be reused. */
-            Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
-            HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
-        }
-        else
-        if  (   uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
-             && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
-        {
-            RTGCPHYS GCPhysBase = GCPtrPage & SHW_PDE_PG_MASK;
-            RTGCPHYS GCPhys = GCPhysBase;
-            unsigned iPage;
-
-            /* Lazy approach: check all pages in the 2 MB range. 
-             * The whole range must be ram and unallocated
-             */
-            for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
-            {
-                rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
-                if  (   RT_FAILURE(rc)
-                     || PGM_PAGE_GET_TYPE(pPage)  != PGMPAGETYPE_RAM
-                     || PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
-                {
-                    LogFlow(("Found page with wrong attributes; cancel check. rc=%d\n", rc));
-                    break;
-                }
-                Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
-                GCPhys += PAGE_SIZE;
-            }
-            /* Fetch the start page of the 2 MB range again. */
-            rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
-            AssertRC(rc);   /* can't fail */
-
-            if (iPage != _2M/PAGE_SIZE)
-            {
-                /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
-                STAM_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
-                PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
-            }
-            else
-            {
-                rc = pgmPhysAllocLargePage(pVM, GCPhysBase);
-                if (RT_SUCCESS(rc))
-                {   
-                    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
-                    HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
-                    STAM_COUNTER_INC(&pVM->pgm.s.StatLargePageUsed);
-                }
-                else
-                    LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
-            }
-        }
-
-        if (HCPhys != NIL_RTHCPHYS)
-        {
+    {
+        RTHCPHYS HCPhys;
+        rc = pgmPhysAllocLargePage(pVM, GCPtrPage & SHW_PDE_PG_MASK, &HCPhys);
+        if (RT_SUCCESS(rc))
+        {
             PdeDst.u &= X86_PDE_AVL_MASK;
             PdeDst.u |= HCPhys;
@@ -3014,5 +2955,8 @@
             STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
             return VINF_SUCCESS;
+
         }
+        else
+            LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
     }
 # endif
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 26624)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 26625)
@@ -457,18 +457,13 @@
  * @param   pVM         The VM address.
  * @param   GCPhys      The address of the page.
+ * @param   pHCPhys     Where to return the host physical address of the large page (out).
  *
  * @remarks Must be called from within the PGM critical section. It may
  *          nip back to ring-3/0 in some cases.
- *
- * @remarks This function shouldn't really fail, however if it does
- *          it probably means we've screwed up the size of handy pages and/or
- *          the low-water mark. Or, that some device I/O is causing a lot of
- *          pages to be allocated while while the host is in a low-memory
- *          condition. This latter should be handled elsewhere and in a more
- *          controlled manner, it's on the @bugref{3170} todo list...
- */
-int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
-{
-    LogFlow(("pgmPhysAllocLargePage: %RGp\n", GCPhys));
+ */
+int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS *pHCPhys)
+{
+    RTGCPHYS GCPhysBase = GCPhys & X86_PDE_PAE_PG_MASK_FULL;
+    LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
 
     /*
@@ -477,11 +472,77 @@
     Assert(PGMIsLocked(pVM));
     Assert((GCPhys & X86_PD_PAE_MASK) == 0);
-
+    AssertPtr(pHCPhys);
+
+    PPGMPAGE pPage;
+    int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
+    if (    RT_SUCCESS(rc)
+        &&  PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
+    {
+        RTHCPHYS HCPhys = NIL_RTHCPHYS;
+        unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pPage);
+
+        if  (uPDEType == PGM_PAGE_PDE_TYPE_PDE)
+        {
+            /* Previously allocated 2 MB range can be reused. */
+            Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
+
+            *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
+            return VINF_SUCCESS;
+        }
+        else
+        if  (   uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
+             && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
+        {
+            unsigned iPage;
+
+            GCPhys = GCPhysBase;
+
+            /* Lazy approach: check all pages in the 2 MB range.
+             * The whole range must be RAM and unallocated.
+             */
+            for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
+            {
+                rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
+                if  (   RT_FAILURE(rc)
+                     || PGM_PAGE_GET_TYPE(pPage)  != PGMPAGETYPE_RAM
+                     || PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
+                {
+                    LogFlow(("Found page with wrong attributes; cancel check. rc=%d\n", rc));
+                    break;
+                }
+                Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
+                GCPhys += PAGE_SIZE;
+            }
+            /* Fetch the start page of the 2 MB range again. */
+            rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
+            AssertRC(rc);   /* can't fail */
+
+            if (iPage != _2M/PAGE_SIZE)
+            {
+                /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
+                STAM_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
+                PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
+                return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
+            }
+            else
+            {
 #ifdef IN_RING3
-    int rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhys);
+                rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
 #else
-    int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhys);
+                rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
 #endif
-    return rc;
+                if (RT_SUCCESS(rc))
+                {
+                    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
+                    *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
+                    STAM_COUNTER_INC(&pVM->pgm.s.StatLargePageUsed);
+                    return VINF_SUCCESS;
+                }
+                LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
+                return rc;
+            }
+        }
+    }
+    return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
 }
 
