Index: /trunk/include/VBox/vmm/gvm.h
===================================================================
--- /trunk/include/VBox/vmm/gvm.h	(revision 82554)
+++ /trunk/include/VBox/vmm/gvm.h	(revision 82555)
@@ -207,4 +207,12 @@
     union
     {
+#if defined(VMM_INCLUDED_SRC_include_PGMInternal_h) && defined(IN_RING0)
+        struct PGMR0PERVM   s;
+#endif
+        uint8_t             padding[640];
+    } pgmr0;
+
+    union
+    {
 #if defined(VMM_INCLUDED_SRC_include_IOMInternal_h) && defined(IN_RING0)
         struct IOMR0PERVM   s;
@@ -223,7 +231,7 @@
     /** Padding so aCpus starts on a page boundrary.  */
 #ifdef VBOX_WITH_NEM_R0
-    uint8_t         abPadding2[4096 - 64 - 256 - 512 - 256 - 64 - 1792 - 512 - 64 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];
-#else
-    uint8_t         abPadding2[4096 - 64 - 256 - 512       - 64 - 1792 - 512 - 64 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];
+    uint8_t         abPadding2[4096*2 - 64 - 256 - 512 - 256 - 64 - 1792 - 640 - 512 - 64 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];
+#else
+    uint8_t         abPadding2[4096*2 - 64 - 256 - 512       - 64 - 1792 - 640 - 512 - 64 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];
 #endif
 
Index: /trunk/include/VBox/vmm/mm.h
===================================================================
--- /trunk/include/VBox/vmm/mm.h	(revision 82554)
+++ /trunk/include/VBox/vmm/mm.h	(revision 82555)
@@ -213,9 +213,10 @@
 VMMDECL(bool)       MMHyperIsInsideArea(PVM pVM, RTGCPTR GCPtr);
 
-
+#if 0
 VMMDECL(RTHCPHYS)   MMPage2Phys(PVM pVM, void *pvPage);
 VMMDECL(void *)     MMPagePhys2Page(PVM pVM, RTHCPHYS HCPhysPage);
 VMMDECL(int)        MMPagePhys2PageEx(PVM pVM, RTHCPHYS HCPhysPage, void **ppvPage);
 VMMDECL(int)        MMPagePhys2PageTry(PVM pVM, RTHCPHYS HCPhysPage, void **ppvPage);
+#endif
 
 
@@ -289,4 +290,5 @@
 /** @defgroup grp_mm_page   Physical Page Pool
  * @{ */
+#if 0
 VMMR3DECL(void *)   MMR3PageAlloc(PVM pVM);
 VMMR3DECL(RTHCPHYS) MMR3PageAllocPhys(PVM pVM);
@@ -295,4 +297,5 @@
 VMMR3DECL(void)     MMR3PageFreeLow(PVM pVM, void *pvPage);
 VMMR3DECL(void)     MMR3PageFreeByPhys(PVM pVM, RTHCPHYS HCPhysPage);
+#endif
 VMMR3DECL(void *)   MMR3PageDummyHCPtr(PVM pVM);
 VMMR3DECL(RTHCPHYS) MMR3PageDummyHCPhys(PVM pVM);
Index: /trunk/include/VBox/vmm/pgm.h
===================================================================
--- /trunk/include/VBox/vmm/pgm.h	(revision 82554)
+++ /trunk/include/VBox/vmm/pgm.h	(revision 82555)
@@ -675,21 +675,29 @@
  * @{
  */
-VMMR0_INT_DECL(int) PGMR0PhysAllocateHandyPages(PGVM pGVM, VMCPUID idCpu);
-VMMR0_INT_DECL(int) PGMR0PhysFlushHandyPages(PGVM pGVM, VMCPUID idCpu);
-VMMR0_INT_DECL(int) PGMR0PhysAllocateLargeHandyPage(PGVM pGVM, VMCPUID idCpu);
-VMMR0_INT_DECL(int) PGMR0PhysMMIO2MapKernel(PGVM pGVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
-                                            size_t offSub, size_t cbSub, void **ppvMapping);
-VMMR0_INT_DECL(int) PGMR0PhysSetupIoMmu(PGVM pGVM);
-VMMR0DECL(int)      PGMR0SharedModuleCheck(PVMCC pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule, PCRTGCPTR64 paRegionsGCPtrs);
-VMMR0DECL(int)      PGMR0Trap0eHandlerNestedPaging(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS pvFault);
-VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr);
+VMMR0_INT_DECL(int)  PGMR0InitPerVMData(PGVM pGVM);
+VMMR0_INT_DECL(int)  PGMR0InitVM(PGVM pGVM);
+VMMR0_INT_DECL(void) PGMR0CleanupVM(PGVM pGVM);
+VMMR0_INT_DECL(int)  PGMR0PhysAllocateHandyPages(PGVM pGVM, VMCPUID idCpu);
+VMMR0_INT_DECL(int)  PGMR0PhysFlushHandyPages(PGVM pGVM, VMCPUID idCpu);
+VMMR0_INT_DECL(int)  PGMR0PhysAllocateLargeHandyPage(PGVM pGVM, VMCPUID idCpu);
+VMMR0_INT_DECL(int)  PGMR0PhysMMIO2MapKernel(PGVM pGVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
+                                             size_t offSub, size_t cbSub, void **ppvMapping);
+VMMR0_INT_DECL(int)  PGMR0PhysSetupIoMmu(PGVM pGVM);
+VMMR0DECL(int)       PGMR0SharedModuleCheck(PVMCC pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule,
+                                            PCRTGCPTR64 paRegionsGCPtrs);
+VMMR0DECL(int)       PGMR0Trap0eHandlerNestedPaging(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
+                                                    PCPUMCTXCORE pRegFrame, RTGCPHYS pvFault);
+VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode,
+                                                      PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr);
+VMMR0_INT_DECL(int)  PGMR0PoolGrow(PGVM pGVM);
+
 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
-VMMR0DECL(int)      PGMR0DynMapInit(void);
-VMMR0DECL(void)     PGMR0DynMapTerm(void);
-VMMR0DECL(int)      PGMR0DynMapInitVM(PVMCC pVM);
-VMMR0DECL(void)     PGMR0DynMapTermVM(PVMCC pVM);
-VMMR0DECL(int)      PGMR0DynMapAssertIntegrity(void);
-VMMR0DECL(bool)     PGMR0DynMapStartOrMigrateAutoSet(PVMCPUCC pVCpu);
-VMMR0DECL(void)     PGMR0DynMapMigrateAutoSet(PVMCPUCC pVCpu);
+VMMR0DECL(int)       PGMR0DynMapInit(void);
+VMMR0DECL(void)      PGMR0DynMapTerm(void);
+VMMR0DECL(int)       PGMR0DynMapInitVM(PVMCC pVM);
+VMMR0DECL(void)      PGMR0DynMapTermVM(PVMCC pVM);
+VMMR0DECL(int)       PGMR0DynMapAssertIntegrity(void);
+VMMR0DECL(bool)      PGMR0DynMapStartOrMigrateAutoSet(PVMCPUCC pVCpu);
+VMMR0DECL(void)      PGMR0DynMapMigrateAutoSet(PVMCPUCC pVCpu);
 # endif
 /** @} */
@@ -787,5 +795,5 @@
                                                      const char *pszDesc,
                                                      PPGMPHYSHANDLERTYPE phType);
-VMMR3DECL(int)      PGMR3PoolGrow(PVM pVM);
+VMMR3_INT_DECL(int) PGMR3PoolGrow(PVM pVM, PVMCPU pVCpu);
 
 VMMR3DECL(int)      PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv);
Index: /trunk/include/VBox/vmm/vmm.h
===================================================================
--- /trunk/include/VBox/vmm/vmm.h	(revision 82554)
+++ /trunk/include/VBox/vmm/vmm.h	(revision 82555)
@@ -311,4 +311,6 @@
     /** Call PGMR0PhysSetupIommu(). */
     VMMR0_DO_PGM_PHYS_SETUP_IOMMU,
+    /** Call PGMR0PoolGrow(). */
+    VMMR0_DO_PGM_POOL_GROW,
 
     /** Call GMMR0InitialReservation(). */
Index: /trunk/src/VBox/VMM/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/Makefile.kmk	(revision 82554)
+++ /trunk/src/VBox/VMM/Makefile.kmk	(revision 82555)
@@ -490,4 +490,5 @@
 	VMMR0/PDMR0Driver.cpp \
 	VMMR0/PGMR0.cpp \
+	VMMR0/PGMR0Pool.cpp \
 	VMMR0/PGMR0SharedPage.cpp \
 	VMMR0/VMMR0.cpp \
@@ -514,5 +515,4 @@
 	VMMAll/MMAll.cpp \
 	VMMAll/MMAllHyper.cpp \
-	VMMAll/MMAllPagePool.cpp \
 	VMMAll/NEMAll.cpp \
 	VMMAll/PDMAll.cpp \
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 82554)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 82555)
@@ -4974,5 +4974,5 @@
         STAM_PROFILE_ADV_SUSPEND(&pPool->StatAlloc, a);
 #ifdef IN_RING3
-        int rc = PGMR3PoolGrow(pVM);
+        int rc = PGMR3PoolGrow(pVM, VMMGetCpu(pVM));
 #else
         int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_POOL_GROW, 0);
@@ -5189,4 +5189,23 @@
     PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
     return (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
+}
+
+
+/**
+ * Internal worker for PGM_HCPHYS_2_PTR.
+ *
+ * @returns VBox status code.
+ * @param   pVM         The cross context VM structure.
+ * @param   HCPhys      The HC physical address of the shadow page.
+ * @param   ppv         Where to return the address.
+ */
+int pgmPoolHCPhys2Ptr(PVM pVM, RTHCPHYS HCPhys, void **ppv)
+{
+    PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pVM->pgm.s.CTX_SUFF(pPool)->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
+    AssertMsgReturn(pPage && pPage->enmKind != PGMPOOLKIND_FREE,
+                    ("HCPhys=%RHp pPage=%p idx=%d\n", HCPhys, pPage, (pPage) ? pPage->idx : 0),
+                    VERR_PGM_POOL_GET_PAGE_FAILED);
+    *ppv = (uint8_t *)pPage->CTX_SUFF(pvPage) + (HCPhys & PAGE_OFFSET_MASK);
+    return VINF_SUCCESS;
 }
 
@@ -5333,5 +5352,4 @@
         PPGMPOOLPAGE pPage = &pPool->aPages[i];
 
-        Assert(pPage->Core.Key == MMPage2Phys(pVM, pPage->pvPageR3));
         if (pPage->fMonitored)
             pgmPoolMonitorFlush(pPool, pPage);
Index: /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp	(revision 82554)
+++ /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp	(revision 82555)
@@ -902,125 +902,127 @@
                         RT_BZERO(pGVM, cPages << PAGE_SHIFT);
                         gvmmR0InitPerVMData(pGVM, iHandle, cCpus, pSession);
+                        pGVM->gvmm.s.VMMemObj  = hVMMemObj;
                         GMMR0InitPerVMData(pGVM);
+                        rc = PGMR0InitPerVMData(pGVM);
                         PDMR0InitPerVMData(pGVM);
                         IOMR0InitPerVMData(pGVM);
-                        pGVM->gvmm.s.VMMemObj  = hVMMemObj;
-
-                        /*
-                         * Allocate page array.
-                         * This currently have to be made available to ring-3, but this is should change eventually.
-                         */
-                        rc = RTR0MemObjAllocPage(&pGVM->gvmm.s.VMPagesMemObj, cPages * sizeof(SUPPAGE), false /* fExecutable */);
                         if (RT_SUCCESS(rc))
                         {
-                            PSUPPAGE paPages = (PSUPPAGE)RTR0MemObjAddress(pGVM->gvmm.s.VMPagesMemObj); AssertPtr(paPages);
-                            for (uint32_t iPage = 0; iPage < cPages; iPage++)
-                            {
-                                paPages[iPage].uReserved = 0;
-                                paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pGVM->gvmm.s.VMMemObj, iPage);
-                                Assert(paPages[iPage].Phys != NIL_RTHCPHYS);
-                            }
-
                             /*
-                             * Map the page array, VM and VMCPU structures into ring-3.
+                             * Allocate page array.
+                             * This currently has to be made available to ring-3, but that should change eventually.
                              */
-                            AssertCompileSizeAlignment(VM, PAGE_SIZE);
-                            rc = RTR0MemObjMapUserEx(&pGVM->gvmm.s.VMMapObj, pGVM->gvmm.s.VMMemObj, (RTR3PTR)-1, 0,
-                                                     RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS,
-                                                     0 /*offSub*/, sizeof(VM));
-                            for (VMCPUID i = 0; i < cCpus && RT_SUCCESS(rc); i++)
-                            {
-                                AssertCompileSizeAlignment(VMCPU, PAGE_SIZE);
-                                rc = RTR0MemObjMapUserEx(&pGVM->aCpus[i].gvmm.s.VMCpuMapObj, pGVM->gvmm.s.VMMemObj,
-                                                         (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS,
-                                                         RT_UOFFSETOF_DYN(GVM, aCpus[i]), sizeof(VMCPU));
-                            }
-                            if (RT_SUCCESS(rc))
-                                rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMPagesMapObj, pGVM->gvmm.s.VMPagesMemObj, (RTR3PTR)-1,
-                                                       0 /* uAlignment */, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
-                                                       NIL_RTR0PROCESS);
+                            rc = RTR0MemObjAllocPage(&pGVM->gvmm.s.VMPagesMemObj, cPages * sizeof(SUPPAGE), false /* fExecutable */);
                             if (RT_SUCCESS(rc))
                             {
+                                PSUPPAGE paPages = (PSUPPAGE)RTR0MemObjAddress(pGVM->gvmm.s.VMPagesMemObj); AssertPtr(paPages);
+                                for (uint32_t iPage = 0; iPage < cPages; iPage++)
+                                {
+                                    paPages[iPage].uReserved = 0;
+                                    paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pGVM->gvmm.s.VMMemObj, iPage);
+                                    Assert(paPages[iPage].Phys != NIL_RTHCPHYS);
+                                }
+
                                 /*
-                                 * Initialize all the VM pointers.
+                                 * Map the page array, VM and VMCPU structures into ring-3.
                                  */
-                                PVMR3 pVMR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMMapObj);
-                                AssertPtr((void *)pVMR3);
-
-                                for (VMCPUID i = 0; i < cCpus; i++)
+                                AssertCompileSizeAlignment(VM, PAGE_SIZE);
+                                rc = RTR0MemObjMapUserEx(&pGVM->gvmm.s.VMMapObj, pGVM->gvmm.s.VMMemObj, (RTR3PTR)-1, 0,
+                                                         RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS,
+                                                         0 /*offSub*/, sizeof(VM));
+                                for (VMCPUID i = 0; i < cCpus && RT_SUCCESS(rc); i++)
                                 {
-                                    pGVM->aCpus[i].pVMR0 = pGVM;
-                                    pGVM->aCpus[i].pVMR3 = pVMR3;
-                                    pGVM->apCpusR3[i] = RTR0MemObjAddressR3(pGVM->aCpus[i].gvmm.s.VMCpuMapObj);
-                                    pGVM->aCpus[i].pVCpuR3 = pGVM->apCpusR3[i];
-                                    pGVM->apCpusR0[i] = &pGVM->aCpus[i];
-                                    AssertPtr((void *)pGVM->apCpusR3[i]);
+                                    AssertCompileSizeAlignment(VMCPU, PAGE_SIZE);
+                                    rc = RTR0MemObjMapUserEx(&pGVM->aCpus[i].gvmm.s.VMCpuMapObj, pGVM->gvmm.s.VMMemObj,
+                                                             (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS,
+                                                             RT_UOFFSETOF_DYN(GVM, aCpus[i]), sizeof(VMCPU));
                                 }
-
-                                pGVM->paVMPagesR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMPagesMapObj);
-                                AssertPtr((void *)pGVM->paVMPagesR3);
-
-                                /*
-                                 * Complete the handle - take the UsedLock sem just to be careful.
-                                 */
-                                rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
-                                AssertRC(rc);
-
-                                pHandle->pGVM                   = pGVM;
-                                pHandle->hEMT0                  = hEMT0;
-                                pHandle->ProcId                 = ProcId;
-                                pGVM->pVMR3                     = pVMR3;
-                                pGVM->pVMR3Unsafe               = pVMR3;
-                                pGVM->aCpus[0].hEMT             = hEMT0;
-                                pGVM->aCpus[0].hNativeThreadR0  = hEMT0;
-                                pGVMM->cEMTs += cCpus;
-
-                                /* Associate it with the session and create the context hook for EMT0. */
-                                rc = SUPR0SetSessionVM(pSession, pGVM, pGVM);
+                                if (RT_SUCCESS(rc))
+                                    rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMPagesMapObj, pGVM->gvmm.s.VMPagesMemObj, (RTR3PTR)-1,
+                                                           0 /* uAlignment */, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
+                                                           NIL_RTR0PROCESS);
                                 if (RT_SUCCESS(rc))
                                 {
-                                    rc = VMMR0ThreadCtxHookCreateForEmt(&pGVM->aCpus[0]);
+                                    /*
+                                     * Initialize all the VM pointers.
+                                     */
+                                    PVMR3 pVMR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMMapObj);
+                                    AssertPtr((void *)pVMR3);
+
+                                    for (VMCPUID i = 0; i < cCpus; i++)
+                                    {
+                                        pGVM->aCpus[i].pVMR0 = pGVM;
+                                        pGVM->aCpus[i].pVMR3 = pVMR3;
+                                        pGVM->apCpusR3[i] = RTR0MemObjAddressR3(pGVM->aCpus[i].gvmm.s.VMCpuMapObj);
+                                        pGVM->aCpus[i].pVCpuR3 = pGVM->apCpusR3[i];
+                                        pGVM->apCpusR0[i] = &pGVM->aCpus[i];
+                                        AssertPtr((void *)pGVM->apCpusR3[i]);
+                                    }
+
+                                    pGVM->paVMPagesR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMPagesMapObj);
+                                    AssertPtr((void *)pGVM->paVMPagesR3);
+
+                                    /*
+                                     * Complete the handle - take the UsedLock sem just to be careful.
+                                     */
+                                    rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
+                                    AssertRC(rc);
+
+                                    pHandle->pGVM                   = pGVM;
+                                    pHandle->hEMT0                  = hEMT0;
+                                    pHandle->ProcId                 = ProcId;
+                                    pGVM->pVMR3                     = pVMR3;
+                                    pGVM->pVMR3Unsafe               = pVMR3;
+                                    pGVM->aCpus[0].hEMT             = hEMT0;
+                                    pGVM->aCpus[0].hNativeThreadR0  = hEMT0;
+                                    pGVMM->cEMTs += cCpus;
+
+                                    /* Associate it with the session and create the context hook for EMT0. */
+                                    rc = SUPR0SetSessionVM(pSession, pGVM, pGVM);
                                     if (RT_SUCCESS(rc))
                                     {
-                                        /*
-                                         * Done!
-                                         */
-                                        VBOXVMM_R0_GVMM_VM_CREATED(pGVM, pGVM, ProcId, (void *)hEMT0, cCpus);
-
-                                        GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
-                                        gvmmR0CreateDestroyUnlock(pGVMM);
-
-                                        CPUMR0RegisterVCpuThread(&pGVM->aCpus[0]);
-
-                                        *ppGVM = pGVM;
-                                        Log(("GVMMR0CreateVM: pVMR3=%p pGVM=%p hGVM=%d\n", pVMR3, pGVM, iHandle));
-                                        return VINF_SUCCESS;
+                                        rc = VMMR0ThreadCtxHookCreateForEmt(&pGVM->aCpus[0]);
+                                        if (RT_SUCCESS(rc))
+                                        {
+                                            /*
+                                             * Done!
+                                             */
+                                            VBOXVMM_R0_GVMM_VM_CREATED(pGVM, pGVM, ProcId, (void *)hEMT0, cCpus);
+
+                                            GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
+                                            gvmmR0CreateDestroyUnlock(pGVMM);
+
+                                            CPUMR0RegisterVCpuThread(&pGVM->aCpus[0]);
+
+                                            *ppGVM = pGVM;
+                                            Log(("GVMMR0CreateVM: pVMR3=%p pGVM=%p hGVM=%d\n", pVMR3, pGVM, iHandle));
+                                            return VINF_SUCCESS;
+                                        }
+
+                                        SUPR0SetSessionVM(pSession, NULL, NULL);
                                     }
-
-                                    SUPR0SetSessionVM(pSession, NULL, NULL);
+                                    GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
                                 }
-                                GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
-                            }
-
-                            /* Cleanup mappings. */
-                            if (pGVM->gvmm.s.VMMapObj != NIL_RTR0MEMOBJ)
-                            {
-                                RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */);
-                                pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
-                            }
-                            for (VMCPUID i = 0; i < cCpus; i++)
-                                if (pGVM->aCpus[i].gvmm.s.VMCpuMapObj != NIL_RTR0MEMOBJ)
+
+                                /* Cleanup mappings. */
+                                if (pGVM->gvmm.s.VMMapObj != NIL_RTR0MEMOBJ)
                                 {
-                                    RTR0MemObjFree(pGVM->aCpus[i].gvmm.s.VMCpuMapObj, false /* fFreeMappings */);
-                                    pGVM->aCpus[i].gvmm.s.VMCpuMapObj = NIL_RTR0MEMOBJ;
+                                    RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */);
+                                    pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
                                 }
-                            if (pGVM->gvmm.s.VMPagesMapObj != NIL_RTR0MEMOBJ)
-                            {
-                                RTR0MemObjFree(pGVM->gvmm.s.VMPagesMapObj, false /* fFreeMappings */);
-                                pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
+                                for (VMCPUID i = 0; i < cCpus; i++)
+                                    if (pGVM->aCpus[i].gvmm.s.VMCpuMapObj != NIL_RTR0MEMOBJ)
+                                    {
+                                        RTR0MemObjFree(pGVM->aCpus[i].gvmm.s.VMCpuMapObj, false /* fFreeMappings */);
+                                        pGVM->aCpus[i].gvmm.s.VMCpuMapObj = NIL_RTR0MEMOBJ;
+                                    }
+                                if (pGVM->gvmm.s.VMPagesMapObj != NIL_RTR0MEMOBJ)
+                                {
+                                    RTR0MemObjFree(pGVM->gvmm.s.VMPagesMapObj, false /* fFreeMappings */);
+                                    pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
+                                }
                             }
                         }
                     }
-
                 }
                 /* else: The user wasn't permitted to create this VM. */
@@ -1297,4 +1299,5 @@
     PDMR0CleanupVM(pGVM);
     IOMR0CleanupVM(pGVM);
+    PGMR0CleanupVM(pGVM);
 
     AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
Index: /trunk/src/VBox/VMM/VMMR0/PGMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/PGMR0.cpp	(revision 82554)
+++ /trunk/src/VBox/VMM/VMMR0/PGMR0.cpp	(revision 82555)
@@ -33,4 +33,5 @@
 #include <iprt/assert.h>
 #include <iprt/mem.h>
+#include <iprt/memobj.h>
 
 
@@ -54,4 +55,72 @@
 #include "PGMR0Bth.h"
 #undef PGM_BTH_NAME
+
+
+/**
+ * Initializes the per-VM data for the PGM.
+ *
+ * This is called from under the GVMM lock, so it should only initialize the
+ * data so PGMR0CleanupVM and others will work smoothly.
+ *
+ * @returns VBox status code.
+ * @param   pGVM    Pointer to the global VM structure.
+ */
+VMMR0_INT_DECL(int) PGMR0InitPerVMData(PGVM pGVM)
+{
+    AssertCompile(sizeof(pGVM->pgm.s) <= sizeof(pGVM->pgm.padding));
+    AssertCompile(sizeof(pGVM->pgmr0.s) <= sizeof(pGVM->pgmr0.padding));
+
+    AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs) == RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMapObjs));
+    for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs); i++)
+    {
+        pGVM->pgmr0.s.ahPoolMemObjs[i] = NIL_RTR0MEMOBJ;
+        pGVM->pgmr0.s.ahPoolMapObjs[i] = NIL_RTR0MEMOBJ;
+    }
+    return RTCritSectInit(&pGVM->pgmr0.s.PoolGrowCritSect);
+}
+
+
+/**
+ * Initialize the per-VM PGM for ring-0.
+ *
+ * @returns VBox status code.
+ * @param   pGVM    Pointer to the global VM structure.
+ */
+VMMR0_INT_DECL(int) PGMR0InitVM(PGVM pGVM)
+{
+    int rc = VINF_SUCCESS;
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+    rc = PGMR0DynMapInitVM(pGVM);
+#endif
+    RT_NOREF(pGVM);
+    return rc;
+}
+
+
+/**
+ * Cleans up any loose ends before the GVM structure is destroyed.
+ */
+VMMR0_INT_DECL(void) PGMR0CleanupVM(PGVM pGVM)
+{
+    for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs); i++)
+    {
+        if (pGVM->pgmr0.s.ahPoolMapObjs[i] != NIL_RTR0MEMOBJ)
+        {
+            int rc = RTR0MemObjFree(pGVM->pgmr0.s.ahPoolMapObjs[i], true /*fFreeMappings*/);
+            AssertRC(rc);
+            pGVM->pgmr0.s.ahPoolMapObjs[i] = NIL_RTR0MEMOBJ;
+        }
+
+        if (pGVM->pgmr0.s.ahPoolMemObjs[i] != NIL_RTR0MEMOBJ)
+        {
+            int rc = RTR0MemObjFree(pGVM->pgmr0.s.ahPoolMemObjs[i], true /*fFreeMappings*/);
+            AssertRC(rc);
+            pGVM->pgmr0.s.ahPoolMemObjs[i] = NIL_RTR0MEMOBJ;
+        }
+    }
+
+    if (RTCritSectIsInitialized(&pGVM->pgmr0.s.PoolGrowCritSect))
+        RTCritSectDelete(&pGVM->pgmr0.s.PoolGrowCritSect);
+}
 
 
Index: /trunk/src/VBox/VMM/VMMR0/PGMR0Pool.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/PGMR0Pool.cpp	(revision 82555)
+++ /trunk/src/VBox/VMM/VMMR0/PGMR0Pool.cpp	(revision 82555)
@@ -0,0 +1,145 @@
+/* $Id$ */
+/** @file
+ * PGM Shadow Page Pool, ring-0 specific bits.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+*   Header Files                                                                                                                 *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PGM_POOL
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/hm.h>
+#include "PGMInternal.h"
+#include <VBox/vmm/vmcc.h>
+#include "PGMInline.h"
+
+#include <VBox/log.h>
+#include <VBox/err.h>
+#include <iprt/mem.h>
+#include <iprt/memobj.h>
+
+
+
+/**
+ * Grows the shadow page pool.
+ *
+ * I.e. adds more pages to it, assuming the pool hasn't reached cMaxPages yet.
+ *
+ * @returns VBox status code.
+ * @param   pGVM    The ring-0 VM structure.
+ */
+VMMR0_INT_DECL(int) PGMR0PoolGrow(PGVM pGVM)
+{
+    PPGMPOOL pPool = pGVM->pgm.s.pPoolR0;
+    AssertReturn(pPool->cCurPages < pPool->cMaxPages, VERR_PGM_POOL_MAXED_OUT_ALREADY);
+
+    /* With 32-bit guests and no EPT, the CR3 limits the root pages to low
+       (below 4 GB) memory. */
+    /** @todo change the pool to handle ROOT page allocations specially when
+     *        required. */
+    bool const fCanUseHighMemory = HMIsNestedPagingActive(pGVM);
+
+    STAM_REL_PROFILE_START(&pPool->StatGrow, a);
+    int rc = RTCritSectEnter(&pGVM->pgmr0.s.PoolGrowCritSect);
+    AssertRCReturn(rc, rc);
+
+    /*
+     * Figure out how many pages we should allocate.
+     */
+    uint32_t const cMaxPages = RT_MIN(pPool->cMaxPages, PGMPOOL_IDX_LAST);
+    uint32_t const cCurPages = RT_MIN(pPool->cCurPages, cMaxPages);
+    if (cCurPages < cMaxPages)
+    {
+        uint32_t cNewPages = cMaxPages - cCurPages;
+        if (cNewPages > PGMPOOL_CFG_MAX_GROW)
+            cNewPages = PGMPOOL_CFG_MAX_GROW;
+        LogFlow(("PGMR0PoolGrow: Growing the pool by %u (%#x) pages to %u (%#x) pages. fCanUseHighMemory=%RTbool\n",
+                 cNewPages, cNewPages, cCurPages + cNewPages, cCurPages + cNewPages, fCanUseHighMemory));
+
+        /* Check that the handles in the arrays entry are both NIL. */
+        uintptr_t const idxMemHandle = cCurPages / (PGMPOOL_CFG_MAX_GROW);
+        AssertCompile(   (PGMPOOL_IDX_LAST + (PGMPOOL_CFG_MAX_GROW - 1)) / PGMPOOL_CFG_MAX_GROW
+                      <= RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs));
+        AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs) == RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMapObjs));
+        AssertLogRelMsgReturnStmt(   pGVM->pgmr0.s.ahPoolMemObjs[idxMemHandle] == NIL_RTR0MEMOBJ
+                                  && pGVM->pgmr0.s.ahPoolMapObjs[idxMemHandle] == NIL_RTR0MEMOBJ,
+                                  ("idxMemHandle=%#x\n", idxMemHandle), RTCritSectLeave(&pGVM->pgmr0.s.PoolGrowCritSect),
+                                  VERR_PGM_POOL_IPE);
+
+        /*
+         * Allocate the new pages and map them into ring-3.
+         */
+        RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
+        if (fCanUseHighMemory)
+            rc = RTR0MemObjAllocPage(&hMemObj, cNewPages * PAGE_SIZE, false /*fExecutable*/);
+        else
+            rc = RTR0MemObjAllocLow(&hMemObj, cNewPages * PAGE_SIZE, false /*fExecutable*/);
+        if (RT_SUCCESS(rc))
+        {
+            RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ;
+            rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
+            if (RT_SUCCESS(rc))
+            {
+                pGVM->pgmr0.s.ahPoolMemObjs[idxMemHandle] = hMemObj;
+                pGVM->pgmr0.s.ahPoolMapObjs[idxMemHandle] = hMapObj;
+
+                uint8_t *pbRing0 = (uint8_t *)RTR0MemObjAddress(hMemObj);
+                RTR3PTR  pbRing3 = RTR0MemObjAddressR3(hMapObj);
+                AssertPtr(pbRing0);
+                Assert(((uintptr_t)pbRing0 & PAGE_OFFSET_MASK) == 0);
+                AssertPtr(pbRing3 != NIL_RTR3PTR);
+                Assert((pbRing3 & PAGE_OFFSET_MASK) == 0);
+
+                /*
+                 * Initialize the new pages.
+                 */
+                for (unsigned iNewPage = 0; iNewPage < cNewPages; iNewPage++)
+                {
+                    PPGMPOOLPAGE pPage = &pPool->aPages[cCurPages + iNewPage];
+                    pPage->pvPageR0         = &pbRing0[iNewPage * PAGE_SIZE];
+                    pPage->pvPageR3         = pbRing3 + iNewPage * PAGE_SIZE;
+                    pPage->Core.Key         = RTR0MemObjGetPagePhysAddr(hMemObj, iNewPage);
+                    AssertFatal(pPage->Core.Key < _4G || fCanUseHighMemory);
+                    pPage->GCPhys           = NIL_RTGCPHYS;
+                    pPage->enmKind          = PGMPOOLKIND_FREE;
+                    pPage->idx              = pPage - &pPool->aPages[0];
+                    LogFlow(("PGMR0PoolGrow: insert page #%#x - %RHp\n", pPage->idx, pPage->Core.Key));
+                    pPage->iNext            = pPool->iFreeHead;
+                    pPage->iUserHead        = NIL_PGMPOOL_USER_INDEX;
+                    pPage->iModifiedNext    = NIL_PGMPOOL_IDX;
+                    pPage->iModifiedPrev    = NIL_PGMPOOL_IDX;
+                    pPage->iMonitoredNext   = NIL_PGMPOOL_IDX;
+                    pPage->iMonitoredPrev   = NIL_PGMPOOL_IDX;
+                    pPage->iAgeNext         = NIL_PGMPOOL_IDX;
+                    pPage->iAgePrev         = NIL_PGMPOOL_IDX;
+                    /* commit it */
+                    bool fRc = RTAvloHCPhysInsert(&pPool->HCPhysTree, &pPage->Core); Assert(fRc); NOREF(fRc);
+                    pPool->iFreeHead = cCurPages + iNewPage;
+                    pPool->cCurPages = cCurPages + iNewPage + 1;
+                }
+
+                STAM_REL_PROFILE_STOP(&pPool->StatGrow, a);
+                RTCritSectLeave(&pGVM->pgmr0.s.PoolGrowCritSect);
+                return VINF_SUCCESS;
+            }
+
+            RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
+        }
+    }
+    RTCritSectLeave(&pGVM->pgmr0.s.PoolGrowCritSect);
+    return rc; /* Propagate alloc/map failures; VINF_SUCCESS if another EMT grew the pool to cMaxPages first. */
+}
+
Index: /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 82554)
+++ /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 82555)
@@ -465,7 +465,5 @@
             {
                 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
-                rc = PGMR0DynMapInitVM(pGVM);
-#endif
+                rc = PGMR0InitVM(pGVM);
                 if (RT_SUCCESS(rc))
                 {
@@ -1800,4 +1798,11 @@
             break;
 
+        case VMMR0_DO_PGM_POOL_GROW:
+            if (idCpu == NIL_VMCPUID)
+                return VERR_INVALID_CPU_ID;
+            rc = PGMR0PoolGrow(pGVM);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
+            break;
+
         /*
          * GMM wrappers.
@@ -2344,4 +2349,5 @@
 
             default:
+            case VMMR0_DO_PGM_POOL_GROW:
                 break;
         }
Index: /trunk/src/VBox/VMM/VMMR3/MM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/MM.cpp	(revision 82554)
+++ /trunk/src/VBox/VMM/VMMR3/MM.cpp	(revision 82555)
@@ -460,8 +460,10 @@
 VMMR3DECL(int) MMR3Term(PVM pVM)
 {
+#if 0
     /*
      * Destroy the page pool. (first as it used the hyper heap)
      */
     mmR3PagePoolTerm(pVM);
+#endif
 
     /* Clean up the hypervisor heap. */
@@ -740,4 +742,5 @@
 VMMR3DECL(int) MMR3HCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys, void **ppv)
 {
+#if 0
     /*
      * Try page tables.
@@ -746,4 +749,5 @@
     if (RT_SUCCESS(rc))
         return rc;
+#endif
 
     /*
Index: /trunk/src/VBox/VMM/VMMR3/MMPagePool.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/MMPagePool.cpp	(revision 82554)
+++ /trunk/src/VBox/VMM/VMMR3/MMPagePool.cpp	(revision 82555)
@@ -379,4 +379,5 @@
 }
 
+#if 0
 
 /**
@@ -483,4 +484,5 @@
 }
 
+#endif
 
 /**
Index: /trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp	(revision 82554)
+++ /trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp	(revision 82555)
@@ -1116,11 +1116,12 @@
     if (!fIsMapping)
     {
-        int rc = MMPagePhys2PageTry(pState->pVM, HCPhys, &pvPage);
-        if (RT_FAILURE(rc))
+        PPGMPOOLPAGE pPoolPage = pgmPoolQueryPageForDbg(pState->pVM->pgm.s.pPoolR3, HCPhys);
+        if (!pPoolPage)
         {
             pState->pHlp->pfnPrintf(pState->pHlp, "%0*llx error! %s at HCPhys=%RHp was not found in the page pool!\n",
                                     pState->cchAddress, pState->u64Address, pszDesc, HCPhys);
-            return rc;
+            return VERR_PGM_POOL_GET_PAGE_FAILED;
         }
+        pvPage = (uint8_t *)pPoolPage->pvPageR3 + (HCPhys & PAGE_OFFSET_MASK);
     }
     else
Index: /trunk/src/VBox/VMM/VMMR3/PGMPool.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGMPool.cpp	(revision 82554)
+++ /trunk/src/VBox/VMM/VMMR3/PGMPool.cpp	(revision 82555)
@@ -170,5 +170,7 @@
     AssertLogRelMsgReturn(cMaxPages <= PGMPOOL_IDX_LAST && cMaxPages >= RT_ALIGN(PGMPOOL_IDX_FIRST, 16),
                           ("cMaxPages=%u (%#x)\n", cMaxPages, cMaxPages), VERR_INVALID_PARAMETER);
-    cMaxPages = RT_ALIGN(cMaxPages, 16);
+    AssertCompile(RT_IS_POWER_OF_TWO(PGMPOOL_CFG_MAX_GROW));
+    if (cMaxPages < PGMPOOL_IDX_LAST)
+        cMaxPages = RT_ALIGN(cMaxPages, PGMPOOL_CFG_MAX_GROW / 2);
     if (cMaxPages > PGMPOOL_IDX_LAST)
         cMaxPages = PGMPOOL_IDX_LAST;
@@ -314,8 +316,9 @@
     Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fReusedFlushPending);
 
-#ifdef VBOX_WITH_STATISTICS
     /*
      * Register statistics.
      */
+    STAM_REL_REG(pVM, &pPool->StatGrow,                 STAMTYPE_PROFILE,   "/PGM/Pool/Grow",           STAMUNIT_TICKS, "Profiling PGMR0PoolGrow");
+#ifdef VBOX_WITH_STATISTICS
     STAM_REG(pVM, &pPool->cCurPages,                    STAMTYPE_U16,       "/PGM/Pool/cCurPages",      STAMUNIT_PAGES,             "Current pool size.");
     STAM_REG(pVM, &pPool->cMaxPages,                    STAMTYPE_U16,       "/PGM/Pool/cMaxPages",      STAMUNIT_PAGES,             "Max pool size.");
@@ -473,62 +476,11 @@
  * @returns VBox status code.
  * @param   pVM     The cross context VM structure.
+ * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
  */
-VMMR3DECL(int) PGMR3PoolGrow(PVM pVM)
+VMMR3_INT_DECL(int) PGMR3PoolGrow(PVM pVM, PVMCPU pVCpu)
 {
-    PPGMPOOL pPool = pVM->pgm.s.pPoolR3;
-    AssertReturn(pPool->cCurPages < pPool->cMaxPages, VERR_PGM_POOL_MAXED_OUT_ALREADY);
-
-    /* With 32-bit guests and no EPT, the CR3 limits the root pages to low
-       (below 4 GB) memory. */
-    /** @todo change the pool to handle ROOT page allocations specially when
-     *        required. */
-    bool fCanUseHighMemory = HMIsNestedPagingActive(pVM);
-
-    pgmLock(pVM);
-
-    /*
-     * How much to grow it by?
-     */
-    uint32_t cPages = pPool->cMaxPages - pPool->cCurPages;
-    cPages = RT_MIN(PGMPOOL_CFG_MAX_GROW, cPages);
-    LogFlow(("PGMR3PoolGrow: Growing the pool by %d (%#x) pages. fCanUseHighMemory=%RTbool\n", cPages, cPages, fCanUseHighMemory));
-
-    for (unsigned i = pPool->cCurPages; cPages-- > 0; i++)
-    {
-        PPGMPOOLPAGE pPage = &pPool->aPages[i];
-
-        if (fCanUseHighMemory)
-            pPage->pvPageR3 = MMR3PageAlloc(pVM);
-        else
-            pPage->pvPageR3 = MMR3PageAllocLow(pVM);
-        if (!pPage->pvPageR3)
-        {
-            Log(("We're out of memory!! i=%d fCanUseHighMemory=%RTbool\n", i, fCanUseHighMemory));
-            pgmUnlock(pVM);
-            return i ? VINF_SUCCESS : VERR_NO_PAGE_MEMORY;
-        }
-        pPage->Core.Key  = MMPage2Phys(pVM, pPage->pvPageR3);
-        AssertFatal(pPage->Core.Key < _4G || fCanUseHighMemory);
-        pPage->GCPhys    = NIL_RTGCPHYS;
-        pPage->enmKind   = PGMPOOLKIND_FREE;
-        pPage->idx       = pPage - &pPool->aPages[0];
-        LogFlow(("PGMR3PoolGrow: insert page #%#x - %RHp\n", pPage->idx, pPage->Core.Key));
-        pPage->iNext     = pPool->iFreeHead;
-        pPage->iUserHead = NIL_PGMPOOL_USER_INDEX;
-        pPage->iModifiedNext  = NIL_PGMPOOL_IDX;
-        pPage->iModifiedPrev  = NIL_PGMPOOL_IDX;
-        pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
-        pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
-        pPage->iAgeNext  = NIL_PGMPOOL_IDX;
-        pPage->iAgePrev  = NIL_PGMPOOL_IDX;
-        /* commit it */
-        bool fRc = RTAvloHCPhysInsert(&pPool->HCPhysTree, &pPage->Core); Assert(fRc); NOREF(fRc);
-        pPool->iFreeHead = i;
-        pPool->cCurPages = i + 1;
-    }
-
-    pgmUnlock(pVM);
-    Assert(pPool->cCurPages <= pPool->cMaxPages);
-    return VINF_SUCCESS;
+    /* This used to do a lot of stuff, but it has moved to ring-0 (PGMR0PoolGrow). */
+    AssertReturn(pVM->pgm.s.pPoolR3->cCurPages < pVM->pgm.s.pPoolR3->cMaxPages, VERR_PGM_POOL_MAXED_OUT_ALREADY);
+    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_PGM_POOL_GROW, 0, NULL);
 }
 
Index: /trunk/src/VBox/VMM/VMMR3/VMM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 82554)
+++ /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 82555)
@@ -2393,5 +2393,5 @@
         case VMMCALLRING3_PGM_POOL_GROW:
         {
-            pVCpu->vmm.s.rcCallRing3 = PGMR3PoolGrow(pVM);
+            pVCpu->vmm.s.rcCallRing3 = PGMR3PoolGrow(pVM, pVCpu);
             break;
         }
Index: /trunk/src/VBox/VMM/include/PGMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/PGMInternal.h	(revision 82554)
+++ /trunk/src/VBox/VMM/include/PGMInternal.h	(revision 82555)
@@ -140,5 +140,5 @@
  * The maximum number of pages to add to the pool in one go.
  */
-#define PGMPOOL_CFG_MAX_GROW            (_256K >> PAGE_SHIFT)
+#define PGMPOOL_CFG_MAX_GROW            (_2M >> PAGE_SHIFT)
 
 /** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
@@ -266,15 +266,7 @@
  *                      this.
  *
- * @remark  Use with care as we don't have so much dynamic mapping space in
- *          ring-0 on 32-bit darwin and in RC.
  * @remark  There is no need to assert on the result.
  */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-# define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
-     pgmRZDynMapHCPageInlined(pVCpu, HCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
-#else
-# define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
-     MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
-#endif
+#define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) pgmPoolHCPhys2Ptr(pVM, HCPhys, (void **)(ppv))
 
 /** @def PGM_GCPHYS_2_PTR_V2
@@ -2055,12 +2047,7 @@
     AVLOHCPHYSNODECORE  Core;
     /** Pointer to the R3 mapping of the page. */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     R3PTRTYPE(void *)   pvPageR3;
-#else
-    R3R0PTRTYPE(void *) pvPageR3;
-#endif
-#if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64
-    uint32_t            Alignment0;
-#endif
+    /** Pointer to the R0 mapping of the page. */
+    R0PTRTYPE(void *)   pvPageR0;
     /** The guest physical address. */
     RTGCPHYS            GCPhys;
@@ -2347,4 +2334,6 @@
     uint32_t                    Alignment3;         /**< Align the next member on a 64-bit boundary. */
 #endif
+    /** Profiling PGMR0PoolGrow(). */
+    STAMPROFILE                 StatGrow;
     /** The AVL tree for looking up a page by its HC physical address. */
     AVLOHCPHYSTREE              HCPhysTree;
@@ -2382,9 +2371,11 @@
 {
     AssertPtr(a_pPage);
-    AssertReleaseMsg(RT_VALID_PTR(a_pPage->pvPageR3), ("enmKind=%d idx=%#x HCPhys=%RHp GCPhys=%RGp caller=%s\n", a_pPage->enmKind, a_pPage->idx, a_pPage->Core.Key, a_pPage->GCPhys, pszCaller));
-    return a_pPage->pvPageR3;
+    AssertMsg(RT_VALID_PTR(a_pPage->CTX_SUFF(pvPage)),
+              ("enmKind=%d idx=%#x HCPhys=%RHp GCPhys=%RGp pvPageR3=%p pvPageR0=%p caller=%s\n",
+               a_pPage->enmKind, a_pPage->idx, a_pPage->Core.Key, a_pPage->GCPhys, a_pPage->pvPageR3, a_pPage->pvPageR0, pszCaller));
+    return a_pPage->CTX_SUFF(pvPage);
 }
 #else
-# define PGMPOOL_PAGE_2_PTR(pVM, a_pPage)       ((a_pPage)->pvPageR3)
+# define PGMPOOL_PAGE_2_PTR(pVM, a_pPage)       ((a_pPage)->CTX_SUFF(pvPage))
 #endif
 
@@ -3834,4 +3825,20 @@
 
 
+/**
+ * PGM GVM instance data.
+ */
+typedef struct PGMR0PERVM
+{
+    /** @name PGM Pool related stuff.
+     * @{ */
+    /** Critical section for serializing pool growth. */
+    RTCRITSECT  PoolGrowCritSect;
+    /** The memory objects for the pool pages. */
+    RTR0MEMOBJ  ahPoolMemObjs[(PGMPOOL_IDX_LAST + PGMPOOL_CFG_MAX_GROW - 1) / PGMPOOL_CFG_MAX_GROW];
+    /** The ring-3 mapping objects for the pool pages. */
+    RTR0MEMOBJ  ahPoolMapObjs[(PGMPOOL_IDX_LAST + PGMPOOL_CFG_MAX_GROW - 1) / PGMPOOL_CFG_MAX_GROW];
+    /** @} */
+} PGMR0PERVM;
+
 RT_C_DECLS_BEGIN
 
@@ -3949,4 +3956,5 @@
 PPGMPOOLPAGE    pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys);
 PPGMPOOLPAGE    pgmPoolQueryPageForDbg(PPGMPOOL pPool, RTHCPHYS HCPhys);
+int             pgmPoolHCPhys2Ptr(PVM pVM, RTHCPHYS HCPhys, void **ppv);
 int             pgmPoolSyncCR3(PVMCPUCC pVCpu);
 bool            pgmPoolIsDirtyPageSlow(PVM pVM, RTGCPHYS GCPhys);
