Index: /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 38952)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 38953)
@@ -1452,5 +1452,5 @@
     {
         RTHCPTR HCPtrGuestCR3;
-        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
+        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
         if (RT_SUCCESS(rc))
         {
@@ -1494,5 +1494,5 @@
     {
         RTHCPTR HCPtrGuestCR3;
-        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
+        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
         if (RT_SUCCESS(rc))
         {
@@ -1544,5 +1544,5 @@
         RTHCPTR     HCPtr       = NIL_RTHCPTR;
 #if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &HCPtr);
+        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
         AssertRC(rc);
 #endif
@@ -1604,5 +1604,5 @@
     {
         RTHCPTR HCPtrGuestCR3;
-        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
+        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
         if (RT_SUCCESS(rc))
         {
@@ -2337,5 +2337,5 @@
 VMMDECL(bool) PGMIsLockOwner(PVM pVM)
 {
-    return PDMCritSectIsOwner(&pVM->pgm.s.CritSect);
+    return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
 }
 
@@ -2365,5 +2365,5 @@
 int pgmLock(PVM pVM)
 {
-    int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
+    int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
 #if defined(IN_RC) || defined(IN_RING0)
     if (rc == VERR_SEM_BUSY)
@@ -2383,5 +2383,9 @@
 void pgmUnlock(PVM pVM)
 {
-    PDMCritSectLeave(&pVM->pgm.s.CritSect);
+    uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
+    pVM->pgm.s.cDeprecatedPageLocks = 0;
+    int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
+    if (rc == VINF_SEM_NESTED)
+        pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
 }
 
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 38952)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 38953)
@@ -4425,5 +4425,5 @@
     int rc = VINF_SUCCESS;
 # else
-    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPageCR3, GCPhysCR3 & GST_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
+    int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCR3, GCPhysCR3 & GST_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
 # endif
     pgmUnlock(pVM);
@@ -4473,5 +4473,5 @@
                     int rc2 = VINF_SUCCESS;
 #  else
-                    int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)&HCPtr);
+                    int rc2 = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
 #  endif
                     pgmUnlock(pVM);
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 38952)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 38953)
@@ -632,6 +632,5 @@
     pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
 
-    const void *pvSharedPage = NULL;
-
+    void *pvSharedPage = NULL;
     if (PGM_PAGE_IS_SHARED(pPage))
     {
@@ -645,6 +644,6 @@
         pVM->pgm.s.cSharedPages--;
 
-        /* Grab the address of the page so we can make a copy later on. */
-        rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
+        /* Grab the address of the page so we can make a copy later on. (safe) */
+        rc = pgmPhysPageMap(pVM, pPage, GCPhys, &pvSharedPage);
         AssertRC(rc);
     }
@@ -670,11 +669,11 @@
     {
         /* Get the virtual address of the new page. */
-        void *pvNewPage;
-        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage);
-        AssertRC(rc);
-        if (rc == VINF_SUCCESS)
-        {
-            /** @todo todo write ASMMemCopyPage */
-            memcpy(pvNewPage, pvSharedPage, PAGE_SIZE);
+        PGMPAGEMAPLOCK  PgMpLck;
+        void           *pvNewPage;
+        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
+        if (RT_SUCCESS(rc))
+        {
+            memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo todo write ASMMemCopyPage */
+            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
         }
     }
@@ -922,5 +921,5 @@
  *
  * @remarks Called from within the PGM critical section.  The mapping is only
- *          valid while your inside this section.
+ *          valid while you are inside this section.
  */
 int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
@@ -1120,5 +1119,5 @@
  *
  * @remarks Called from within the PGM critical section.  The mapping is only
- *          valid while your inside this section.
+ *          valid while you are inside this section.
  */
 int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
@@ -1156,5 +1155,5 @@
  *
  * @remarks Called from within the PGM critical section.  The mapping is only
- *          valid while your inside this section.
+ *          valid while you are inside this section.
  */
 int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
@@ -1184,5 +1183,5 @@
  *
  * @remarks Called from within the PGM critical section.  The mapping is only
- *          valid while your inside this section.
+ *          valid while you are inside this section.
  */
 int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
@@ -1292,10 +1291,12 @@
  *
  * @internal
- */
-int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
+ * @deprecated Use pgmPhysGCPhys2CCPtrInternal.
+ */
+int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
 {
     int rc;
     AssertReturn(pPage, VERR_INTERNAL_ERROR);
     PGM_LOCK_ASSERT_OWNER(pVM);
+    pVM->pgm.s.cDeprecatedPageLocks++;
 
     /*
@@ -1333,8 +1334,78 @@
 }
 
-
-/**
- * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
- * own the PGM lock and therefore not need to lock the mapped page.
+#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+
+/**
+ * Locks a page mapping for writing.
+ *
+ * @param   pVM                 The VM handle.
+ * @param   pPage               The page.
+ * @param   pTlbe               The mapping TLB entry for the page.
+ * @param   pLock               The lock structure (output).
+ */
+DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
+{
+    PPGMPAGEMAP pMap = pTlbe->pMap;
+    if (pMap)
+        pMap->cRefs++;
+
+    unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
+    if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
+    {
+        if (cLocks == 0)
+            pVM->pgm.s.cWriteLockedPages++;
+        PGM_PAGE_INC_WRITE_LOCKS(pPage);
+    }
+    else if (cLocks != PGM_PAGE_MAX_LOCKS)
+    {
+        PGM_PAGE_INC_WRITE_LOCKS(pPage);
+        AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
+        if (pMap)
+            pMap->cRefs++; /* Extra ref to prevent it from going away. */
+    }
+
+    pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
+    pLock->pvMap = pMap;
+}
+
+/**
+ * Locks a page mapping for reading.
+ *
+ * @param   pVM                 The VM handle.
+ * @param   pPage               The page.
+ * @param   pTlbe               The mapping TLB entry for the page.
+ * @param   pLock               The lock structure (output).
+ */
+DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
+{
+    PPGMPAGEMAP pMap = pTlbe->pMap;
+    if (pMap)
+        pMap->cRefs++;
+
+    unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
+    if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
+    {
+        if (cLocks == 0)
+            pVM->pgm.s.cReadLockedPages++;
+        PGM_PAGE_INC_READ_LOCKS(pPage);
+    }
+    else if (cLocks != PGM_PAGE_MAX_LOCKS)
+    {
+        PGM_PAGE_INC_READ_LOCKS(pPage);
+        AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
+        if (pMap)
+            pMap->cRefs++; /* Extra ref to prevent it from going away. */
+    }
+
+    pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
+    pLock->pvMap = pMap;
+}
+
+#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
+
+
+/**
+ * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
+ * own the PGM lock and have access to the page structure.
  *
  * @returns VBox status code.
@@ -1347,8 +1418,74 @@
  * @param   pPage       Pointer to the PGMPAGE structure for the page.
  * @param   ppv         Where to store the address corresponding to GCPhys.
+ * @param   pLock       Where to store the lock information that
+ *                      pgmPhysReleaseInternalPageMappingLock needs.
  *
  * @internal
  */
-int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
+int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
+{
+    int rc;
+    AssertReturn(pPage, VERR_INTERNAL_ERROR);
+    PGM_LOCK_ASSERT_OWNER(pVM);
+
+    /*
+     * Make sure the page is writable.
+     */
+    if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
+    {
+        rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
+        if (RT_FAILURE(rc))
+            return rc;
+        AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
+    }
+    Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
+
+    /*
+     * Do the job.
+     */
+#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+    void *pv;
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+    rc = pgmRZDynMapHCPageInlined(pVCpu,
+                                  PGM_PAGE_GET_HCPHYS(pPage),
+                                  &pv
+                                  RTLOG_COMMA_SRC_POS);
+    if (RT_FAILURE(rc))
+        return rc;
+    *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
+    pLock->pvPage = pv;
+    pLock->pVCpu  = pVCpu;
+
+#else
+    PPGMPAGEMAPTLBE pTlbe;
+    rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
+    if (RT_FAILURE(rc))
+        return rc;
+    pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
+    *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
+#endif
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
+ * own the PGM lock and have access to the page structure.
+ *
+ * @returns VBox status code.
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
+ * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
+ *
+ * @param   pVM         The VM handle.
+ * @param   GCPhys      The guest physical address of the page that should be mapped.
+ * @param   pPage       Pointer to the PGMPAGE structure for the page.
+ * @param   ppv         Where to store the address corresponding to GCPhys.
+ * @param   pLock       Where to store the lock information that
+ *                      pgmPhysReleaseInternalPageMappingLock needs.
+ *
+ * @internal
+ */
+int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
 {
     AssertReturn(pPage, VERR_INTERNAL_ERROR);
@@ -1357,9 +1494,10 @@
 
     /*
-     * Get the mapping address.
+     * Do the job.
      */
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     void *pv;
-    int rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+    int rc = pgmRZDynMapHCPageInlined(pVCpu,
                                       PGM_PAGE_GET_HCPHYS(pPage),
                                       &pv
@@ -1368,4 +1506,7 @@
         return rc;
     *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
+    pLock->pvPage = pv;
+    pLock->pVCpu  = pVCpu;
+
 #else
     PPGMPAGEMAPTLBE pTlbe;
@@ -1373,4 +1514,5 @@
     if (RT_FAILURE(rc))
         return rc;
+    pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
     *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
 #endif
@@ -1382,7 +1524,7 @@
  * Requests the mapping of a guest page into the current context.
  *
- * This API should only be used for very short term, as it will consume
- * scarse resources (R0 and GC) in the mapping cache. When you're done
- * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
+ * This API should only be used for very short term, as it will consume scarce
+ * resources (R0 and GC) in the mapping cache. When you're done with the page,
+ * call PGMPhysReleasePageMappingLock() ASAP to release it.
  *
  * This API will assume your intention is to write to the page, and will
@@ -1396,14 +1538,18 @@
  *
  * @param   pVM         The VM handle.
- * @param   GCPhys      The guest physical address of the page that should be mapped.
+ * @param   GCPhys      The guest physical address of the page that should be
+ *                      mapped.
  * @param   ppv         Where to store the address corresponding to GCPhys.
- * @param   pLock       Where to store the lock information that PGMPhysReleasePageMappingLock needs.
+ * @param   pLock       Where to store the lock information that
+ *                      PGMPhysReleasePageMappingLock needs.
  *
  * @remarks The caller is responsible for dealing with access handlers.
  * @todo    Add an informational return code for pages with access handlers?
  *
- * @remark  Avoid calling this API from within critical sections (other than the
- *          PGM one) because of the deadlock risk. External threads may need to
- *          delegate jobs to the EMTs.
+ * @remark  Avoid calling this API from within critical sections (other than
+ *          the PGM one) because of the deadlock risk. External threads may
+ *          need to delegate jobs to the EMTs.
+ * @remarks Only one page is mapped!  Make no assumption about what's after or
+ *          before the returned page!
  * @thread  Any thread.
  */
@@ -1446,8 +1592,4 @@
 
 #else  /* IN_RING3 || IN_RING0 */
-    /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
-    /** @todo : This can be dangerous if abused for more than one page; the ring-3 mapping is only valid for ranges that do NOT cross a chunk boundary.   */
-    /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
-
     /*
      * Query the Physical TLB entry for the page (may fail).
@@ -1476,26 +1618,6 @@
              * Now, just perform the locking and calculate the return address.
              */
-            PPGMPAGEMAP pMap = pTlbe->pMap;
-            if (pMap)
-                pMap->cRefs++;
-
-            unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
-            if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
-            {
-                if (cLocks == 0)
-                    pVM->pgm.s.cWriteLockedPages++;
-                PGM_PAGE_INC_WRITE_LOCKS(pPage);
-            }
-            else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
-            {
-                PGM_PAGE_INC_WRITE_LOCKS(pPage);
-                AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
-                if (pMap)
-                    pMap->cRefs++; /* Extra ref to prevent it from going away. */
-            }
-
+            pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
             *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-            pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
-            pLock->pvMap = pMap;
         }
     }
@@ -1510,7 +1632,7 @@
  * Requests the mapping of a guest page into the current context.
  *
- * This API should only be used for very short term, as it will consume
- * scarse resources (R0 and GC) in the mapping cache. When you're done
- * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
+ * This API should only be used for very short term, as it will consume scarce
+ * resources (R0 and GC) in the mapping cache.  When you're done with the page,
+ * call PGMPhysReleasePageMappingLock() ASAP to release it.
  *
  * @returns VBox status code.
@@ -1520,13 +1642,17 @@
  *
  * @param   pVM         The VM handle.
- * @param   GCPhys      The guest physical address of the page that should be mapped.
+ * @param   GCPhys      The guest physical address of the page that should be
+ *                      mapped.
  * @param   ppv         Where to store the address corresponding to GCPhys.
- * @param   pLock       Where to store the lock information that PGMPhysReleasePageMappingLock needs.
+ * @param   pLock       Where to store the lock information that
+ *                      PGMPhysReleasePageMappingLock needs.
  *
  * @remarks The caller is responsible for dealing with access handlers.
  * @todo    Add an informational return code for pages with access handlers?
  *
- * @remark  Avoid calling this API from within critical sections (other than
+ * @remarks Avoid calling this API from within critical sections (other than
  *          the PGM one) because of the deadlock risk.
+ * @remarks Only one page is mapped!  Make no assumption about what's after or
+ *          before the returned page!
  * @thread  Any thread.
  */
@@ -1567,9 +1693,4 @@
 
 #else  /* IN_RING3 || IN_RING0 */
-
-    /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
-    /** @todo : This can be dangerous if abused for more than one page; the ring-3 mapping is only valid for ranges that do NOT cross a chunk boundary.   */
-    /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
-
     /*
      * Query the Physical TLB entry for the page (may fail).
@@ -1588,26 +1709,6 @@
              * Now, just perform the locking and calculate the return address.
              */
-            PPGMPAGEMAP pMap = pTlbe->pMap;
-            if (pMap)
-                pMap->cRefs++;
-
-            unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
-            if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
-            {
-                if (cLocks == 0)
-                    pVM->pgm.s.cReadLockedPages++;
-                PGM_PAGE_INC_READ_LOCKS(pPage);
-            }
-            else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
-            {
-                PGM_PAGE_INC_READ_LOCKS(pPage);
-                AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
-                if (pMap)
-                    pMap->cRefs++; /* Extra ref to prevent it from going away. */
-            }
-
+            pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
             *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-            pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
-            pLock->pvMap = pMap;
         }
     }
@@ -1764,4 +1865,22 @@
     pgmUnlock(pVM);
 #endif /* IN_RING3 */
+}
+
+
+/**
+ * Release the internal mapping of a guest page.
+ *
+ * This is the counterpart of pgmPhysGCPhys2CCPtrInternal and
+ * pgmPhysGCPhys2CCPtrInternalReadOnly.
+ *
+ * @param   pVM         The VM handle.
+ * @param   pLock       The lock structure initialized by the mapping function.
+ *
+ * @remarks Caller must hold the PGM lock.
+ */
+void pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
+{
+    PGM_LOCK_ASSERT_OWNER(pVM);
+    PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
 }
 
@@ -1780,10 +1899,9 @@
  * @param   pVM         The VM handle.
  * @param   GCPhys      The GC physical address to convert.
- * @param   cbRange     Physical range
  * @param   pR3Ptr      Where to store the R3 pointer on success.
  *
  * @deprecated  Avoid when possible!
  */
-VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
+int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
 {
 /** @todo this is kind of hacky and needs some more work. */
@@ -1792,5 +1910,5 @@
 #endif
 
-    Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
+    Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     AssertFailedReturn(VERR_NOT_IMPLEMENTED);
@@ -1802,5 +1920,5 @@
     int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
     if (RT_SUCCESS(rc))
-        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
+        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
 
     pgmUnlock(pVM);
@@ -1809,26 +1927,4 @@
 #endif
 }
-
-
-#ifdef VBOX_STRICT
-/**
- * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
- *
- * @returns The R3Ptr, NIL_RTR3PTR on failure.
- * @param   pVM         The VM handle.
- * @param   GCPhys      The GC Physical address.
- * @param   cbRange     Physical range.
- *
- * @deprecated  Avoid when possible.
- */
-VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
-{
-    RTR3PTR R3Ptr;
-    int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
-    if (RT_SUCCESS(rc))
-        return R3Ptr;
-    return NIL_RTR3PTR;
-}
-#endif /* VBOX_STRICT */
 
 
@@ -1931,6 +2027,7 @@
      * Whatever we do we need the source page, map it first.
      */
-    const void *pvSrc = NULL;
-    int         rc    = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
+    PGMPAGEMAPLOCK PgMpLck;
+    const void    *pvSrc = NULL;
+    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
     if (RT_FAILURE(rc))
     {
@@ -1977,4 +2074,5 @@
         /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
         //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
+        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
         return VERR_PGM_PHYS_WR_HIT_HANDLER;
 #endif
@@ -2018,4 +2116,5 @@
         /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
         //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
+        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
         return VERR_PGM_PHYS_WR_HIT_HANDLER;
 #endif
@@ -2027,4 +2126,5 @@
     if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
         memcpy(pvBuf, pvSrc, cb);
+    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
     return rc;
 }
@@ -2094,8 +2194,12 @@
                      * Get the pointer to the page.
                      */
-                    const void *pvSrc;
-                    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
+                    PGMPAGEMAPLOCK PgMpLck;
+                    const void    *pvSrc;
+                    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
                     if (RT_SUCCESS(rc))
+                    {
                         memcpy(pvBuf, pvSrc, cb);
+                        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+                    }
                     else
                     {
@@ -2164,6 +2268,7 @@
 static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
 {
-    void *pvDst = NULL;
-    int rc;
+    PGMPAGEMAPLOCK  PgMpLck;
+    void           *pvDst = NULL;
+    int             rc;
 
     /*
@@ -2196,5 +2301,5 @@
             Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
             if (!PGM_PAGE_IS_MMIO(pPage))
-                rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
+                rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
             else
                 rc = VINF_SUCCESS;
@@ -2217,6 +2322,9 @@
                 pCur = NULL; /* might not be valid anymore. */
 # endif
-                if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
-                    memcpy(pvDst, pvBuf, cbRange);
+                if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
+                {
+                    if (pvDst)
+                        memcpy(pvDst, pvBuf, cbRange);
+                }
                 else
                     AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
@@ -2226,5 +2334,9 @@
                                              GCPhys, pPage, rc), rc);
             if (RT_LIKELY(cbRange == cbWrite))
+            {
+                if (pvDst)
+                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                 return VINF_SUCCESS;
+            }
 
             /* more fun to be had below */
@@ -2262,5 +2374,5 @@
 
             Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
-            rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
+            rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
             if (RT_SUCCESS(rc))
             {
@@ -2285,5 +2397,9 @@
                                              GCPhys, pPage, rc), rc);
             if (RT_LIKELY(cbRange == cbWrite))
+            {
+                if (pvDst)
+                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                 return VINF_SUCCESS;
+            }
 
             /* more fun to be had below */
@@ -2304,5 +2420,5 @@
     if (!pvDst)
     {
-        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
+        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
         AssertLogRelMsgReturn(RT_SUCCESS(rc),
                               ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
@@ -2434,4 +2550,5 @@
             NOREF(cbRange);
             //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
+            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
             return VERR_PGM_PHYS_WR_HIT_HANDLER;
 #endif
@@ -2463,4 +2580,5 @@
             NOREF(cbRange);
             //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
+            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
             return VERR_PGM_PHYS_WR_HIT_HANDLER;
 #endif
@@ -2519,4 +2637,5 @@
             NOREF(cbRange);
             //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
+            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
             return VERR_PGM_PHYS_WR_HIT_HANDLER;
 #endif
@@ -2529,5 +2648,8 @@
          */
         if (cbRange >= cbWrite)
+        {
+            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
             return VINF_SUCCESS;
+        }
 
         cbWrite         -= cbRange;
@@ -2608,14 +2730,15 @@
                      * Get the pointer to the page.
                      */
-                    void *pvDst;
-                    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
+                    PGMPAGEMAPLOCK PgMpLck;
+                    void          *pvDst;
+                    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
                     if (RT_SUCCESS(rc))
                     {
                         Assert(!PGM_PAGE_IS_BALLOONED(pPage));
                         memcpy(pvDst, pvBuf, cb);
+                        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                     }
-                    else
                     /* Ignore writes to ballooned pages. */
-                    if (!PGM_PAGE_IS_BALLOONED(pPage))
+                    else if (!PGM_PAGE_IS_BALLOONED(pPage))
                         AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
                                                 pRam->GCPhys + off, pPage, rc));
@@ -3279,6 +3402,7 @@
         {
             /** @todo we should check reserved bits ... */
-            void *pvSrc;
-            rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, &pvSrc);
+            PGMPAGEMAPLOCK PgMpLck;
+            void const    *pvSrc;
+            rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
             switch (rc)
             {
@@ -3286,10 +3410,12 @@
                     Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
                     memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
+                    PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
                     break;
                 case VERR_PGM_PHYS_PAGE_RESERVED:
                 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
-                    memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
+                    memset(pvDst, 0xff, cb);
                     break;
                 default:
+                    Assert(RT_FAILURE_NP(rc));
                     return rc;
             }
@@ -3321,29 +3447,34 @@
             /** @todo we should check reserved bits ... */
             AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
-            void *pvSrc1;
-            rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys1, &pvSrc1);
+            PGMPAGEMAPLOCK PgMpLck;
+            void const *pvSrc1;
+            rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
             switch (rc)
             {
                 case VINF_SUCCESS:
                     memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
+                    PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
                     break;
                 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
-                    memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
+                    memset(pvDst, 0xff, cb1);
                     break;
                 default:
+                    Assert(RT_FAILURE_NP(rc));
                     return rc;
             }
 
-            void *pvSrc2;
-            rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys2, &pvSrc2);
+            void const *pvSrc2;
+            rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
             switch (rc)
             {
                 case VINF_SUCCESS:
                     memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
+                    PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
                     break;
                 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
-                    memset((uint8_t *)pvDst + cb1, 0, cb2);  /** @todo this is wrong, it should be 0xff */
+                    memset((uint8_t *)pvDst + cb1, 0xff, cb2);
                     break;
                 default:
+                    Assert(RT_FAILURE_NP(rc));
                     return rc;
             }
Index: /trunk/src/VBox/VMM/VMMR0/PGMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/PGMR0.cpp	(revision 38952)
+++ /trunk/src/VBox/VMM/VMMR0/PGMR0.cpp	(revision 38953)
@@ -68,5 +68,5 @@
 VMMR0DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu)
 {
-    Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu));
+    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);
 
     /*
@@ -183,8 +183,10 @@
 VMMR0DECL(int) PGMR0PhysAllocateLargeHandyPage(PVM pVM, PVMCPU pVCpu)
 {
-    Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu));
-
+    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);
     Assert(!pVM->pgm.s.cLargeHandyPages);
-    int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M, &pVM->pgm.s.aLargeHandyPage[0].idPage, &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
+
+    int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M,
+                                    &pVM->pgm.s.aLargeHandyPage[0].idPage,
+                                    &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
     if (RT_SUCCESS(rc))
         pVM->pgm.s.cLargeHandyPages = 1;
Index: /trunk/src/VBox/VMM/VMMR3/PGM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGM.cpp	(revision 38952)
+++ /trunk/src/VBox/VMM/VMMR3/PGM.cpp	(revision 38953)
@@ -1186,5 +1186,5 @@
     AssertCompile(sizeof(pVM->pgm.s) <= sizeof(pVM->pgm.padding));
     AssertCompile(sizeof(pVM->aCpus[0].pgm.s) <= sizeof(pVM->aCpus[0].pgm.padding));
-    AssertCompileMemberAlignment(PGM, CritSect, sizeof(uintptr_t));
+    AssertCompileMemberAlignment(PGM, CritSectX, sizeof(uintptr_t));
 
     /*
@@ -1342,5 +1342,5 @@
      * Initialize the PGM critical section and flush the phys TLBs
      */
-    rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSect, RT_SRC_POS, "PGM");
+    rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSectX, RT_SRC_POS, "PGM");
     AssertRCReturn(rc, rc);
 
@@ -1454,5 +1454,5 @@
 
     /* Almost no cleanup necessary, MM frees all memory. */
-    PDMR3CritSectDelete(&pVM->pgm.s.CritSect);
+    PDMR3CritSectDelete(&pVM->pgm.s.CritSectX);
 
     return rc;
@@ -2578,5 +2578,5 @@
 
     PGMDeregisterStringFormatTypes();
-    return PDMR3CritSectDelete(&pVM->pgm.s.CritSect);
+    return PDMR3CritSectDelete(&pVM->pgm.s.CritSectX);
 }
 
@@ -2689,7 +2689,7 @@
      * Get page directory addresses.
      */
+    pgmLock(pVM);
     PX86PD     pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
     Assert(pPDSrc);
-    Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
 
     /*
@@ -2715,4 +2715,5 @@
         }
     }
+    pgmUnlock(pVM);
 }
 
@@ -2726,5 +2727,5 @@
 VMMR3DECL(int) PGMR3LockCall(PVM pVM)
 {
-    int rc = PDMR3CritSectEnterEx(&pVM->pgm.s.CritSect, true /* fHostCall */);
+    int rc = PDMR3CritSectEnterEx(&pVM->pgm.s.CritSectX, true /* fHostCall */);
     AssertRC(rc);
     return rc;
Index: /trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp	(revision 38952)
+++ /trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp	(revision 38953)
@@ -150,11 +150,15 @@
                  * Simple stuff, go ahead.
                  */
-                size_t   cb    = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
+                size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
                 if (cb > cbRead)
                     cb = cbRead;
-                const void *pvSrc;
-                int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
+                PGMPAGEMAPLOCK PgMpLck;
+                const void    *pvSrc;
+                int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
                 if (RT_SUCCESS(rc))
+                {
                     memcpy(pvBuf, pvSrc, cb);
+                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+                }
                 else
                 {
@@ -290,11 +294,15 @@
                  * Simple stuff, go ahead.
                  */
-                size_t      cb    = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
+                size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
                 if (cb > cbWrite)
                     cb = cbWrite;
-                void *pvDst;
-                int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
+                PGMPAGEMAPLOCK PgMpLck;
+                void          *pvDst;
+                int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
                 if (RT_SUCCESS(rc))
+                {
                     memcpy(pvDst, pvBuf, cb);
+                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+                }
                 else
                     AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
@@ -3996,10 +4004,42 @@
         pVM->pgm.s.cMappedChunks++;
 
-        /* If we're running out of virtual address space, then we should unmap another chunk. */
+        /*
+         * If we're running out of virtual address space, then we should
+         * unmap another chunk.
+         *
+         * Currently, an unmap operation requires that all other virtual CPUs
+         * are idling and not by chance making use of the memory we're
+         * unmapping.  So, we create an async unmap operation here.
+         *
+         * Now, when creating or restoring a saved state this won't work very
+         * well since we may want to restore all guest RAM + a little something.
+         * So, we have to do the unmap synchronously.  Fortunately for us
+         * though, during these operations the other virtual CPUs are inactive
+         * and it should be safe to do this.
+         */
+        /** @todo Eventually we should lock all memory when used and do
+         *        map+unmap as one kernel call without any rendezvous or
+         *        other precautions. */
         if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
         {
-            /* Postpone the unmap operation (which requires a rendezvous operation) as we own the PGM lock here. */
-            rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysUnmapChunk, 1, pVM);
-            AssertRC(rc);
+            switch (VMR3GetState(pVM))
+            {
+                case VMSTATE_LOADING:
+                case VMSTATE_SAVING:
+                {
+                    PVMCPU pVCpu = VMMGetCpu(pVM);
+                    if (   pVCpu
+                        && pVM->pgm.s.cDeprecatedPageLocks == 0)
+                    {
+                        pgmR3PhysUnmapChunkRendezvous(pVM, pVCpu, NULL);
+                        break;
+                    }
+                    /* fall thru */
+                }
+                default:
+                    rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysUnmapChunk, 1, pVM);
+                    AssertRC(rc);
+                    break;
+            }
         }
     }
Index: /trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp	(revision 38952)
+++ /trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp	(revision 38953)
@@ -1243,9 +1243,13 @@
 static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
 {
-    RTGCPHYS    GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
-    void const *pvPage;
-    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
+    RTGCPHYS        GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
+    PGMPAGEMAPLOCK  PgMpLck;
+    void const     *pvPage;
+    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
     if (RT_SUCCESS(rc))
+    {
         paLSPages[iPage].u32Crc = RTCrc32(pvPage, PAGE_SIZE);
+        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+    }
     else
         paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
@@ -1290,9 +1294,13 @@
     if (paLSPages[iPage].u32Crc != UINT32_MAX)
     {
-        RTGCPHYS    GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
-        void const *pvPage;
-        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
+        RTGCPHYS        GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
+        PGMPAGEMAPLOCK  PgMpLck;
+        void const     *pvPage;
+        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
         if (RT_SUCCESS(rc))
+        {
             pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
+            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+        }
     }
 }
@@ -1333,5 +1341,5 @@
                         && (iPage & 0x7ff) == 0x100
 #endif
-                        && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
+                        && PDMR3CritSectYield(&pVM->pgm.s.CritSectX)
                         && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                     {
@@ -1558,5 +1566,5 @@
                     if (   uPass != SSM_PASS_FINAL
                         && (iPage & 0x7ff) == 0x100
-                        && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
+                        && PDMR3CritSectYield(&pVM->pgm.s.CritSectX)
                         && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                     {
@@ -1622,7 +1630,8 @@
                          * SSM call may block).
                          */
-                        uint8_t     abPage[PAGE_SIZE];
-                        void const *pvPage;
-                        rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage);
+                        uint8_t         abPage[PAGE_SIZE];
+                        PGMPAGEMAPLOCK  PgMpLck;
+                        void const     *pvPage;
+                        rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage, &PgMpLck);
                         if (RT_SUCCESS(rc))
                         {
@@ -1632,4 +1641,5 @@
                                 pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
 #endif
+                            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                         }
                         pgmUnlock(pVM);
@@ -2231,8 +2241,12 @@
      * Load the page.
      */
-    void *pvPage;
-    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage);
+    PGMPAGEMAPLOCK PgMpLck;
+    void          *pvPage;
+    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
     if (RT_SUCCESS(rc))
+    {
         rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);
+        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+    }
 
     return rc;
@@ -2677,8 +2691,11 @@
                             || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW)
                         {
-                            void *pvDstPage;
-                            rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
+                            PGMPAGEMAPLOCK PgMpLck;
+                            void          *pvDstPage;
+                            rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
                             AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
+
                             ASMMemZeroPage(pvDstPage);
+                            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                         }
                         /* Free it only if it's not part of a previously
@@ -2719,8 +2736,10 @@
                     case PGM_STATE_REC_RAM_RAW:
                     {
-                        void *pvDstPage;
-                        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
+                        PGMPAGEMAPLOCK PgMpLck;
+                        void          *pvDstPage;
+                        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
                         AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
                         rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
+                        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                         if (RT_FAILURE(rc))
                             return rc;
Index: /trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp	(revision 38952)
+++ /trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp	(revision 38953)
@@ -342,18 +342,17 @@
                     case PGM_PAGE_STATE_WRITE_MONITORED:
                     {
-                        const void *pvPage;
                         /* Check if the page was allocated, but completely zero. */
-                        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage);
-                        if (    rc == VINF_SUCCESS
+                        PGMPAGEMAPLOCK PgMpLck;
+                        const void    *pvPage;
+                        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
+                        if (    RT_SUCCESS(rc)
                             &&  ASMMemIsZeroPage(pvPage))
-                        {
                             cAllocZero++;
-                        }
-                        else
-                        if (GMMR3IsDuplicatePage(pVM, PGM_PAGE_GET_PAGEID(pPage)))
+                        else if (GMMR3IsDuplicatePage(pVM, PGM_PAGE_GET_PAGEID(pPage)))
                             cDuplicate++;
                         else
                             cUnique++;
-
+                        if (RT_SUCCESS(rc))
+                            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                         break;
                     }
Index: /trunk/src/VBox/VMM/include/PGMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/PGMInternal.h	(revision 38952)
+++ /trunk/src/VBox/VMM/include/PGMInternal.h	(revision 38953)
@@ -257,5 +257,5 @@
 #else
 # define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
-     PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
+     pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */
 #endif
 
@@ -305,5 +305,5 @@
 #else
 # define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
-     PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
+     pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */
 #endif
 
@@ -3146,8 +3146,13 @@
     /** The address of the ring-0 mapping cache if we're making use of it.  */
     RTR0PTR                         pvR0DynMapUsed;
-#if HC_ARCH_BITS == 32
-    /** Alignment padding that makes the next member start on a 8 byte boundary. */
+
+    /** Hack: Number of deprecated page mapping locks taken by the current lock
+     *  owner via pgmPhysGCPhys2CCPtrInternalDepr. */
+    uint32_t                        cDeprecatedPageLocks;
+#if HC_ARCH_BITS == 64
+    /** Alignment padding.  */
     uint32_t                        u32Alignment2;
 #endif
+
 
     /** PGM critical section.
@@ -3155,5 +3160,5 @@
      * and the page flag updating (some of it anyway).
      */
-    PDMCRITSECT                     CritSect;
+    PDMCRITSECT                     CritSectX;
 
     /**
@@ -3341,5 +3346,5 @@
 AssertCompileMemberAlignment(PGM, GCPtrMappingFixed, sizeof(RTGCPTR));
 AssertCompileMemberAlignment(PGM, HCPhysInterPD, 8);
-AssertCompileMemberAlignment(PGM, CritSect, 8);
+AssertCompileMemberAlignment(PGM, CritSectX, 8);
 AssertCompileMemberAlignment(PGM, ChunkR3Map, 8);
 AssertCompileMemberAlignment(PGM, PhysTlbHC, 8);
@@ -3854,5 +3859,12 @@
  * @param   a_pVM           The VM handle.
  */
-#define PGM_LOCK_ASSERT_OWNER(a_pVM)    Assert(PDMCritSectIsOwner(&(a_pVM)->pgm.s.CritSect))
+#define PGM_LOCK_ASSERT_OWNER(a_pVM)    Assert(PDMCritSectIsOwner(&(a_pVM)->pgm.s.CritSectX))
+/**
+ * Asserts that the caller owns the PDM lock.
+ * This is the internal variant of PGMIsLockOwner.
+ * @param   a_pVM           The VM handle.
+ * @param   a_pVCpu         The current CPU handle.
+ */
+#define PGM_LOCK_ASSERT_OWNER_EX(a_pVM, a_pVCpu)  Assert(PDMCritSectIsOwnerEx(&(a_pVM)->pgm.s.CritSectX, (a_pVCpu)))
 
 int             pgmR3MappingsFixInternal(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb);
@@ -3887,6 +3899,9 @@
 int             pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv);
 int             pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv);
-int             pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
-int             pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv);
+int             pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr);
+int             pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
+int             pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock);
+int             pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock);
+void            pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock);
 VMMDECL(int)    pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
 VMMDECL(int)    pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
Index: /trunk/src/VBox/VMM/include/internal/pgm.h
===================================================================
--- /trunk/src/VBox/VMM/include/internal/pgm.h	(revision 38952)
+++ /trunk/src/VBox/VMM/include/internal/pgm.h	(revision 38953)
@@ -65,8 +65,4 @@
 VMMDECL(int)        PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock);
 VMMDECL(int)        PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock);
-VMMDECL(int)        PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr);
-#ifdef VBOX_STRICT
-VMMDECL(RTR3PTR)    PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange);
-#endif
 VMMR3DECL(void)     PGMR3ResetNoMorePhysWritesFlag(PVM pVM);
 
Index: /trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp	(revision 38952)
+++ /trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp	(revision 38953)
@@ -599,5 +599,5 @@
     GEN_CHECK_OFF(PGMCPU, fA20Enabled);
     GEN_CHECK_OFF(PGMCPU, fSyncFlags);
-    GEN_CHECK_OFF(PGM, CritSect);
+    GEN_CHECK_OFF(PGM, CritSectX);
     GEN_CHECK_OFF(PGM, pPoolR3);
     GEN_CHECK_OFF(PGM, pPoolR0);
Index: /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp	(revision 38952)
+++ /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp	(revision 38953)
@@ -203,5 +203,5 @@
     CHECK_PADDING_VM(64, vmm);
     PRINT_OFFSET(VM, pgm);
-    PRINT_OFFSET(VM, pgm.s.CritSect);
+    PRINT_OFFSET(VM, pgm.s.CritSectX);
     CHECK_PADDING_VM(64, pgm);
     PRINT_OFFSET(VM, hwaccm);
@@ -383,5 +383,5 @@
     CHECK_MEMBER_ALIGNMENT(IOM, CritSect, sizeof(uintptr_t));
     CHECK_MEMBER_ALIGNMENT(EM, CritSectREM, sizeof(uintptr_t));
-    CHECK_MEMBER_ALIGNMENT(PGM, CritSect, sizeof(uintptr_t));
+    CHECK_MEMBER_ALIGNMENT(PGM, CritSectX, sizeof(uintptr_t));
     CHECK_MEMBER_ALIGNMENT(PDM, CritSect, sizeof(uintptr_t));
     CHECK_MEMBER_ALIGNMENT(MMHYPERHEAP, Lock, sizeof(uintptr_t));
