Index: /trunk/src/VBox/VMM/DBGFSym.cpp
===================================================================
--- /trunk/src/VBox/VMM/DBGFSym.cpp	(revision 19771)
+++ /trunk/src/VBox/VMM/DBGFSym.cpp	(revision 19772)
@@ -25,5 +25,5 @@
 *******************************************************************************/
 #define LOG_GROUP LOG_GROUP_DBGF
-#if defined(RT_OS_WINDOWS) && 0 //defined(DEBUG_bird) // enable this if you want to debug win32 guests, the hypervisor or EFI.
+#if defined(RT_OS_WINDOWS) && 1 //defined(DEBUG_bird) // enable this if you want to debug win32 guests, the hypervisor or EFI.
 # include <Windows.h>
 # define _IMAGEHLP64
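
The one-character change above is a compile-time toggle: the trailing constant
replaces the commented-out per-developer DEBUG_bird check, so "&& 0" disables
the dbghelp block for everyone and "&& 1" force-enables it on Windows. The
idiom in isolation, as a minimal sketch with hypothetical MY_PLATFORM and
MY_DEV_FLAG names:

    #include <stdio.h>

    /* The constant stands in for the commented-out per-developer check:
     * 0 disables the block everywhere, 1 enables it wherever MY_PLATFORM
     * is defined.  Both macro names are hypothetical. */
    #if defined(MY_PLATFORM) && 0 /* defined(MY_DEV_FLAG) */
    static void dumpState(void) { printf("debug dump\n"); }
    #else
    static void dumpState(void) { /* no-op when the block is disabled */ }
    #endif

    int main(void) { dumpState(); return 0; }
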
Index: /trunk/src/VBox/VMM/PGM.cpp
===================================================================
--- /trunk/src/VBox/VMM/PGM.cpp	(revision 19771)
+++ /trunk/src/VBox/VMM/PGM.cpp	(revision 19772)
@@ -1791,5 +1791,4 @@
         PGM_REG_COUNTER(&pPGM->StatRZDirtyPageSkipped,            "/PGM/CPU%d/RZ/DirtyPage/Skipped",          "The number of pages already dirty or readonly.");
         PGM_REG_COUNTER(&pPGM->StatRZDirtyPageTrap,               "/PGM/CPU%d/RZ/DirtyPage/Trap",             "The number of traps generated for dirty bit tracking.");
-        PGM_REG_COUNTER(&pPGM->StatRZDirtyPageStale,              "/PGM/CPU%d/RZ/DirtyPage/Stale",            "The number of traps generated for dirty bit tracking (stale tlb entries).");
         PGM_REG_COUNTER(&pPGM->StatRZDirtiedPage,                 "/PGM/CPU%d/RZ/DirtyPage/SetDirty",         "The number of pages marked dirty because of write accesses.");
         PGM_REG_COUNTER(&pPGM->StatRZDirtyTrackRealPF,            "/PGM/CPU%d/RZ/DirtyPage/RealPF",           "The number of real page faults during dirty bit tracking.");
@@ -1836,5 +1835,4 @@
         PGM_REG_COUNTER(&pPGM->StatR3DirtyPageSkipped,            "/PGM/CPU%d/R3/DirtyPage/Skipped",          "The number of pages already dirty or readonly.");
         PGM_REG_COUNTER(&pPGM->StatR3DirtyPageTrap,               "/PGM/CPU%d/R3/DirtyPage/Trap",             "The number of traps generated for dirty bit tracking.");
-        PGM_REG_COUNTER(&pPGM->StatR3DirtyPageStale,              "/PGM/CPU%d/R3/DirtyPage/Stale",            "The number of traps generated for dirty bit tracking (stale tlb entries).");
         PGM_REG_COUNTER(&pPGM->StatR3DirtiedPage,                 "/PGM/CPU%d/R3/DirtyPage/SetDirty",         "The number of pages marked dirty because of write accesses.");
         PGM_REG_COUNTER(&pPGM->StatR3DirtyTrackRealPF,            "/PGM/CPU%d/R3/DirtyPage/RealPF",           "The number of real page faults during dirty bit tracking.");
Index: /trunk/src/VBox/VMM/PGMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/PGMInternal.h	(revision 19771)
+++ /trunk/src/VBox/VMM/PGMInternal.h	(revision 19772)
@@ -2784,5 +2784,4 @@
     STAMCOUNTER StatRZDirtyPageSkipped;             /**< RC/R0: The number of pages already dirty or readonly. */
     STAMCOUNTER StatRZDirtyPageTrap;                /**< RC/R0: The number of traps generated for dirty bit tracking. */
-    STAMCOUNTER StatRZDirtyPageStale;               /**< RC/R0: The number of traps generated for dirty bit tracking. (stale tlb entries) */
     STAMCOUNTER StatRZDirtyTrackRealPF;             /**< RC/R0: The number of real page faults during dirty bit tracking. */
     STAMCOUNTER StatRZDirtiedPage;                  /**< RC/R0: The number of pages marked dirty because of write accesses. */
@@ -2829,5 +2828,4 @@
     STAMCOUNTER StatR3DirtyPageSkipped;             /**< R3: The number of pages already dirty or readonly. */
     STAMCOUNTER StatR3DirtyPageTrap;                /**< R3: The number of traps generated for dirty bit tracking. */
-    STAMCOUNTER StatR3DirtyPageStale;               /**< R3: The number of traps generated for dirty bit tracking (stale TLB entries). */
     STAMCOUNTER StatR3DirtyTrackRealPF;             /**< R3: The number of real page faults during dirty bit tracking. */
     STAMCOUNTER StatR3DirtiedPage;                  /**< R3: The number of pages marked dirty because of write accesses. */
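
The PGM.cpp and PGMInternal.h hunks above are two halves of one removal: each
STAMCOUNTER declared in the per-CPU statistics block has a matching
PGM_REG_COUNTER registration, so retiring the DirtyPage/Stale statistic means
deleting both the field and its registration (its increment sites go away in
the PGMAllBth.h changes below). A minimal sketch of that field/registration
pairing, with hypothetical MYSTATS and statRegister names:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct MYSTATS
    {
        uint64_t cDirtyPageTrap;    /* traps taken for dirty-bit tracking */
        /* uint64_t cDirtyPageStale; -- retiring a counter means removing
         * the field, the registration below, and every increment site. */
    } MYSTATS;

    /* Hypothetical stand-in for the real STAM registration call. */
    static void statRegister(uint64_t *pCounter, const char *pszName, const char *pszDesc)
    {
        *pCounter = 0;
        printf("registered %s (%s)\n", pszName, pszDesc);
    }

    static void myStatsRegister(MYSTATS *pStats)
    {
        statRegister(&pStats->cDirtyPageTrap, "/PGM/CPU0/RZ/DirtyPage/Trap",
                     "The number of traps generated for dirty bit tracking.");
    }
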
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 19771)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 19772)
@@ -150,5 +150,93 @@
 #  endif /* !PGM_WITH_PAGING */
 
-    /* Fetch the guest PDE */
+
+#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
+    const unsigned  iPDDst = pvFault >> SHW_PD_SHIFT;
+    PX86PD          pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);
+
+#  elif PGM_SHW_TYPE == PGM_TYPE_PAE
+    const unsigned  iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */
+
+    PX86PDPAE       pPDDst;
+#    if PGM_GST_TYPE != PGM_TYPE_PAE
+    X86PDPE         PdpeSrc;
+
+    /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
+    PdpeSrc.u  = X86_PDPE_P;   /* rw/us are reserved for PAE PDPTEs; the accessed bit causes invalid VT-x guest state errors */
+#    endif
+    rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst);
+    if (rc != VINF_SUCCESS)
+    {
+        AssertRC(rc);
+        return rc;
+    }
+    Assert(pPDDst);
+
+#  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
+    const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
+    PX86PDPAE       pPDDst;
+#   if PGM_GST_TYPE == PGM_TYPE_PROT
+    /* AMD-V nested paging */
+    X86PML4E        Pml4eSrc;
+    X86PDPE         PdpeSrc;
+    PX86PML4E       pPml4eSrc = &Pml4eSrc;
+
+    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
+    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
+    PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
+#   endif
+
+    rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
+    if (rc != VINF_SUCCESS)
+    {
+        AssertRC(rc);
+        return rc;
+    }
+    Assert(pPDDst);
+
+#  elif PGM_SHW_TYPE == PGM_TYPE_EPT
+    const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
+    PEPTPD          pPDDst;
+
+    rc = pgmShwGetEPTPDPtr(pVCpu, pvFault, NULL, &pPDDst);
+    if (rc != VINF_SUCCESS)
+    {
+        AssertRC(rc);
+        return rc;
+    }
+    Assert(pPDDst);
+#  endif
+
+#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+    /*
+     * If we successfully correct the write protection fault due to dirty bit
+     * tracking, or this page fault is a genuine one, then return immediately.
+     */
+    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
+    rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
+    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
+    if (    rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
+        ||  rc == VINF_EM_RAW_GUEST_TRAP)
+    {
+        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
+                     = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVCpu->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
+        LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
+        return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
+    }
+
+    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0ePD[iPDSrc]);
+#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+
+    /*
+     * A common case is the not-present error caused by lazy page table syncing.
+     *
+     * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
+     * so we can safely assume that the shadow PT is present when calling SyncPage later.
+     *
+     * On failure, we ASSUME that SyncPT is out of memory or detected some kind
+     * of mapping conflict and defer to SyncCR3 in R3.
+     * (Again, we do NOT support access handlers for non-present guest pages.)
+     *
+     */
 #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
@@ -161,120 +249,24 @@
     PdeSrc.n.u1User     = 1;
 #  endif
-
-    pgmLock(pVM);
-    {   /* Force the shadow pointers to go out of scope after releasing the lock. */
-#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
-        const unsigned  iPDDst = pvFault >> SHW_PD_SHIFT;
-        PX86PD          pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);
-
-#  elif PGM_SHW_TYPE == PGM_TYPE_PAE
-        const unsigned  iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */
-
-        PX86PDPAE       pPDDst;
-#    if PGM_GST_TYPE != PGM_TYPE_PAE
-        X86PDPE         PdpeSrc;
-
-        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
-        PdpeSrc.u  = X86_PDPE_P;   /* rw/us are reserved for PAE PDPTEs; the accessed bit causes invalid VT-x guest state errors */
-#    endif
-        rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst);
-        if (rc != VINF_SUCCESS)
+    if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
+        &&  !pPDDst->a[iPDDst].n.u1Present
+        &&  PdeSrc.n.u1Present
+       )
+    {
+        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
+        STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
+        LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
+        pgmLock(pVM);
+        rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
+        pgmUnlock(pVM);
+        if (RT_SUCCESS(rc))
         {
-            pgmUnlock(pVM);
-            AssertRC(rc);
+            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
             return rc;
         }
-        Assert(pPDDst);
-
-#  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
-        const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
-        PX86PDPAE       pPDDst;
-#   if PGM_GST_TYPE == PGM_TYPE_PROT
-        /* AMD-V nested paging */
-        X86PML4E        Pml4eSrc;
-        X86PDPE         PdpeSrc;
-        PX86PML4E       pPml4eSrc = &Pml4eSrc;
-
-        /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
-        Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
-        PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
-#   endif
-
-        rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
-        if (rc != VINF_SUCCESS)
-        {
-            pgmUnlock(pVM);
-            AssertRC(rc);
-            return rc;
-        }   
-        Assert(pPDDst);
-
-#  elif PGM_SHW_TYPE == PGM_TYPE_EPT
-        const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
-        PEPTPD          pPDDst;
-
-        rc = pgmShwGetEPTPDPtr(pVCpu, pvFault, NULL, &pPDDst);
-        if (rc != VINF_SUCCESS)
-        {
-            pgmUnlock(pVM);
-            AssertRC(rc);
-            return rc;
-        }
-        Assert(pPDDst);
-#  endif
-
-#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-        /*
-         * If we successfully correct the write protection fault due to dirty bit
-         * tracking, or this page fault is a genuine one, then return immediately.
-         */
-        STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
-        rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
-        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
-        if (    rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
-            ||  rc == VINF_EM_RAW_GUEST_TRAP)
-        {
-            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
-                        = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVCpu->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
-            LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
-            pgmUnlock(pVM);
-            return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
-        }
-
-        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0ePD[iPDSrc]);
-#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
-
-        /*
-         * A common case is the not-present error caused by lazy page table syncing.
-         *
-         * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
-         * so we can safely assume that the shadow PT is present when calling SyncPage later.
-         *
-         * On failure, we ASSUME that SyncPT is out of memory or detected some kind
-         * of mapping conflict and defer to SyncCR3 in R3.
-         * (Again, we do NOT support access handlers for non-present guest pages.)
-         *
-         */
-        if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
-            &&  !pPDDst->a[iPDDst].n.u1Present
-            &&  PdeSrc.n.u1Present
-        )
-        {
-            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
-            STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
-            LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
-            rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
-            pgmUnlock(pVM);
-            if (RT_SUCCESS(rc))
-            {
-                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
-                return rc;
-            }
-            Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
-            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
-            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
-            return VINF_PGM_SYNC_CR3;
-        }
-        pgmUnlock(pVM);
+        Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
+        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
+        return VINF_PGM_SYNC_CR3;
     }
 
@@ -1085,5 +1078,5 @@
                  GCPtrPage, pShwPdpt->GCPhys, GCPhysPdpt, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
         pgmPoolFreeByPage(pPool, pShwPdpt, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
-        ASMAtomicWriteSize(pPml4eDst, 0);
+        pPml4eDst->u = 0;
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
         PGM_INVL_GUEST_TLBS();
@@ -1099,5 +1092,5 @@
                  GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
         pgmPoolFreeByPage(pPool, pShwPdpt, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
-        ASMAtomicWriteSize(pPml4eDst, 0);
+        pPml4eDst->u = 0;
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
         PGM_INVL_GUEST_TLBS();
@@ -1111,5 +1104,5 @@
                  GCPtrPage, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
         pgmPoolFreeByPage(pPool, pShwPdpt, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4);
-        ASMAtomicWriteSize(pPml4eDst, 0);
+        pPml4eDst->u = 0;
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
         PGM_INVL_GUEST_TLBS();
@@ -1125,5 +1118,5 @@
                     GCPtrPage, pShwPde->GCPhys, GCPhysPd, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
         pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
-        ASMAtomicWriteSize(pPdpeDst, 0);
+        pPdpeDst->u = 0;
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
         PGM_INVL_GUEST_TLBS();
@@ -1139,5 +1132,5 @@
                  GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
         pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
-        ASMAtomicWriteSize(pPdpeDst, 0);
+        pPdpeDst->u = 0;
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
         PGM_INVL_GUEST_TLBS();
@@ -1151,5 +1144,5 @@
                  GCPtrPage, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
         pgmPoolFreeByPage(pPool, pShwPde, pShwPdpt->idx, iPdpt);
-        ASMAtomicWriteSize(pPdpeDst, 0);
+        pPdpeDst->u = 0;
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
         PGM_INVL_GUEST_TLBS();
@@ -1183,5 +1176,5 @@
                      GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
             pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
-            ASMAtomicWriteSize(pPdeDst, 0);
+            pPdeDst->u = 0;
             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
             PGM_INVL_GUEST_TLBS();
@@ -1195,5 +1188,5 @@
                      GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
             pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
-            ASMAtomicWriteSize(pPdeDst, 0);
+            pPdeDst->u = 0;
             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
             PGM_INVL_GUEST_TLBS();
@@ -1239,5 +1232,5 @@
                          GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, iPDDst));
                 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
-                ASMAtomicWriteSize(pPdeDst, 0);
+                pPdeDst->u = 0;
                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
                 PGM_INVL_GUEST_TLBS();
@@ -1285,5 +1278,5 @@
                      GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
             pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
-            ASMAtomicWriteSize(pPdeDst, 0);
+            pPdeDst->u = 0;
             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage4MBPages));
             PGM_INVL_BIG_PG(GCPtrPage);
@@ -1298,5 +1291,5 @@
         {
             pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
-            ASMAtomicWriteSize(pPdeDst, 0);
+            pPdeDst->u = 0;
             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
             PGM_INVL_PG(GCPtrPage);
@@ -1910,5 +1903,6 @@
      */
     pgmPoolFreeByPage(pPool, pShwPage, pShwPde->idx, iPDDst);
-    ASMAtomicWriteSize(pPdeDst, 0);
+
+    pPdeDst->u = 0;
 
 # if defined(IN_RC)
@@ -2077,6 +2071,4 @@
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
 
-    Assert(PGMIsLockOwner(pVM));
-
     STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
     LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));
@@ -2164,23 +2156,13 @@
             pPdeSrc->b.u1Dirty = 1;
 
-            if (pPdeDst->n.u1Present)
+            if (pPdeDst->n.u1Present && (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY))
             {
-                if (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY)
-                {
-                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
-                    Assert(pPdeSrc->b.u1Write);
-
-                    /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
-                     *       fault again and take this path to only invalidate the entry.
-                     */
-                    pPdeDst->n.u1Write      = 1;
-                    pPdeDst->n.u1Accessed   = 1;
-                    pPdeDst->au32[0]       &= ~PGM_PDFLAGS_TRACK_DIRTY;
-                }
-                else
-                {
-                    /* Stale TLB entry. */
-                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
-                }
+                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
+
+                Assert(pPdeSrc->b.u1Write);
+
+                pPdeDst->n.u1Write      = 1;
+                pPdeDst->n.u1Accessed   = 1;
+                pPdeDst->au32[0]       &= ~PGM_PDFLAGS_TRACK_DIRTY;
                 PGM_INVL_BIG_PG(GCPtrPage);
                 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
@@ -2274,32 +2256,22 @@
                     PSHWPT      pPTDst   = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
                     PSHWPTE     pPteDst  = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
-                    if (pPteDst->n.u1Present)    /** @todo Optimize accessed bit emulation? */
+                    if (    pPteDst->n.u1Present    /** @todo Optimize accessed bit emulation? */
+                        &&  (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY))
                     {
-                        if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY)
-                        {
-                            LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
+                        LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
 #  ifdef VBOX_STRICT
-                            PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
-                            if (pPage)
-                                AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
-                                        ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
+                        PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
+                        if (pPage)
+                            AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
+                                      ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
 #  endif
-                            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
-
-                            Assert(pPteSrc->n.u1Write);
-
-                            /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
-                             *       fault again and take this path to only invalidate the entry.
-                             */
-                            pPteDst->n.u1Write    = 1;
-                            pPteDst->n.u1Dirty    = 1;
-                            pPteDst->n.u1Accessed = 1;
-                            pPteDst->au32[0]     &= ~PGM_PTFLAGS_TRACK_DIRTY;
-                        }
-                        else
-                        {
-                            /* Stale TLB entry. */
-                            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
-                        }
+                        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
+
+                        Assert(pPteSrc->n.u1Write);
+
+                        pPteDst->n.u1Write    = 1;
+                        pPteDst->n.u1Dirty    = 1;
+                        pPteDst->n.u1Accessed = 1;
+                        pPteDst->au32[0]     &= ~PGM_PTFLAGS_TRACK_DIRTY;
                         PGM_INVL_PG(GCPtrPage);
 
@@ -3140,6 +3112,4 @@
     int             rc = VINF_SUCCESS;
 
-    pgmLock(pVM);
-
     /*
      * First check if the shadow pd is present.
@@ -3160,5 +3130,4 @@
     if (rc != VINF_SUCCESS)
     {
-        pgmUnlock(pVM);
         AssertRC(rc);
         return rc;
@@ -3186,5 +3155,4 @@
     if (rc != VINF_SUCCESS)
     {
-        pgmUnlock(pVM);
         AssertRC(rc);
         return rc;
@@ -3201,5 +3169,8 @@
     if (!pPdeDst->n.u1Present)
     {
+        pgmLock(pVM);
         rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
+        pgmUnlock(pVM);
+        AssertRC(rc);
         if (rc != VINF_SUCCESS)
         {
@@ -3208,6 +3179,4 @@
             PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
 # endif
-            pgmUnlock(pVM);
-            AssertRC(rc);
             return rc;
         }
@@ -3222,5 +3191,5 @@
     {
         GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
-# else
+#else
     {
         GSTPDE PdeSrc;
@@ -3231,5 +3200,5 @@
         PdeSrc.n.u1User     = 1;
 
-# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
         Assert(rc != VINF_EM_RAW_GUEST_TRAP);
         if (uErr & X86_TRAP_PF_US)
@@ -3255,5 +3224,4 @@
     PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
 # endif
-    pgmUnlock(pVM);
     return rc;
 
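
The large Trap0eHandler hunk above is mostly a re-ordering: shadow
page-directory resolution and the CheckPageFault call now run before pgmLock
is taken, and the lock is held only around SyncPT, which eliminates the
pgmUnlock bookkeeping on every early-return path. A minimal sketch of that
lock-narrowing pattern, assuming pthread primitives and illustrative helper
names (not VirtualBox APIs):

    #include <pthread.h>

    static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

    static int resolveAndCheck(unsigned long addr) { (void)addr; return 0; } /* stub */
    static int syncPageTable(unsigned long addr)   { (void)addr; return 0; } /* stub */

    /* Classify the fault lock-free, then lock only the mutating call. */
    static int handleFault(unsigned long addr)
    {
        int rc = resolveAndCheck(addr);  /* read-only classification */
        if (rc != 0)
            return rc;                   /* early return, no unlock needed */

        pthread_mutex_lock(&g_lock);     /* lock spans only the mutation */
        rc = syncPageTable(addr);
        pthread_mutex_unlock(&g_lock);
        return rc;
    }
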
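Likewise, the ASMAtomicWriteSize(pPml4eDst, 0) to "pPml4eDst->u = 0"
substitutions in the InvalidatePage paths trade a size-dispatching atomic
store for a plain one, presumably because those writers are serialized and a
naturally aligned 64-bit store is a single instruction on x86-64 anyway. The
two forms side by side, using an illustrative entry type and the GCC/Clang
__atomic builtin for contrast:

    #include <stdint.h>

    typedef union MYPDE { uint64_t u; } MYPDE;  /* illustrative entry type */

    /* Plain store: sufficient when writers are serialized externally. */
    static void pdeClearPlain(MYPDE *pPde)  { pPde->u = 0; }

    /* Atomic store: only needed with unsynchronized concurrent access. */
    static void pdeClearAtomic(MYPDE *pPde) { __atomic_store_n(&pPde->u, 0, __ATOMIC_SEQ_CST); }
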
Index: /trunk/src/VBox/VMM/VMMAll/TMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/TMAll.cpp	(revision 19771)
+++ /trunk/src/VBox/VMM/VMMAll/TMAll.cpp	(revision 19772)
@@ -672,8 +672,10 @@
                     Assert(!pTimer->offPrev);
                     Assert(!pTimer->offNext);
+/*
                     AssertMsg(      pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
                               ||    pTimer->CTX_SUFF(pVM)->tm.s.fVirtualSyncTicking
                               ||    u64Expire >= pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync,
                               ("%RU64 < %RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
+*/
                     pTimer->u64Expire = u64Expire;
                     TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
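
The AssertMsg disabled above encodes the invariant that a virtual-sync timer
must not be armed with an expiry already behind the virtual-sync clock while
that clock is ticking; commenting it out suggests the check fired in
situations now considered legitimate. The invariant restated as a standalone
sketch, where fTicking and u64Now are hypothetical stand-ins for
fVirtualSyncTicking and u64VirtualSync:

    #include <assert.h>
    #include <stdint.h>

    /* A timer may only expire in the past if the clock is stopped. */
    static void checkExpireInvariant(int fTicking, uint64_t u64Now, uint64_t u64Expire)
    {
        assert(!fTicking || u64Expire >= u64Now);
    }
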
Index: /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp	(revision 19771)
+++ /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp	(revision 19772)
@@ -607,5 +607,5 @@
     else
     {
-        LogFlow(("INJ-EI: %x at %RGv\n", iGate, (RTGCPTR)pCtx->rip));
+        Log(("CPU%d: INJ-EI: %x at %RGv\n", pVCpu->idCpu, iGate, (RTGCPTR)pCtx->rip));
         Assert(VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW || !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
         Assert(VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW || pCtx->eflags.u32 & X86_EFL_IF);
@@ -1076,8 +1076,8 @@
 
     u32TrapMask = HWACCM_VMX_TRAP_MASK;
-#ifndef DEBUG
+//#ifndef DEBUG
     if (pVM->hwaccm.s.fNestedPaging)
         u32TrapMask &= ~RT_BIT(X86_XCPT_PF);   /* no longer need to intercept #PF. */
-#endif
+//#endif
 
     /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
@@ -2874,5 +2874,5 @@
             errCode |= X86_TRAP_PF_P;
 
-        Log(("EPT Page fault %x at %RGp error code %x\n", (uint32_t)exitQualification, GCPhys, errCode));
+        LogFlow(("EPT Page fault %x at %RGp error code %x\n", (uint32_t)exitQualification, GCPhys, errCode));
 
         /* GCPhys contains the guest physical address of the page fault. */
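
Commenting out the #ifndef DEBUG guard above makes the trap-mask narrowing
unconditional: with nested paging enabled, guest #PF no longer triggers a
VM-exit even in debug builds. A self-contained sketch of the
exception-bitmap manipulation, with MY_* stand-ins for the HWACCM/VMX
constants:

    #include <stdint.h>

    #define MY_XCPT_PF 14u                       /* #PF vector number */
    #define MY_BIT(b)  (UINT32_C(1) << (b))

    /* Each set bit in the exception bitmap forces a VM-exit on that vector. */
    static uint32_t buildTrapMask(uint32_t uBase, int fNestedPaging)
    {
        uint32_t u32TrapMask = uBase;
        if (fNestedPaging)
            u32TrapMask &= ~MY_BIT(MY_XCPT_PF);  /* #PF handled by nested paging */
        return u32TrapMask;
    }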
