Index: /trunk/include/VBox/hwacc_vmx.h
===================================================================
--- /trunk/include/VBox/hwacc_vmx.h	(revision 30888)
+++ /trunk/include/VBox/hwacc_vmx.h	(revision 30889)
@@ -294,5 +294,10 @@
 typedef struct EPTPTEBITS
 {
-    /** Present bit. */
+    /** Present bit.
+     * @remark This is a convenience "misnomer".  The bit actually indicates
+     *         read access and the CPU will consider an entry with any of the
+     *         first three bits set as present.  Since all our valid entries
+     *         will have this bit set, it can be used as a present indicator
+     *         and allow some code sharing. */
     uint64_t    u1Present       : 1;
     /** Writable bit. */
@@ -308,5 +313,5 @@
     /** Physical address of page. Restricted by maximum physical address width of the cpu. */
     uint64_t    u40PhysAddr     : 40;
-    /** Availabe for software. */
+    /** Available for software. */
     uint64_t    u12Available    : 12;
 } EPTPTEBITS;
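A note on the remark above: the CPU considers an EPT entry present when any of the read/write/execute bits (bits 0-2) is set; VirtualBox merely guarantees that every valid entry it writes has the read bit set, so the shared u1Present test stays correct. A minimal standalone sketch of the two checks (hypothetical names, not the VBox types):

#include <stdint.h>
#include <stdbool.h>

#define MY_EPT_R   UINT64_C(0x1)  /* read access; doubles as the "present" bit */
#define MY_EPT_W   UINT64_C(0x2)  /* write access */
#define MY_EPT_X   UINT64_C(0x4)  /* execute access */

/* What the CPU actually checks: not-present only if bits 0-2 are all zero. */
static bool eptIsPresentForCpu(uint64_t uEntry)
{
    return (uEntry & (MY_EPT_R | MY_EPT_W | MY_EPT_X)) != 0;
}

/* What the shared page-table code may check instead, since every valid
 * entry VirtualBox writes has the read bit set. */
static bool eptIsPresentShared(uint64_t uEntry)
{
    return (uEntry & MY_EPT_R) != 0;
}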
Index: /trunk/include/VBox/x86.h
===================================================================
--- /trunk/include/VBox/x86.h	(revision 30888)
+++ /trunk/include/VBox/x86.h	(revision 30889)
@@ -1115,5 +1115,5 @@
 
 /** Bits 12-51 - - PAE - Physical Page number of the next level. */
-#if 1 /* we're using this internally and have to mask of the top 16-bit. */
+#if 1 /* we're using this internally and have to mask off the top 16 bits. */ /** @todo this should be safe to ditch now */
 #define X86_PTE_PAE_PG_MASK                 ( 0x0000fffffffff000ULL )
 /** @todo Get rid of the above hack; makes code unreadable. */
@@ -1122,6 +1122,14 @@
 #define X86_PTE_PAE_PG_MASK                 ( 0x000ffffffffff000ULL )
 #endif
-/** Bits 63 - NX - PAE - No execution flag. */
+/** Bit 63 - NX - PAE/LM - No execution flag. */
 #define X86_PTE_PAE_NX                      RT_BIT_64(63)
+/** Bits 62-52 - - PAE - MBZ bits when NX is active. */
+#define X86_PTE_PAE_MBZ_MASK_NX             UINT64_C(0x7ff0000000000000)
+/** Bits 63-52 - - PAE - MBZ bits when no NX. */
+#define X86_PTE_PAE_MBZ_MASK_NO_NX          UINT64_C(0xfff0000000000000)
+/** No bits -    - LM  - MBZ bits when NX is active. */
+#define X86_PTE_LM_MBZ_MASK_NX              UINT64_C(0x0000000000000000)
+/** Bit 63 -     - LM  - MBZ bits when no NX. */
+#define X86_PTE_LM_MBZ_MASK_NO_NX           UINT64_C(0x8000000000000000)
 
 /**
@@ -1317,6 +1325,14 @@
 #define X86_PDE_PAE_PG_MASK                 ( 0x000ffffffffff000ULL )
 #endif
-/** Bits 63 - NX - PAE - No execution flag. */
+/** Bit 63 - NX - PAE/LM - No execution flag. */
 #define X86_PDE_PAE_NX                      RT_BIT_64(63)
+/** Bits 62-52, 7 - - PAE - MBZ bits when NX is active. */
+#define X86_PDE_PAE_MBZ_MASK_NX             UINT64_C(0x7ff0000000000080)
+/** Bits 63-52, 7 - - PAE - MBZ bits when no NX. */
+#define X86_PDE_PAE_MBZ_MASK_NO_NX          UINT64_C(0xfff0000000000080)
+/** Bit 7 -         - LM  - MBZ bits when NX is active. */
+#define X86_PDE_LM_MBZ_MASK_NX              UINT64_C(0x0000000000000080)
+/** Bits 63, 7 -    - LM  - MBZ bits when no NX. */
+#define X86_PDE_LM_MBZ_MASK_NO_NX           UINT64_C(0x8000000000000080)
 
 /**
@@ -1427,14 +1443,24 @@
 /** Bits 22-31 - - Physical Page number. */
 #define X86_PDE4M_PG_MASK                   ( 0xffc00000 )
-/** Bits 13-20 - - Physical Page number high part (32-39 bits). AMD64 hack. */
+/** Bits 20-13 - - Physical Page number high part (32-39 bits). AMD64 hack. */
 #define X86_PDE4M_PG_HIGH_MASK              ( 0x001fe000 )
 /** The number of bits to the high part of the page number. */
 #define X86_PDE4M_PG_HIGH_SHIFT             19
-
-/** Bits 21-51 - - PAE & AMD64 - Physical Page number.
+/** Bit 21 -     - MBZ when PSE-36 is not supported (AMD CPUs). */
+#define X86_PDE4M_MBZ_MASK                  RT_BIT_32(21)
+
+/** Bits 21-51 - - PAE/LM - Physical Page number.
  * (Bits 40-51 (long mode) & bits 36-51 (pae legacy) are reserved according to the Intel docs; AMD allows for more.) */
-#define X86_PDE2M_PAE_PG_MASK               ( 0x000fffffffe00000ULL )
-/** Bits 63 - NX - PAE & AMD64 - No execution flag. */
-#define X86_PDE2M_PAE_NX                    X86_PDE2M_PAE_NX
+#define X86_PDE2M_PAE_PG_MASK               UINT64_C(0x000fffffffe00000)
+/** Bit 63 - NX - PAE/LM - No execution flag. */
+#define X86_PDE2M_PAE_NX                    RT_BIT_64(63)
+/** Bits 62-52, 20-13 - - PAE - MBZ bits when NX is active. */
+#define X86_PDE2M_PAE_MBZ_MASK_NX           UINT64_C(0x7ff00000001fe000)
+/** Bits 63-52, 20-13 - - PAE - MBZ bits when no NX. */
+#define X86_PDE2M_PAE_MBZ_MASK_NO_NX        UINT64_C(0xfff00000001fe000)
+/** Bits 20-13        - - LM  - MBZ bits when NX is active. */
+#define X86_PDE2M_LM_MBZ_MASK_NX            UINT64_C(0x00000000001fe000)
+/** Bits 63, 20-13    - - LM  - MBZ bits when no NX. */
+#define X86_PDE2M_LM_MBZ_MASK_NO_NX         UINT64_C(0x80000000001fe000)
 
 /**
@@ -1627,16 +1653,29 @@
 /** Bit 5 -  A  - Access bit. Long Mode only. */
 #define X86_PDPE_A                          RT_BIT(5)
+/** Bit 7 - PS  - Page size (1GB). Long Mode only. */
+#define X86_PDPE_LM_PS                      RT_BIT(7)
 /** Bits 9-11 - - Available for use to system software. */
 #define X86_PDPE_AVL_MASK                   (RT_BIT(9) | RT_BIT(10) | RT_BIT(11))
 /** Bits 12-51 - - PAE - Physical Page number of the next level. */
 #if 1 /* we're using this internally and have to mask of the top 16-bit. */
-#define X86_PDPE_PG_MASK                    ( 0x0000fffffffff000ULL )
+#define X86_PDPE_PG_MASK                    UINT64_C(0x0000fffffffff000)
 /** @todo Get rid of the above hack; makes code unreadable. */
-#define X86_PDPE_PG_MASK_FULL               ( 0x000ffffffffff000ULL )
+#define X86_PDPE_PG_MASK_FULL               UINT64_C(0x000ffffffffff000)
 #else
-#define X86_PDPE_PG_MASK                    ( 0x000ffffffffff000ULL )
+#define X86_PDPE_PG_MASK                    UINT64_C(0x000ffffffffff000)
 #endif
-/** Bits 63 - NX - PAE - No execution flag. Long Mode only. */
-#define X86_PDPE_NX                         RT_BIT_64(63)
+/** Bits 63-52, 8-5, 2-1 - - PAE - MBZ bits (NX is long mode only). */
+#define X86_PDPE_PAE_MBZ_MASK               UINT64_C(0xfff00000000001e6)
+/** Bit 63 - NX - LM - No execution flag. Long Mode only. */
+#define X86_PDPE_LM_NX                      RT_BIT_64(63)
+/** Bits 8, 7 - - LM - MBZ bits when NX is active. */
+#define X86_PDPE_LM_MBZ_MASK_NX             UINT64_C(0x0000000000000180)
+/** Bits 63, 8, 7 - - LM - MBZ bits when no NX. */
+#define X86_PDPE_LM_MBZ_MASK_NO_NX          UINT64_C(0x8000000000000180)
+/** Bits 29-13 - - LM - MBZ bits for 1GB page entry when NX is active. */
+#define X86_PDPE1G_LM_MBZ_MASK_NX           UINT64_C(0x000000003fffe000)
+/** Bits 63, 29-13 - - LM - MBZ bits for 1GB page entry when no NX. */
+#define X86_PDPE1G_LM_MBZ_MASK_NO_NX        UINT64_C(0x800000003fffe000)
+
 
 /**
@@ -1776,4 +1815,8 @@
 #define X86_PML4E_PG_MASK                   ( 0x000ffffffffff000ULL )
 #endif
+/** Bit 7 - - MBZ bits when NX is active. */
+#define X86_PML4E_MBZ_MASK_NX               UINT64_C(0x0000000000000080)
+/** Bits 63, 7 - - MBZ bits when no NX. */
+#define X86_PML4E_MBZ_MASK_NO_NX            UINT64_C(0x8000000000000080)
 /** Bits 63 - NX - PAE - No execution flag. */
 #define X86_PML4E_NX                        RT_BIT_64(63)
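The new *_MBZ_MASK_* constants are designed to be OR'ed with the mask of physical-address bits the host CPU does not implement and then tested against raw entries; that is exactly what pgmR3InitPaging() does in the PGM.cpp hunk below. A self-contained sketch of the composition, using the PAE PTE as the example (illustrative helper names):

#include <stdint.h>
#include <stdbool.h>

#define MY_PTE_PAE_MBZ_MASK_NO_NX  UINT64_C(0xfff0000000000000) /* bits 63-52 */

/* Page-frame bits (12-51) that a CPU implementing cPhysBits physical
 * address bits cannot use. */
static uint64_t myUnimplPageFrameBits(unsigned cPhysBits)
{
    uint64_t fMbz = 0;
    for (unsigned iBit = cPhysBits; iBit < 52; iBit++)
        fMbz |= UINT64_C(1) << iBit;
    return fMbz & UINT64_C(0x000ffffffffff000);
}

/* A PAE PTE avoids a reserved-bit #PF (X86_TRAP_PF_RSVD) only if none of
 * the combined MBZ bits is set. */
static bool myIsPaePteValid(uint64_t uPte, unsigned cPhysBits)
{
    return !(uPte & (myUnimplPageFrameBits(cPhysBits) | MY_PTE_PAE_MBZ_MASK_NO_NX));
}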
Index: /trunk/src/VBox/VMM/PGM.cpp
===================================================================
--- /trunk/src/VBox/VMM/PGM.cpp	(revision 30888)
+++ /trunk/src/VBox/VMM/PGM.cpp	(revision 30889)
@@ -593,4 +593,5 @@
 
 #include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
 #include <iprt/assert.h>
 #include <iprt/env.h>
@@ -599,17 +600,4 @@
 #include <iprt/string.h>
 #include <iprt/thread.h>
-
-
-/*******************************************************************************
-*   Defined Constants And Macros                                               *
-*******************************************************************************/
-/** Saved state data unit version for 2.5.x and later. */
-#define PGM_SAVED_STATE_VERSION                 9
-/** Saved state data unit version for 2.2.2 and later. */
-#define PGM_SAVED_STATE_VERSION_2_2_2           8
-/** Saved state data unit version for 2.2.0. */
-#define PGM_SAVED_STATE_VERSION_RR_DESC         7
-/** Saved state data unit version. */
-#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE   6
 
 
@@ -1316,5 +1304,5 @@
 
         /*
-         * Alocate the zero page.
+         * Allocate the zero page.
          */
         rc = MMHyperAlloc(pVM, PAGE_SIZE, PAGE_SIZE, MM_TAG_PGM, &pVM->pgm.s.pvZeroPgR3);
@@ -1326,4 +1314,17 @@
         pVM->pgm.s.HCPhysZeroPg = MMR3HyperHCVirt2HCPhys(pVM, pVM->pgm.s.pvZeroPgR3);
         AssertRelease(pVM->pgm.s.HCPhysZeroPg != NIL_RTHCPHYS);
+
+        /*
+         * Allocate the invalid MMIO page.
+         * (The invalid bits in HCPhysInvMmioPg are set later, when init completes.)
+         */
+        rc = MMHyperAlloc(pVM, PAGE_SIZE, PAGE_SIZE, MM_TAG_PGM, &pVM->pgm.s.pvMmioPgR3);
+    }
+    if (RT_SUCCESS(rc))
+    {
+        ASMMemFill32(pVM->pgm.s.pvMmioPgR3, PAGE_SIZE, 0xfeedface);
+        pVM->pgm.s.HCPhysMmioPg = MMR3HyperHCVirt2HCPhys(pVM, pVM->pgm.s.pvMmioPgR3);
+        AssertRelease(pVM->pgm.s.HCPhysMmioPg != NIL_RTHCPHYS);
+        pVM->pgm.s.HCPhysInvMmioPg = pVM->pgm.s.HCPhysMmioPg;
 
         /*
@@ -1419,5 +1420,5 @@
  *
  * @returns VBox status code.
- * @param   pVM     VM handle.
+ * @param   pVM                 VM handle.
  */
 static int pgmR3InitPaging(PVM pVM)
@@ -1552,5 +1553,4 @@
                 MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPT64)));
 #endif
-
         return VINF_SUCCESS;
     }
@@ -2013,13 +2013,66 @@
 
     /*
+     * Determine the max physical address width (MAXPHYADDR) and apply it
+     * to all the relevant mask members.
+     */
+    uint32_t cMaxPhysAddrWidth;
+    uint32_t uMaxExtLeaf = ASMCpuId_EAX(0x80000000);
+    if (   uMaxExtLeaf >= 0x80000008
+        && uMaxExtLeaf <= 0x80000fff)
+    {
+        cMaxPhysAddrWidth = ASMCpuId_EAX(0x80000008) & 0xff;
+        LogRel(("PGM: The CPU physical address width is %u bits\n", cMaxPhysAddrWidth));
+        cMaxPhysAddrWidth = RT_MIN(52, cMaxPhysAddrWidth);
+        pVM->pgm.s.fLessThan52PhysicalAddressBits = cMaxPhysAddrWidth < 52;
+        for (uint32_t iBit = cMaxPhysAddrWidth; iBit < 52; iBit++)
+            pVM->pgm.s.HCPhysInvMmioPg |= RT_BIT_64(iBit);
+    }
+    else
+    {
+        LogRel(("PGM: ASSUMING CPU physical address width of 48 bits (uMaxExtLeaf=%#x)\n", uMaxExtLeaf));
+        cMaxPhysAddrWidth = 48;
+        pVM->pgm.s.fLessThan52PhysicalAddressBits = true;
+        pVM->pgm.s.HCPhysInvMmioPg |= UINT64_C(0x000f000000000000); /* bits 48-51 */
+    }
+
+    pVM->pgm.s.GCPhysInvAddrMask = 0;
+    for (uint32_t iBit = cMaxPhysAddrWidth; iBit < 64; iBit++)
+        pVM->pgm.s.GCPhysInvAddrMask |= RT_BIT_64(iBit);
+
+    /*
+     * Initialize the invalid paging entry masks, assuming NX is disabled.
+     */
+    uint64_t fMbzPageFrameMask = pVM->pgm.s.GCPhysInvAddrMask & UINT64_C(0x000ffffffffff000);
+    for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+    {
+        PVMCPU pVCpu = &pVM->aCpus[iCpu];
+
+        /** @todo The manuals are not entirely clear whether the physical
+         *        address width is relevant.  See table 5-9 in the Intel
+         *        manual vs the PDE4M descriptions.  Write testcase (NP). */
+        pVCpu->pgm.s.fGst32BitMbzBigPdeMask  = (uint32_t)(fMbzPageFrameMask >> (32 - 13)) | X86_PDE4M_MBZ_MASK;
+
+        pVCpu->pgm.s.fGstPaeMbzPteMask       = fMbzPageFrameMask | X86_PTE_PAE_MBZ_MASK_NO_NX;
+        pVCpu->pgm.s.fGstPaeMbzPdeMask       = fMbzPageFrameMask | X86_PDE_PAE_MBZ_MASK_NO_NX;
+        pVCpu->pgm.s.fGstPaeMbzBigPdeMask    = fMbzPageFrameMask | X86_PDE2M_PAE_MBZ_MASK_NO_NX;
+        pVCpu->pgm.s.fGstPaeMbzPdpeMask      = fMbzPageFrameMask | X86_PDPE_PAE_MBZ_MASK;
+
+        pVCpu->pgm.s.fGstAmd64MbzPteMask     = fMbzPageFrameMask | X86_PTE_LM_MBZ_MASK_NO_NX;
+        pVCpu->pgm.s.fGstAmd64MbzPdeMask     = fMbzPageFrameMask | X86_PDE_LM_MBZ_MASK_NX;
+        pVCpu->pgm.s.fGstAmd64MbzBigPdeMask  = fMbzPageFrameMask | X86_PDE2M_LM_MBZ_MASK_NX;
+        pVCpu->pgm.s.fGstAmd64MbzPdpeMask    = fMbzPageFrameMask | X86_PDPE_LM_MBZ_MASK_NO_NX;
+        pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask = fMbzPageFrameMask | X86_PDPE1G_LM_MBZ_MASK_NO_NX;
+        pVCpu->pgm.s.fGstAmd64MbzPml4eMask   = fMbzPageFrameMask | X86_PML4E_MBZ_MASK_NO_NX;
+    }
+
+    /*
      * Note that AMD uses all the 8 reserved bits for the address (so 40 bits in total);
      * Intel only goes up to 36 bits, so we stick to 36 as well.
-     */
-    /** @todo How to test for the 40 bits support? Long mode seems to be the test criterium. */
+     * Update: More recent Intel manuals specify 40 bits just like AMD.
+     */
     uint32_t u32Dummy, u32Features;
     CPUMGetGuestCpuId(VMMGetCpu(pVM), 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
-
     if (u32Features & X86_CPUID_FEATURE_EDX_PSE36)
-        pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(36) - 1;
+        pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(RT_MAX(36, cMaxPhysAddrWidth)) - 1;
     else
         pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(32) - 1;
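For context on the PSE36 mask above: a 32-bit 4M PDE keeps physical address bits 22-31 in its top ten bits and, with PSE-36/PSE-40, address bits 32 and up in PDE bits 13-20, which is why X86_PDE4M_PG_HIGH_SHIFT is 19 (13 + 19 = 32). A hedged sketch of the reconstruction that GCPhys4MBPSEMask then clips:

#include <stdint.h>

#define MY_PDE4M_PG_MASK       UINT32_C(0xffc00000) /* address bits 31-22 */
#define MY_PDE4M_PG_HIGH_MASK  UINT32_C(0x001fe000) /* address bits 39-32, in PDE bits 20-13 */
#define MY_PDE4M_PG_HIGH_SHIFT 19

/* Compose the 4M page base address from a 32-bit PDE and clip it with the
 * PSE mask (RT_BIT_64(40) - 1 with PSE36 on a 40-bit CPU, else 4G - 1). */
static uint64_t my4MPageBase(uint32_t uPde, uint64_t fPseMask)
{
    uint64_t GCPhys = (uint64_t)(uPde & MY_PDE4M_PG_MASK)
                    | ((uint64_t)(uPde & MY_PDE4M_PG_HIGH_MASK) << MY_PDE4M_PG_HIGH_SHIFT);
    return GCPhys & fPseMask;
}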
@@ -2478,5 +2531,5 @@
      * Get page directory addresses.
      */
-    PX86PD     pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
+    PX86PD     pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
     Assert(pPDSrc);
     Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
@@ -3882,5 +3935,5 @@
 
     int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, cr3 & X86_CR3_PAGE_MASK, (const void **)&pPD, &LockCr3);
-    if (    RT_FAILURE(rc) 
+    if (    RT_FAILURE(rc)
         ||  !pPD)
     {
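The MAXPHYADDR probe added in pgmR3InitPaging() is easy to reproduce outside the VMM. The following standalone sketch mirrors the leaf 0x80000008 logic, including the 48-bit fallback, but uses GCC/Clang's <cpuid.h> instead of the IPRT ASMCpuId wrappers:

#include <stdint.h>
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned uEax, uEbx, uEcx, uEdx;
    unsigned cPhysBits = 48;                    /* the fallback assumption */

    /* Leaf 0x80000000 returns the highest extended leaf in EAX;
     * leaf 0x80000008 then reports the physical address width in EAX[7:0]. */
    if (   __get_cpuid(0x80000000, &uEax, &uEbx, &uEcx, &uEdx)
        && uEax >= 0x80000008)
    {
        __get_cpuid(0x80000008, &uEax, &uEbx, &uEcx, &uEdx);
        cPhysBits = uEax & 0xff;
        if (cPhysBits > 52)
            cPhysBits = 52;                     /* 52 is the architectural max */
    }

    /* Bits [cPhysBits..63] of a physical address are unimplemented. */
    uint64_t fInvalidMask = 0;
    for (unsigned iBit = cPhysBits; iBit < 64; iBit++)
        fInvalidMask |= UINT64_C(1) << iBit;

    printf("MAXPHYADDR=%u invalid address mask=%#018llx\n",
           cPhysBits, (unsigned long long)fInvalidMask);
    return 0;
}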
Index: /trunk/src/VBox/VMM/PGMGstDefs.h
===================================================================
--- /trunk/src/VBox/VMM/PGMGstDefs.h	(revision 30888)
+++ /trunk/src/VBox/VMM/PGMGstDefs.h	(revision 30889)
@@ -5,5 +5,5 @@
 
 /*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2010 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ -28,4 +28,7 @@
 #undef GSTPDE
 #undef PGSTPDE
+#undef GSTPTWALK
+#undef PGSTPTWALK
+#undef PCGSTPTWALK
 #undef GST_BIG_PAGE_SIZE
 #undef GST_BIG_PAGE_OFFSET_MASK
@@ -44,4 +47,13 @@
 #undef GST_PDPE_PG_MASK
 #undef GST_GET_PDE_BIG_PG_GCPHYS
+#undef GST_IS_PTE_VALID
+#undef GST_IS_PDE_VALID
+#undef GST_IS_BIG_PDE_VALID
+#undef GST_IS_PDPE_VALID
+#undef GST_IS_BIG_PDPE_VALID
+#undef GST_IS_PML4E_VALID
+#undef GST_IS_PSE_ACTIVE
+#undef GST_IS_NX_ACTIVE
+#undef BTH_IS_NP_ACTIVE
 
 #if PGM_GST_TYPE == PGM_TYPE_REAL \
@@ -49,82 +61,131 @@
 
 # if PGM_SHW_TYPE == PGM_TYPE_EPT
-#  define GSTPT                      X86PTPAE
-#  define PGSTPT                     PX86PTPAE
-#  define GSTPTE                     X86PTEPAE
-#  define PGSTPTE                    PX86PTEPAE
-#  define GSTPD                      X86PDPAE
-#  define PGSTPD                     PX86PDPAE
-#  define GSTPDE                     X86PDEPAE
-#  define PGSTPDE                    PX86PDEPAE
-#  define GST_PTE_PG_MASK            X86_PTE_PAE_PG_MASK
+#  define GSTPT                                 X86PTPAE
+#  define PGSTPT                                PX86PTPAE
+#  define GSTPTE                                X86PTEPAE
+#  define PGSTPTE                               PX86PTEPAE
+#  define GSTPD                                 X86PDPAE
+#  define PGSTPD                                PX86PDPAE
+#  define GSTPDE                                X86PDEPAE
+#  define PGSTPDE                               PX86PDEPAE
+#  define GST_PTE_PG_MASK                       X86_PTE_PAE_PG_MASK
+#  define GST_IS_NX_ACTIVE(pVCpu)               (true && This_should_perhaps_not_be_used_in_this_context)
+#  define BTH_IS_NP_ACTIVE(pVM)                 (true)
 # else
-#  define GSTPT                      SHWPT
-#  define PGSTPT                     PSHWPT
-#  define GSTPTE                     SHWPTE
-#  define PGSTPTE                    PSHWPTE
-#  define GSTPD                      SHWPD
-#  define PGSTPD                     PSHWPD
-#  define GSTPDE                     SHWPDE
-#  define PGSTPDE                    PSHWPDE
-#  define GST_PTE_PG_MASK            SHW_PTE_PG_MASK
+#  define GSTPT                                 SHWPT
+#  define PGSTPT                                PSHWPT
+#  define GSTPTE                                SHWPTE
+#  define PGSTPTE                               PSHWPTE
+#  define GSTPD                                 SHWPD
+#  define PGSTPD                                PSHWPD
+#  define GSTPDE                                SHWPDE
+#  define PGSTPDE                               PSHWPDE
+#  define GST_PTE_PG_MASK                       SHW_PTE_PG_MASK
+#  define GST_IS_NX_ACTIVE(pVCpu)               (CPUMIsGuestNXEnabled(pVCpu)) /** @todo shadow this variable */
+#  if PGM_GST_TYPE == PGM_TYPE_PROT             /* (comment at top of PGMAllBth.h) */
+#   define BTH_IS_NP_ACTIVE(pVM)                HWACCMIsNestedPagingActive(pVM)
+#  else
+#   define BTH_IS_NP_ACTIVE(pVM)                (false)
+#  endif
 # endif
+# define GST_IS_PTE_VALID(pVCpu, Pte)           (true)
+# define GST_IS_PDE_VALID(pVCpu, Pde)           (true)
+# define GST_IS_BIG_PDE_VALID(pVCpu, Pde)       (true)
+# define GST_IS_PDPE_VALID(pVCpu, Pdpe)         (true)
+# define GST_IS_BIG_PDPE_VALID(pVCpu, Pdpe)     (true)
+# define GST_IS_PML4E_VALID(pVCpu, Pml4e)       (true)
+# define GST_IS_PSE_ACTIVE(pVCpu)               (false && This_should_not_be_used_in_this_context)
+
 #elif PGM_GST_TYPE == PGM_TYPE_32BIT
-# define GSTPT                      X86PT
-# define PGSTPT                     PX86PT
-# define GSTPTE                     X86PTE
-# define PGSTPTE                    PX86PTE
-# define GSTPD                      X86PD
-# define PGSTPD                     PX86PD
-# define GSTPDE                     X86PDE
-# define PGSTPDE                    PX86PDE
-# define GST_BIG_PAGE_SIZE          X86_PAGE_4M_SIZE
-# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_4M_OFFSET_MASK
-# define GST_PDE_PG_MASK            X86_PDE_PG_MASK
-# define GST_PDE_BIG_PG_MASK        X86_PDE4M_PG_MASK
-# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst)  pgmGstGet4MBPhysPage(&pVM->pgm.s, PdeGst)
-# define GST_PD_SHIFT               X86_PD_SHIFT
-# define GST_PD_MASK                X86_PD_MASK
-# define GST_TOTAL_PD_ENTRIES       X86_PG_ENTRIES
-# define GST_PTE_PG_MASK            X86_PTE_PG_MASK
-# define GST_PT_SHIFT               X86_PT_SHIFT
-# define GST_PT_MASK                X86_PT_MASK
-# define GST_CR3_PAGE_MASK          X86_CR3_PAGE_MASK
+# define GSTPT                                  X86PT
+# define PGSTPT                                 PX86PT
+# define GSTPTE                                 X86PTE
+# define PGSTPTE                                PX86PTE
+# define GSTPD                                  X86PD
+# define PGSTPD                                 PX86PD
+# define GSTPDE                                 X86PDE
+# define PGSTPDE                                PX86PDE
+# define GSTPTWALK                              PGMPTWALKGST32BIT
+# define PGSTPTWALK                             PPGMPTWALKGST32BIT
+# define PCGSTPTWALK                            PCPGMPTWALKGST32BIT
+# define GST_BIG_PAGE_SIZE                      X86_PAGE_4M_SIZE
+# define GST_BIG_PAGE_OFFSET_MASK               X86_PAGE_4M_OFFSET_MASK
+# define GST_PDE_PG_MASK                        X86_PDE_PG_MASK
+# define GST_PDE_BIG_PG_MASK                    X86_PDE4M_PG_MASK
+# define GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeGst) pgmGstGet4MBPhysPage(&(pVM)->pgm.s, PdeGst)
+# define GST_PD_SHIFT                           X86_PD_SHIFT
+# define GST_PD_MASK                            X86_PD_MASK
+# define GST_TOTAL_PD_ENTRIES                   X86_PG_ENTRIES
+# define GST_PTE_PG_MASK                        X86_PTE_PG_MASK
+# define GST_PT_SHIFT                           X86_PT_SHIFT
+# define GST_PT_MASK                            X86_PT_MASK
+# define GST_CR3_PAGE_MASK                      X86_CR3_PAGE_MASK
+# define GST_IS_PTE_VALID(pVCpu, Pte)           (true)
+# define GST_IS_PDE_VALID(pVCpu, Pde)           (true)
+# define GST_IS_BIG_PDE_VALID(pVCpu, Pde)       (!( (Pde).u & (pVCpu)->pgm.s.fGst32BitMbzBigPdeMask ))
+//# define GST_IS_PDPE_VALID(pVCpu, Pdpe)         (false)
+//# define GST_IS_BIG_PDPE_VALID(pVCpu, Pdpe)     (false)
+//# define GST_IS_PML4E_VALID(pVCpu, Pml4e)       (false)
+# define GST_IS_PSE_ACTIVE(pVCpu)               (CPUMIsGuestPageSizeExtEnabled(pVCpu)) /** @todo ( (pVCpu)->pgm.s.fGst32BitPageSizeExtension ) */
+# define GST_IS_NX_ACTIVE(pVCpu)                (false)
+# define BTH_IS_NP_ACTIVE(pVM)                  (false)
 
 #elif   PGM_GST_TYPE == PGM_TYPE_PAE \
      || PGM_GST_TYPE == PGM_TYPE_AMD64
-# define GSTPT                      X86PTPAE
-# define PGSTPT                     PX86PTPAE
-# define GSTPTE                     X86PTEPAE
-# define PGSTPTE                    PX86PTEPAE
-# define GSTPD                      X86PDPAE
-# define PGSTPD                     PX86PDPAE
-# define GSTPDE                     X86PDEPAE
-# define PGSTPDE                    PX86PDEPAE
-# define GST_BIG_PAGE_SIZE          X86_PAGE_2M_SIZE
-# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_2M_OFFSET_MASK
-# define GST_PDE_PG_MASK            X86_PDE_PAE_PG_MASK_FULL
-# define GST_PDE_BIG_PG_MASK        X86_PDE2M_PAE_PG_MASK
-# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst)  (PdeGst.u & GST_PDE_BIG_PG_MASK)
-# define GST_PD_SHIFT               X86_PD_PAE_SHIFT
-# define GST_PD_MASK                X86_PD_PAE_MASK
+# define GSTPT                                  X86PTPAE
+# define PGSTPT                                 PX86PTPAE
+# define GSTPTE                                 X86PTEPAE
+# define PGSTPTE                                PX86PTEPAE
+# define GSTPD                                  X86PDPAE
+# define PGSTPD                                 PX86PDPAE
+# define GSTPDE                                 X86PDEPAE
+# define PGSTPDE                                PX86PDEPAE
+# define GST_BIG_PAGE_SIZE                      X86_PAGE_2M_SIZE
+# define GST_BIG_PAGE_OFFSET_MASK               X86_PAGE_2M_OFFSET_MASK
+# define GST_PDE_PG_MASK                        X86_PDE_PAE_PG_MASK_FULL
+# define GST_PDE_BIG_PG_MASK                    X86_PDE2M_PAE_PG_MASK
+# define GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeGst) ((PdeGst).u & GST_PDE_BIG_PG_MASK)
+# define GST_PD_SHIFT                           X86_PD_PAE_SHIFT
+# define GST_PD_MASK                            X86_PD_PAE_MASK
 # if PGM_GST_TYPE == PGM_TYPE_PAE
-#  define GST_TOTAL_PD_ENTRIES      (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)
-#  define GST_PDPE_ENTRIES          X86_PG_PAE_PDPE_ENTRIES
-#  define GST_PDPE_PG_MASK          X86_PDPE_PG_MASK_FULL
-#  define GST_PDPT_SHIFT            X86_PDPT_SHIFT
-#  define GST_PDPT_MASK             X86_PDPT_MASK_PAE
-#  define GST_PTE_PG_MASK           X86_PTE_PAE_PG_MASK
-#  define GST_CR3_PAGE_MASK         X86_CR3_PAE_PAGE_MASK
+#  define GSTPTWALK                             PGMPTWALKGSTPAE
+#  define PGSTPTWALK                            PPGMPTWALKGSTPAE
+#  define PCGSTPTWALK                           PCPGMPTWALKGSTPAE
+#  define GST_TOTAL_PD_ENTRIES                  (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)
+#  define GST_PDPE_ENTRIES                      X86_PG_PAE_PDPE_ENTRIES
+#  define GST_PDPE_PG_MASK                      X86_PDPE_PG_MASK_FULL
+#  define GST_PDPT_SHIFT                        X86_PDPT_SHIFT
+#  define GST_PDPT_MASK                         X86_PDPT_MASK_PAE
+#  define GST_PTE_PG_MASK                       X86_PTE_PAE_PG_MASK
+#  define GST_CR3_PAGE_MASK                     X86_CR3_PAE_PAGE_MASK
+#  define GST_IS_PTE_VALID(pVCpu, Pte)          (!( (Pte).u   & (pVCpu)->pgm.s.fGstPaeMbzPteMask ))
+#  define GST_IS_PDE_VALID(pVCpu, Pde)          (!( (Pde).u   & (pVCpu)->pgm.s.fGstPaeMbzPdeMask ))
+#  define GST_IS_BIG_PDE_VALID(pVCpu, Pde)      (!( (Pde).u   & (pVCpu)->pgm.s.fGstPaeMbzBigPdeMask ))
+#  define GST_IS_PDPE_VALID(pVCpu, Pdpe)        (!( (Pdpe).u  & (pVCpu)->pgm.s.fGstPaeMbzPdpeMask ))
+//# define GST_IS_BIG_PDPE_VALID(pVCpu, Pdpe)    (false)
+//# define GST_IS_PML4E_VALID(pVCpu, Pml4e)      (false)
 # else
-#  define GST_TOTAL_PD_ENTRIES      (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)
-#  define GST_PDPE_ENTRIES          X86_PG_AMD64_PDPE_ENTRIES
-#  define GST_PDPT_SHIFT            X86_PDPT_SHIFT
-#  define GST_PDPE_PG_MASK          X86_PDPE_PG_MASK_FULL
-#  define GST_PDPT_MASK             X86_PDPT_MASK_AMD64
-#  define GST_PTE_PG_MASK           X86_PTE_PAE_PG_MASK_FULL
-#  define GST_CR3_PAGE_MASK         X86_CR3_AMD64_PAGE_MASK
+#  define GSTPTWALK                             PGMPTWALKGSTAMD64
+#  define PGSTPTWALK                            PPGMPTWALKGSTAMD64
+#  define PCGSTPTWALK                           PCPGMPTWALKGSTAMD64
+#  define GST_TOTAL_PD_ENTRIES                  (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)
+#  define GST_PDPE_ENTRIES                      X86_PG_AMD64_PDPE_ENTRIES
+#  define GST_PDPT_SHIFT                        X86_PDPT_SHIFT
+#  define GST_PDPE_PG_MASK                      X86_PDPE_PG_MASK_FULL
+#  define GST_PDPT_MASK                         X86_PDPT_MASK_AMD64
+#  define GST_PTE_PG_MASK                       X86_PTE_PAE_PG_MASK_FULL
+#  define GST_CR3_PAGE_MASK                     X86_CR3_AMD64_PAGE_MASK
+#  define GST_IS_PTE_VALID(pVCpu, Pte)          (!( (Pte).u   & (pVCpu)->pgm.s.fGstAmd64MbzPteMask ))
+#  define GST_IS_PDE_VALID(pVCpu, Pde)          (!( (Pde).u   & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask ))
+#  define GST_IS_BIG_PDE_VALID(pVCpu, Pde)      (!( (Pde).u   & (pVCpu)->pgm.s.fGstAmd64MbzBigPdeMask ))
+#  define GST_IS_PDPE_VALID(pVCpu, Pdpe)        (!( (Pdpe).u  & (pVCpu)->pgm.s.fGstAmd64MbzPdpeMask ))
+#  define GST_IS_BIG_PDPE_VALID(pVCpu, Pdpe)    (!( (Pdpe).u  & (pVCpu)->pgm.s.fGstAmd64MbzBigPdpeMask ))
+#  define GST_IS_PML4E_VALID(pVCpu, Pml4e)      (!( (Pml4e).u & (pVCpu)->pgm.s.fGstAmd64MbzPml4eMask ))
 # endif
-# define GST_PT_SHIFT               X86_PT_PAE_SHIFT
-# define GST_PT_MASK                X86_PT_PAE_MASK
+# define GST_PT_SHIFT                           X86_PT_PAE_SHIFT
+# define GST_PT_MASK                            X86_PT_PAE_MASK
+# define GST_IS_PSE_ACTIVE(pVCpu)               (true)
+# define GST_IS_NX_ACTIVE(pVCpu)                (CPUMIsGuestNXEnabled(pVCpu)) /** @todo shadow this variable */
+# define BTH_IS_NP_ACTIVE(pVM)                  (false)
 #endif
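These GST_IS_*_VALID macros reduce every reserved-bit check to a single AND against a per-VCPU mask. A self-contained rendering of what the PAE PDE checks expand to (hypothetical type and field names standing in for the PGMCPU members):

#include <stdint.h>
#include <stdbool.h>

typedef struct MYPGMCPU
{
    uint64_t fGstPaeMbzPdeMask;     /* MBZ bits for a PDE referencing a PT */
    uint64_t fGstPaeMbzBigPdeMask;  /* MBZ bits for a 2M (big) PDE */
} MYPGMCPU;

#define MY_PDE_P   UINT64_C(0x01)   /* present bit */
#define MY_PDE_PS  UINT64_C(0x80)   /* page size bit */

/* Returns true if the entry would raise a reserved-bit page fault. */
static bool myPaePdeIsRsvdFault(const MYPGMCPU *pPgm, uint64_t uPde)
{
    if (!(uPde & MY_PDE_P))
        return false;               /* not-present faults are another path */
    if (uPde & MY_PDE_PS)
        return (uPde & pPgm->fGstPaeMbzBigPdeMask) != 0;
    return (uPde & pPgm->fGstPaeMbzPdeMask) != 0;
}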
 
Index: /trunk/src/VBox/VMM/PGMInline.h
===================================================================
--- /trunk/src/VBox/VMM/PGMInline.h	(revision 30888)
+++ /trunk/src/VBox/VMM/PGMInline.h	(revision 30889)
@@ -572,185 +572,123 @@
 
 /**
- * Gets the page directory entry for the specified address (32-bit paging).
- *
- * @returns The page directory entry in question.
- * @param   pPGM        Pointer to the PGM instance data.
+ * Gets the address of the guest page directory (32-bit paging).
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The current CPU.
+ * @param   ppPd        Where to return the mapping. This is always set.
+ */
+DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPU pVCpu, PX86PD *ppPd)
+{
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    int rc = pgmR0DynMapGCPageInlined(&pVCpu->CTX_SUFF(pVM)->pgm.s, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd);
+    if (RT_FAILURE(rc))
+    {
+        *ppPd = NULL;
+        return rc;
+    }
+#else
+    *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
+    if (RT_UNLIKELY(!*ppPd))
+        return pgmGstLazyMap32BitPD(pVCpu, ppPd);
+#endif
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets the address of the guest page directory (32-bit paging).
+ *
+ * @returns Pointer to the page directory in question.
+ * @param   pVCpu       The current CPU.
+ */
+DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPU pVCpu)
+{
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    PX86PD pGuestPD = NULL;
+    int rc = pgmR0DynMapGCPageInlined(&pVCpu->CTX_SUFF(pVM)->pgm.s, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD);
+    if (RT_FAILURE(rc))
+    {
+        AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
+        return NULL;
+    }
+#else
+    PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
+    if (RT_UNLIKELY(!pGuestPD))
+    {
+        int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
+        if (RT_FAILURE(rc))
+            return NULL;
+    }
+#endif
+    return pGuestPD;
+}
+
+
+/**
+ * Gets the guest page directory pointer table.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The current CPU.
+ * @param   ppPdpt      Where to return the mapping.  This is always set.
+ */
+DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPU pVCpu, PX86PDPT *ppPdpt)
+{
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    int rc = pgmR0DynMapGCPageOffInlined(&pVCpu->CTX_SUFF(pVM)->pgm.s, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt);
+    if (RT_FAILURE(rc))
+    {
+        *ppPdpt = NULL;
+        return rc;
+    }
+#else
+    *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
+    if (RT_UNLIKELY(!*ppPdpt))
+        return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
+#endif
+    return VINF_SUCCESS;
+}
+
+/**
+ * Gets the guest page directory pointer table.
+ *
+ * @returns Pointer to the page directory pointer table in question.
+ * @returns NULL if the PDPT is not present or on an invalid page.
+ * @param   pVCpu       The current CPU.
+ */
+DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPU pVCpu)
+{
+    PX86PDPT pGuestPdpt;
+    int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
+    AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
+    return pGuestPdpt;
+}
+
+
+/**
+ * Gets the guest page directory pointer table entry for the specified address.
+ *
+ * @returns Pointer to the page directory pointer table entry in question.
+ * @returns NULL if the PDPT is not present or on an invalid page.
+ * @param   pVCpu       The current CPU.
  * @param   GCPtr       The address.
  */
-DECLINLINE(X86PDE) pgmGstGet32bitPDE(PPGMCPU pPGM, RTGCPTR GCPtr)
-{
+DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
+{
+    AssertGCPtr32(GCPtr);
+
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PCX86PD pGuestPD = NULL;
-    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
-    if (RT_FAILURE(rc))
-    {
-        X86PDE ZeroPde = {0};
-        AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPde);
-    }
-#else
-    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
-    if (RT_UNLIKELY(!pGuestPD))
-        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
-#endif
-    return pGuestPD->a[GCPtr >> X86_PD_SHIFT];
-}
-
-
-/**
- * Gets the address of a specific page directory entry (32-bit paging).
- *
- * @returns Pointer the page directory entry in question.
- * @param   pPGM        Pointer to the PGM instance data.
- * @param   GCPtr       The address.
- */
-DECLINLINE(PX86PDE) pgmGstGet32bitPDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
-{
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PX86PD  pGuestPD = NULL;
-    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
+    PX86PDPT pGuestPDPT = NULL;
+    int rc = pgmR0DynMapGCPageOffInlined(&pVCpu->CTX_SUFF(pVM)->pgm.s, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT);
     AssertRCReturn(rc, NULL);
 #else
-    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
-    if (RT_UNLIKELY(!pGuestPD))
-        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
-#endif
-    return &pGuestPD->a[GCPtr >> X86_PD_SHIFT];
-}
-
-
-/**
- * Gets the address the guest page directory (32-bit paging).
- *
- * @returns Pointer the page directory entry in question.
- * @param   pPGM        Pointer to the PGM instance data.
- */
-DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PPGMCPU pPGM)
-{
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PX86PD  pGuestPD = NULL;
-    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
-    AssertRCReturn(rc, NULL);
-#else
-    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
-    if (RT_UNLIKELY(!pGuestPD))
-        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
-#endif
-    return pGuestPD;
-}
-
-
-/**
- * Gets the guest page directory pointer table.
- *
- * @returns Pointer to the page directory in question.
- * @returns NULL if the page directory is not present or on an invalid page.
- * @param   pPGM        Pointer to the PGM instance data.
- */
-DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PPGMCPU pPGM)
-{
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PX86PDPT pGuestPDPT = NULL;
-    int rc = pgmR0DynMapGCPageOffInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPDPT);
-    AssertRCReturn(rc, NULL);
-#else
-    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
+    PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
     if (RT_UNLIKELY(!pGuestPDPT))
-        pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
-#endif
-    return pGuestPDPT;
-}
-
-
-/**
- * Gets the guest page directory pointer table entry for the specified address.
- *
- * @returns Pointer to the page directory in question.
- * @returns NULL if the page directory is not present or on an invalid page.
- * @param   pPGM        Pointer to the PGM instance data.
- * @param   GCPtr       The address.
- */
-DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
-{
-    AssertGCPtr32(GCPtr);
-
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PX86PDPT pGuestPDPT = 0;
-    int rc = pgmR0DynMapGCPageOffInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPDPT);
-    AssertRCReturn(rc, 0);
-#else
-    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
-    if (RT_UNLIKELY(!pGuestPDPT))
-        pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
+    {
+        int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
+        if (RT_FAILURE(rc))
+            return NULL;
+    }
 #endif
     return &pGuestPDPT->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
-}
-
-
-/**
- * Gets the page directory for the specified address.
- *
- * @returns Pointer to the page directory in question.
- * @returns NULL if the page directory is not present or on an invalid page.
- * @param   pPGM        Pointer to the PGM instance data.
- * @param   GCPtr       The address.
- */
-DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGMCPU pPGM, RTGCPTR GCPtr)
-{
-    AssertGCPtr32(GCPtr);
-
-    PX86PDPT        pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
-    AssertReturn(pGuestPDPT, NULL);
-    const unsigned  iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
-    if (pGuestPDPT->a[iPdpt].n.u1Present)
-    {
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-        PX86PDPAE   pGuestPD = NULL;
-        int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
-        AssertRCReturn(rc, NULL);
-#else
-        PX86PDPAE   pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
-        if (    !pGuestPD
-            ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
-            pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
-#endif
-        return pGuestPD;
-        /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. (not quite true) */
-    }
-    return NULL;
-}
-
-
-/**
- * Gets the page directory entry for the specified address.
- *
- * @returns Pointer to the page directory entry in question.
- * @returns NULL if the page directory is not present or on an invalid page.
- * @param   pPGM        Pointer to the PGM instance data.
- * @param   GCPtr       The address.
- */
-DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
-{
-    AssertGCPtr32(GCPtr);
-
-    PX86PDPT        pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
-    AssertReturn(pGuestPDPT, NULL);
-    const unsigned  iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
-    if (pGuestPDPT->a[iPdpt].n.u1Present)
-    {
-        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-        PX86PDPAE   pGuestPD = NULL;
-        int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
-        AssertRCReturn(rc, NULL);
-#else
-        PX86PDPAE   pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
-        if (    !pGuestPD
-            ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
-            pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
-#endif
-        return &pGuestPD->a[iPD];
-        /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. (not quite true) */
-    }
-    return NULL;
 }
 
@@ -761,31 +699,38 @@
  * @returns The page directory entry in question.
  * @returns A non-present entry if the page directory is not present or on an invalid page.
- * @param   pPGM        Pointer to the PGM instance data.
+ * @param   pVCpu       The current CPU.
  * @param   GCPtr       The address.
  */
-DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PPGMCPU pPGM, RTGCPTR GCPtr)
+DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
 {
     AssertGCPtr32(GCPtr);
-    X86PDEPAE   ZeroPde = {0};
-    PX86PDPT    pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
+    PX86PDPT    pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
     if (RT_LIKELY(pGuestPDPT))
     {
         const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
-        if (pGuestPDPT->a[iPdpt].n.u1Present)
+        if (    pGuestPDPT->a[iPdpt].n.u1Present
+            &&  !(pGuestPDPT->a[iPdpt].u & pVCpu->pgm.s.fGstPaeMbzPdpeMask) )
         {
             const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
             PX86PDPAE   pGuestPD = NULL;
-            int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
-            AssertRCReturn(rc, ZeroPde);
+            int rc = pgmR0DynMapGCPageInlined(&pVCpu->CTX_SUFF(pVM)->pgm.s,
+                                              pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
+                                              (void **)&pGuestPD);
+            if (RT_SUCCESS(rc))
+                return pGuestPD->a[iPD];
+            AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
 #else
-            PX86PDPAE   pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
+            PX86PDPAE   pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
             if (    !pGuestPD
-                ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
-                pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
-#endif
-            return pGuestPD->a[iPD];
+                ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
+                pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
+            if (pGuestPD)
+                return pGuestPD->a[iPD];
+#endif
         }
     }
+
+    X86PDEPAE ZeroPde = {0};
     return ZeroPde;
 }
@@ -798,36 +743,45 @@
  * @returns Pointer to the page directory in question.
  * @returns NULL if the page directory is not present or on an invalid page.
- * @param   pPGM        Pointer to the PGM instance data.
+ * @param   pVCpu       The current CPU.
  * @param   GCPtr       The address.
  * @param   piPD        Receives the index into the returned page directory
  * @param   pPdpe       Receives the page directory pointer entry. Optional.
  */
-DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PPGMCPU pPGM, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
+DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
 {
     AssertGCPtr32(GCPtr);
 
-    PX86PDPT        pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
-    AssertReturn(pGuestPDPT, NULL);
+    /* The PDPE. */
+    PX86PDPT        pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
+    if (RT_UNLIKELY(!pGuestPDPT))
+        return NULL;
     const unsigned  iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
     if (pPdpe)
         *pPdpe = pGuestPDPT->a[iPdpt];
-    if (pGuestPDPT->a[iPdpt].n.u1Present)
-    {
-        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
+    if (!pGuestPDPT->a[iPdpt].n.u1Present)
+        return NULL;
+    if (RT_UNLIKELY(pVCpu->pgm.s.fGstPaeMbzPdpeMask & pGuestPDPT->a[iPdpt].u))
+        return NULL;
+
+    /* The PDE. */
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-        PX86PDPAE   pGuestPD = NULL;
-        int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
-        AssertRCReturn(rc, NULL);
+    PX86PDPAE   pGuestPD = NULL;
+    int rc = pgmR0DynMapGCPageInlined(&pVCpu->CTX_SUFF(pVM)->pgm.s,
+                                      pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
+                                      (void **)&pGuestPD);
+    if (RT_FAILURE(rc))
+    {
+        AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
+        return NULL;
+    }
 #else
-        PX86PDPAE   pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
-        if (    !pGuestPD
-            ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
-            pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
-#endif
-        *piPD = iPD;
-        return pGuestPD;
-        /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
-    }
-    return NULL;
+    PX86PDPAE   pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
+    if (    !pGuestPD
+        ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
+        pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
+#endif
+
+    *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
+    return pGuestPD;
 }
 
@@ -837,8 +791,50 @@
  * Gets the page map level-4 pointer for the guest.
  *
+ * @returns VBox status code.
+ * @param   pVCpu       The current CPU.
+ * @param   ppPml4      Where to return the mapping.  Always set.
+ */
+DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPU pVCpu, PX86PML4 *ppPml4)
+{
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    int rc = pgmR0DynMapGCPageInlined(&pVCpu->CTX_SUFF(pVM)->pgm.s, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4);
+    if (RT_FAILURE(rc))
+    {
+        *ppPml4 = NULL;
+        return rc;
+    }
+#else
+    *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
+    if (RT_UNLIKELY(!*ppPml4))
+        return pgmGstLazyMapPml4(pVCpu, ppPml4);
+#endif
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets the page map level-4 pointer for the guest.
+ *
  * @returns Pointer to the PML4 page.
- * @param   pPGM        Pointer to the PGM instance data.
- */
-DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PPGMCPU pPGM)
+ * @param   pVCpu       The current CPU.
+ */
+DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPU pVCpu)
+{
+    PX86PML4 pGuestPml4;
+    int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
+    AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
+    return pGuestPml4;
+}
+
+
+/**
+ * Gets the pointer to a page map level-4 entry.
+ *
+ * @returns Pointer to the PML4 entry.
+ * @param   pVCpu       The current CPU.
+ * @param   iPml4       The index.
+ * @remarks Only used by AssertCR3.
+ */
+DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
 {
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
@@ -847,87 +843,12 @@
     AssertRCReturn(rc, NULL);
 #else
-    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
+    PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
     if (RT_UNLIKELY(!pGuestPml4))
-        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
-    Assert(pGuestPml4);
-#endif
-    return pGuestPml4;
-}
-
-
-/**
- * Gets the pointer to a page map level-4 entry.
- *
- * @returns Pointer to the PML4 entry.
- * @param   pPGM        Pointer to the PGM instance data.
- * @param   iPml4       The index.
- */
-DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PPGMCPU pPGM, unsigned int iPml4)
-{
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PX86PML4 pGuestPml4;
-    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
-    AssertRCReturn(rc, NULL);
-#else
-    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
-    if (RT_UNLIKELY(!pGuestPml4))
-        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
-    Assert(pGuestPml4);
+    {
+        int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
+        AssertRCReturn(rc, NULL);
+    }
 #endif
     return &pGuestPml4->a[iPml4];
-}
-
-
-/**
- * Gets a page map level-4 entry.
- *
- * @returns The PML4 entry.
- * @param   pPGM        Pointer to the PGM instance data.
- * @param   iPml4       The index.
- */
-DECLINLINE(X86PML4E) pgmGstGetLongModePML4E(PPGMCPU pPGM, unsigned int iPml4)
-{
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PX86PML4 pGuestPml4;
-    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
-    if (RT_FAILURE(rc))
-    {
-        X86PML4E ZeroPml4e = {0};
-        AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPml4e);
-    }
-#else
-    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
-    if (!pGuestPml4)
-        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
-    Assert(pGuestPml4);
-#endif
-    return pGuestPml4->a[iPml4];
-}
-
-
-/**
- * Gets the page directory pointer entry for the specified address.
- *
- * @returns Pointer to the page directory pointer entry in question.
- * @returns NULL if the page directory is not present or on an invalid page.
- * @param   pPGM        Pointer to the PGM instance data.
- * @param   GCPtr       The address.
- * @param   ppPml4e     Page Map Level-4 Entry (out)
- */
-DECLINLINE(PX86PDPE) pgmGstGetLongModePDPTPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e)
-{
-    PX86PML4        pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
-    const unsigned  iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    PCX86PML4E      pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
-    if (pPml4e->n.u1Present)
-    {
-        PX86PDPT pPdpt;
-        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdpt);
-        AssertRCReturn(rc, NULL);
-
-        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
-        return &pPdpt->a[iPdpt];
-    }
-    return NULL;
 }
 
@@ -938,102 +859,42 @@
  * @returns The page directory entry in question.
  * @returns A non-present entry if the page directory is not present or on an invalid page.
- * @param   pPGM        Pointer to the PGM instance data.
+ * @param   pVCpu       The current CPU.
  * @param   GCPtr       The address.
- * @param   ppPml4e     Page Map Level-4 Entry (out)
- * @param   pPdpe       Page directory pointer table entry (out)
- */
-DECLINLINE(X86PDEPAE) pgmGstGetLongModePDEEx(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe)
-{
-    X86PDEPAE       ZeroPde = {0};
-    PX86PML4        pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
-    const unsigned  iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    PCX86PML4E      pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
-    if (pPml4e->n.u1Present)
+ */
+DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPU pVCpu, RTGCPTR64 GCPtr)
+{
+    /*
+     * Note! To keep things simple, ASSUME invalid physical addresses will
+     *       cause X86_TRAP_PF_RSVD.  This isn't a problem until we start
+     *       supporting 52-bit wide physical guest addresses.
+     */
+    PCX86PML4       pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
+    const unsigned  iPml4      = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+    if (    RT_LIKELY(pGuestPml4)
+        &&  pGuestPml4->a[iPml4].n.u1Present
+        &&  !(pGuestPml4->a[iPml4].u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask) )
     {
         PCX86PDPT   pPdptTemp;
-        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
-        AssertRCReturn(rc, ZeroPde);
-
-        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
-        *pPdpe = pPdptTemp->a[iPdpt];
-        if (pPdptTemp->a[iPdpt].n.u1Present)
+        int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
+        if (RT_SUCCESS(rc))
         {
-            PCX86PDPAE pPD;
-            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
-            AssertRCReturn(rc, ZeroPde);
-
-            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
-            return pPD->a[iPD];
+            const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
+            if (    pPdptTemp->a[iPdpt].n.u1Present
+                &&  !(pPdptTemp->a[iPdpt].u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask) )
+            {
+                PCX86PDPAE pPD;
+                rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
+                if (RT_SUCCESS(rc))
+                {
+                    const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
+                    return pPD->a[iPD];
+                }
+            }
         }
-    }
-
+        AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
+    }
+
+    X86PDEPAE ZeroPde = {0};
     return ZeroPde;
-}
-
-
-/**
- * Gets the page directory entry for the specified address.
- *
- * @returns The page directory entry in question.
- * @returns A non-present entry if the page directory is not present or on an invalid page.
- * @param   pPGM        Pointer to the PGM instance data.
- * @param   GCPtr       The address.
- */
-DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PPGMCPU pPGM, RTGCPTR64 GCPtr)
-{
-    X86PDEPAE       ZeroPde = {0};
-    PCX86PML4       pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
-    const unsigned  iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    if (pGuestPml4->a[iPml4].n.u1Present)
-    {
-        PCX86PDPT   pPdptTemp;
-        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
-        AssertRCReturn(rc, ZeroPde);
-
-        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
-        if (pPdptTemp->a[iPdpt].n.u1Present)
-        {
-            PCX86PDPAE pPD;
-            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
-            AssertRCReturn(rc, ZeroPde);
-
-            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
-            return pPD->a[iPD];
-        }
-    }
-    return ZeroPde;
-}
-
-
-/**
- * Gets the page directory entry for the specified address.
- *
- * @returns Pointer to the page directory entry in question.
- * @returns NULL if the page directory is not present or on an invalid page.
- * @param   pPGM        Pointer to the PGM instance data.
- * @param   GCPtr       The address.
- */
-DECLINLINE(PX86PDEPAE) pgmGstGetLongModePDEPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr)
-{
-    PCX86PML4       pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
-    const unsigned  iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    if (pGuestPml4->a[iPml4].n.u1Present)
-    {
-        PCX86PDPT   pPdptTemp;
-        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
-        AssertRCReturn(rc, NULL);
-
-        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
-        if (pPdptTemp->a[iPdpt].n.u1Present)
-        {
-            PX86PDPAE pPD;
-            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
-            AssertRCReturn(rc, NULL);
-
-            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
-            return &pPD->a[iPD];
-        }
-    }
-    return NULL;
 }
 
@@ -1044,5 +905,5 @@
  * @returns The page directory in question.
  * @returns NULL if the page directory is not present or on an invalid page.
- * @param   pPGM        Pointer to the PGM instance data.
+ * @param   pVCpu       The current CPU.
  * @param   GCPtr       The address.
  * @param   ppPml4e     Page Map Level-4 Entry (out)
@@ -1050,28 +911,43 @@
  * @param   piPD        Receives the index into the returned page directory
  */
-DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
-{
-    PX86PML4        pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
-    const unsigned  iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    PCX86PML4E      pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
-    if (pPml4e->n.u1Present)
-    {
-        PCX86PDPT   pPdptTemp;
-        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
-        AssertRCReturn(rc, NULL);
-
-        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
-        *pPdpe = pPdptTemp->a[iPdpt];
-        if (pPdptTemp->a[iPdpt].n.u1Present)
-        {
-            PX86PDPAE pPD;
-            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
-            AssertRCReturn(rc, NULL);
-
-            *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
-            return pPD;
-        }
-    }
-    return 0;
+DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
+{
+    /* The PML4E. */
+    PX86PML4        pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
+    if (RT_UNLIKELY(!pGuestPml4))
+        return NULL;
+    const unsigned  iPml4      = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
+    PCX86PML4E      pPml4e     = *ppPml4e = &pGuestPml4->a[iPml4];
+    if (!pPml4e->n.u1Present)
+        return NULL;
+    if (RT_UNLIKELY(pPml4e->u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask))
+        return NULL;
+
+    /* The PDPE. */
+    PCX86PDPT       pPdptTemp;
+    int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
+    if (RT_FAILURE(rc))
+    {
+        AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
+        return NULL;
+    }
+    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
+    *pPdpe = pPdptTemp->a[iPdpt];
+    if (!pPdpe->n.u1Present)
+        return NULL;
+    if (RT_UNLIKELY(pPdpe->u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask))
+        return NULL;
+
+    /* The PDE. */
+    PX86PDPAE pPD;
+    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
+    if (RT_FAILURE(rc))
+    {
+        AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
+        return NULL;
+    }
+
+    *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
+    return pPD;
 }
 
@@ -1271,38 +1147,4 @@
         return NULL;
     return &pShwPml4->a[iPml4];
-}
-
-
-/**
- * Gets the GUEST page directory pointer for the specified address.
- *
- * @returns The page directory in question.
- * @returns NULL if the page directory is not present or on an invalid page.
- * @param   pPGM        Pointer to the PGM instance data.
- * @param   GCPtr       The address.
- * @param   piPD        Receives the index into the returned page directory
- */
-DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, unsigned *piPD)
-{
-    PCX86PML4       pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
-    const unsigned  iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-    if (pGuestPml4->a[iPml4].n.u1Present)
-    {
-        PCX86PDPT   pPdptTemp;
-        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
-        AssertRCReturn(rc, NULL);
-
-        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
-        if (pPdptTemp->a[iPdpt].n.u1Present)
-        {
-            PX86PDPAE pPD;
-            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
-            AssertRCReturn(rc, NULL);
-
-            *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
-            return pPD;
-        }
-    }
-    return NULL;
 }
 
Index: /trunk/src/VBox/VMM/PGMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/PGMInternal.h	(revision 30888)
+++ /trunk/src/VBox/VMM/PGMInternal.h	(revision 30889)
@@ -91,4 +91,12 @@
 
 /**
+ * Enables optimizations for MMIO handlers that exploit X86_TRAP_PF_RSVD and
+ * VMX_EXIT_EPT_MISCONFIG.
+ */
+#if 0 /* ! remember to disable before committing ! XXX TODO  */
+# define PGM_WITH_MMIO_OPTIMIZATIONS
+#endif
+
+/**
  * Chunk unmapping code activated on 32-bit hosts for > 1.5/2 GB guest memory support
  */
@@ -266,4 +274,24 @@
 # define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
      PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
+#endif
+
+/** @def PGM_GCPHYS_2_PTR_BY_VMCPU
+ * Maps a GC physical page address to a virtual address.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu   The current CPU.
+ * @param   GCPhys  The GC physical address to map to a virtual one.
+ * @param   ppv     Where to store the virtual address. No need to cast this.
+ *
+ * @remark  In RC this uses PGMGCDynMapGCPage(), so it will consume part of the
+ *          small page window employed by that function. Be careful.
+ * @remark  There is no need to assert on the result.
+ */
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+# define PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhys, ppv) \
+     pgmR0DynMapGCPageInlined(&(pVCpu)->CTX_SUFF(pVM)->pgm.s, GCPhys, (void **)(ppv))
+#else
+# define PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhys, ppv) \
+     PGM_GCPHYS_2_PTR((pVCpu)->CTX_SUFF(pVM), GCPhys, ppv)
 #endif
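Typical use mirrors PGM_GCPHYS_2_PTR (sketch; GCPhysPdpt is a hypothetical
guest physical address):

    PCX86PDPT pPdptTemp;
    int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pPdptTemp);
    if (RT_SUCCESS(rc))
    {
        X86PDPE Pdpe = pPdptTemp->a[0];     /* first guest PDPT entry */
        /* ... */
    }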
 
@@ -1764,5 +1792,5 @@
     /** Shw: PAE page directory;    Gst: PAE page directory. */
     PGMPOOLKIND_PAE_PD_FOR_PAE_PD,
-    /** Shw: PAE page directory;    Gst: no paging. */
+    /** Shw: PAE page directory;    Gst: no paging.             Note: +NP. */
     PGMPOOLKIND_PAE_PD_PHYS,
 
@@ -1881,5 +1909,6 @@
      * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
     bool volatile       fReusedFlushPending;
-    /** Used to mark the page as dirty (write monitoring if temporarily off. */
+    /** Used to mark the page as dirty (write monitoring is temporarily
+     *  off). */
     bool                fDirty;
 
@@ -2269,6 +2298,123 @@
 
 
+/**
+ * Common guest page table walk state.
+ *
+ * This is the core shared by the mode specific walk structures below.
+ */
+typedef struct PGMPTWALKCORE
+{
+    /** The guest virtual address that is being resolved by the walk
+     *  (input). */
+    RTGCPTR         GCPtr;
+
+    /** The guest physical address that is the result of the walk.
+     * @remarks Only valid if fSucceeded is set.  */
+    RTGCPHYS        GCPhys;
+
+    /** Set if the walk succeeded, i.e. GCPhys is valid. */
+    bool            fSucceeded;
+    /** The level the problem arose at.
+     * PTE is level 1, PDE is level 2, PDPE is level 3, PML4 is level 4, CR3 is
+     * level 8.  This is 0 on success. */
+    uint8_t         uLevel;
+    /** Set if the page isn't present. */
+    bool            fNotPresent;
+    /** Encountered a bad physical address. */
+    bool            fBadPhysAddr;
+    /** Set if there was reserved bit violations. */
+    bool            fRsvdError;
+    /** Set if it involves a big page (2/4 MB). */
+    bool            fBigPage;
+    /** Set if it involves a gigantic page (1 GB). */
+    bool            fGigantPage;
+#if 0
+    /** Set if write access was attempted and not possible. */
+    bool            fWriteError;
+    /** Set if execute access was attempted and not possible. */
+    bool            fExecuteError;
+#endif
+    /** Unused. */
+    bool            afUnused[3];
+} PGMPTWALKCORE;
+
+
+/**
+ * Guest page table walk for the AMD64 mode.
+ */
+typedef struct PGMPTWALKGSTAMD64
+{
+    /** The common core. */
+    PGMPTWALKCORE   Core;
+
+    PX86PML4        pPml4;
+    PX86PML4E       pPml4e;
+    X86PML4E        Pml4e;
+
+    PX86PDPT        pPdpt;
+    PX86PDPE        pPdpe;
+    X86PDPE         Pdpe;
+
+    PX86PDPAE       pPd;
+    PX86PDEPAE      pPde;
+    X86PDEPAE       Pde;
+
+    PX86PTPAE       pPt;
+    PX86PTEPAE      pPte;
+    X86PTEPAE       Pte;
+} PGMPTWALKGSTAMD64;
+/** Pointer to an AMD64 guest page table walk. */
+typedef PGMPTWALKGSTAMD64 *PPGMPTWALKGSTAMD64;
+/** Pointer to a const AMD64 guest page table walk. */
+typedef PGMPTWALKGSTAMD64 const *PCPGMPTWALKGSTAMD64;
+
+/**
+ * Guest page table walk for the PAE mode.
+ */
+typedef struct PGMPTWALKGSTPAE
+{
+    /** The common core. */
+    PGMPTWALKCORE   Core;
+
+    PX86PDPT        pPdpt;
+    PX86PDPE        pPdpe;
+    X86PDPE         Pdpe;
+
+    PX86PDPAE       pPd;
+    PX86PDEPAE      pPde;
+    X86PDEPAE       Pde;
+
+    PX86PTPAE       pPt;
+    PX86PTEPAE      pPte;
+    X86PTEPAE       Pte;
+} PGMPTWALKGSTPAE;
+/** Pointer to a PAE guest page table walk. */
+typedef PGMPTWALKGSTPAE *PPGMPTWALKGSTPAE;
+/** Pointer to a const PAE guest page table walk. */
+typedef PGMPTWALKGSTPAE const *PCPGMPTWALKGSTPAE;
+
+/**
+ * Guest page table walk for the 32-bit mode.
+ */
+typedef struct PGMPTWALKGST32BIT
+{
+    /** The common core. */
+    PGMPTWALKCORE   Core;
+
+    PX86PD          pPd;
+    PX86PDE         pPde;
+    X86PDE          Pde;
+
+    PX86PT          pPt;
+    PX86PTE         pPte;
+    X86PTE          Pte;
+} PGMPTWALKGST32BIT;
+/** Pointer to a 32-bit guest page table walk. */
+typedef PGMPTWALKGST32BIT *PPGMPTWALKGST32BIT;
+/** Pointer to a const 32-bit guest page table walk. */
+typedef PGMPTWALKGST32BIT const *PCPGMPTWALKGST32BIT;
+
+
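A consumer of a finished walk only needs the common core to resolve the
address; the per-level members exist for fault attribution.  Sketch, assuming
some pgmGstWalkAmd64(pVCpu, GCPtr, &Walk) style function fills it in:

    PGMPTWALKGSTAMD64 Walk;
    /* ... perform the walk ... */
    if (Walk.Core.fSucceeded)
        LogFlow(("%RGv -> %RGp (big=%d)\n", Walk.Core.GCPtr, Walk.Core.GCPhys, Walk.Core.fBigPage));
    else if (Walk.Core.fRsvdError)
        LogFlow(("reserved bit violation at level %u\n", Walk.Core.uLevel));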
 /** @name Paging mode macros
- * @{ */
+ * @{
+ */
 #ifdef IN_RC
 # define PGM_CTX(a,b)                   a##RC##b
@@ -2497,6 +2643,9 @@
      * detection. */
     bool                            fPhysWriteMonitoringEngaged;
+    /** Set if the CPU has less than 52-bit physical address width.
+     * This is used by the invalid MMIO page trick (see HCPhysInvMmioPg). */
+    bool                            fLessThan52PhysicalAddressBits;
     /** Alignment padding. */
-    bool                            afAlignment0[2];
+    bool                            afAlignment0[1];
 
     /*
@@ -2519,4 +2668,7 @@
     /** 4 MB page mask; 32 or 36 bits depending on PSE-36 (identical for all VCPUs) */
     RTGCPHYS                        GCPhys4MBPSEMask;
+    /** Mask containing the invalid bits of a guest physical address.
+     * @remarks This does not stop at bit 52.  */
+    RTGCPHYS                        GCPhysInvAddrMask;
 
     /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.
@@ -2706,4 +2858,18 @@
     RTGCPTR                         pvZeroPgRC;
     /** @}*/
+
+    /** @name   The Invalid MMIO page.
+     * This page is filled with 0xfeedface.
+     * @{ */
+    /** The host physical address of the invalid MMIO page. */
+    RTHCPHYS                        HCPhysMmioPg;
+    /** The host physical address of the invalid MMIO page plus all invalid
+     * physical address bits set.  This is used to trigger X86_TRAP_PF_RSVD.
+     * @remarks Check fLessThan52PhysicalAddressBits before use. */
+    RTHCPHYS                        HCPhysInvMmioPg;
+    /** The ring-3 mapping of the invalid MMIO page. */
+    RTR3PTR                         pvMmioPgR3;
+    /** @} */
+
 
     /** The number of handy pages. */
@@ -2920,5 +3086,5 @@
  * @param   pPGM   Pointer to PGMCPU instance data.
  */
-#define PGMCPU2VM(pPGM)  ( (PVM)((char*)pPGM - pPGM->offVM) )
+#define PGMCPU2VM(pPGM)         ( (PVM)((char*)(pPGM) - (pPGM)->offVM) )
 
 /**
@@ -2927,5 +3093,5 @@
  * @param   pPGM   Pointer to PGMCPU instance data.
  */
-#define PGMCPU2PGM(pPGMCpu)  ( (PPGM)((char*)pPGMCpu - pPGMCpu->offPGM) )
+#define PGMCPU2PGM(pPGMCpu)     ( (PPGM)((char *)(pPGMCpu) - (pPGMCpu)->offPGM) )
 
 /**
@@ -2979,4 +3145,10 @@
     /** The guest's page directory, static RC mapping. */
     RCPTRTYPE(PX86PD)               pGst32BitPdRC;
+    /** Mask containing the MBZ bits of a big page PDE. */
+    uint32_t                        fGst32BitMbzBigPdeMask;
+    /** Set if the page size extension (PSE) is enabled. */
+    bool                            fGst32BitPageSizeExtension;
+    /** Alignment padding. */
+    bool                            afAlignment4[3];
     /** @} */
 
@@ -3009,4 +3181,12 @@
     /** The physical addresses of the monitored guest page directories (PAE). */
     RTGCPHYS                        aGCPhysGstPaePDsMonitored[4];
+    /** Mask containing the MBZ PTE bits. */
+    uint64_t                        fGstPaeMbzPteMask;
+    /** Mask containing the MBZ PDE bits. */
+    uint64_t                        fGstPaeMbzPdeMask;
+    /** Mask containing the MBZ big page PDE bits. */
+    uint64_t                        fGstPaeMbzBigPdeMask;
+    /** Mask containing the MBZ PDPE bits. */
+    uint64_t                        fGstPaeMbzPdpeMask;
     /** @} */
 
@@ -3021,4 +3201,16 @@
     RTR0PTR                         alignment6b; /**< alignment equalizer. */
 #endif
+    /** Mask containing the MBZ PTE bits. */
+    uint64_t                        fGstAmd64MbzPteMask;
+    /** Mask containing the MBZ PDE bits. */
+    uint64_t                        fGstAmd64MbzPdeMask;
+    /** Mask containing the MBZ big page PDE bits. */
+    uint64_t                        fGstAmd64MbzBigPdeMask;
+    /** Mask containing the MBZ PDPE bits. */
+    uint64_t                        fGstAmd64MbzPdpeMask;
+    /** Mask containing the MBZ big page PDPE bits. */
+    uint64_t                        fGstAmd64MbzBigPdpeMask;
+    /** Mask containing the MBZ PML4E bits. */
+    uint64_t                        fGstAmd64MbzPml4eMask;
     /** @} */
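These masks are presumably seeded when the guest mode is entered, combining
the architectural MBZ bits with the bits above the CPU's physical address
width (a sketch under that assumption; cMaxPhysAddrWidth is an assumed
CPUID-derived value, and the initialization code is not part of this change):

    uint64_t fMbzPhys = ~(RT_BIT_64(cMaxPhysAddrWidth) - 1) & UINT64_C(0x000ffffffffff000);
    pVCpu->pgm.s.fGstAmd64MbzPteMask = fMbzPhys | X86_PTE_LM_MBZ_MASK_NO_NX;

PGMNotifyNxeChanged() then toggles the NX bit in these masks as EFER.NXE
changes (see PGMAll.cpp below).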
 
@@ -3451,8 +3643,8 @@
 int             pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
 
-PX86PD          pgmGstLazyMap32BitPD(PPGMCPU pPGM);
-PX86PDPT        pgmGstLazyMapPaePDPT(PPGMCPU pPGM);
-PX86PDPAE       pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt);
-PX86PML4        pgmGstLazyMapPml4(PPGMCPU pPGM);
+int             pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd);
+int             pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt);
+int             pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd);
+int             pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4);
 
 # if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
Index: /trunk/src/VBox/VMM/PGMMap.cpp
===================================================================
--- /trunk/src/VBox/VMM/PGMMap.cpp	(revision 30888)
+++ /trunk/src/VBox/VMM/PGMMap.cpp	(revision 30889)
@@ -1280,5 +1280,5 @@
     {
         unsigned  iPDSrc;
-        PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVCpu->pgm.s, (RTGCPTR32)iPDPTE << X86_PDPT_SHIFT, &iPDSrc, NULL);
+        PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(pVCpu, (RTGCPTR32)iPDPTE << X86_PDPT_SHIFT, &iPDSrc, NULL);
 
         /*
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 30888)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 30889)
@@ -1006,5 +1006,5 @@
             /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
                if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
-            if (   (uValue                      & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
+            if (   (uOldEFER                    & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
                 != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
             {
@@ -1013,7 +1013,7 @@
 
                 /* Notify PGM about NXE changes. */
-                if (   (uValue        & MSR_K6_EFER_NXE)
+                if (   (uOldEFER                    & MSR_K6_EFER_NXE)
                     != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
-                    PGMNotifyNxeChanged(pVCpu, !!(uValue & MSR_K6_EFER_NXE));
+                    PGMNotifyNxeChanged(pVCpu, !(uOldEFER & MSR_K6_EFER_NXE));
             }
             break;
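The corrected test compares the previous EFER value with the newly stored one
instead of the raw uValue input.  An equivalent formulation of the check
(sketch):

    uint64_t const fEferChanged = (uOldEFER ^ pVCpu->cpum.s.Guest.msrEFER)
                                & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA);
    if (fEferChanged & MSR_K6_EFER_NXE)
        /* the new NXE state is the inverse of the old one when it changed */
        PGMNotifyNxeChanged(pVCpu, !(uOldEFER & MSR_K6_EFER_NXE));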
Index: /trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp	(revision 30888)
+++ /trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp	(revision 30889)
@@ -1035,5 +1035,5 @@
  * @param   pvUser      Pointer to the MMIO ring-3 range entry.
  */
-int iomMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
+static int iomMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
 {
     /* Take the IOM lock before performing any MMIO. */
@@ -1251,4 +1251,10 @@
     LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
              GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
+    if (!pvUser)
+    {
+        int rc = iomLock(pVM);
+        AssertRC(rc);
+        pvUser = iomMMIOGetRange(&pVM->iom.s, GCPhysFault);
+        iomUnlock(pVM);
+    }
     VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, uErrorCode, pCtxCore, GCPhysFault, pvUser);
     return VBOXSTRICTRC_VAL(rcStrict);
Index: /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 30888)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 30889)
@@ -615,5 +615,5 @@
     const bool fWrite = !!(fAccess & X86_PTE_RW);
     const bool fUser  = !!(fAccess & X86_PTE_US);
-    if (  !(fPageGst & X86_PTE_P)
+    if (   !(fPageGst & X86_PTE_P)
         || (fWrite  && !(fPageGst & X86_PTE_RW))
         || (fUser   && !(fPageGst & X86_PTE_US)) )
@@ -633,7 +633,7 @@
         {
             /*
-            * Page is not present in our page tables.
-            * Try to sync it!
-            */
+             * Page is not present in our page tables.
+             * Try to sync it!
+             */
             Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
             uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
@@ -650,5 +650,5 @@
      * This check is a bit paranoid, but useful.
      */
-    /** @note this will assert when writing to monitored pages (a bit annoying actually) */
+    /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
     uint64_t fPageShw;
     rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
@@ -1371,84 +1371,115 @@
 
 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+
 /**
  * Performs the lazy mapping of the 32-bit guest PD.
  *
+ * @returns VBox status code.
+ * @param   pVCpu       The current CPU.
+ * @param   ppPd        Where to return the pointer to the mapping.  This is
+ *                      always set.
+ */
+int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
+{
+    PVM         pVM       = pVCpu->CTX_SUFF(pVM);
+    pgmLock(pVM);
+
+    Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
+
+    RTGCPHYS    GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
+    PPGMPAGE    pPage;
+    int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage);
+    if (RT_SUCCESS(rc))
+    {
+        RTHCPTR HCPtrGuestCR3;
+        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
+        if (RT_SUCCESS(rc))
+        {
+            pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+            pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
+# endif
+            *ppPd = (PX86PD)HCPtrGuestCR3;
+
+            pgmUnlock(pVM);
+            return VINF_SUCCESS;
+        }
+
+        AssertRC(rc);
+    }
+    pgmUnlock(pVM);
+
+    *ppPd = NULL;
+    return rc;
+}
+
+
+/**
+ * Performs the lazy mapping of the PAE guest PDPT.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The current CPU.
+ * @param   ppPdpt      Where to return the pointer to the mapping.  This is
+ *                      always set.
+ */
+int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
+{
+    Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
+    PVM     pVM = pVCpu->CTX_SUFF(pVM);
+    pgmLock(pVM);
+
+    RTGCPHYS    GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
+    PPGMPAGE    pPage;
+    int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage);
+    if (RT_SUCCESS(rc))
+    {
+        RTHCPTR HCPtrGuestCR3;
+        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
+        if (RT_SUCCESS(rc))
+        {
+            pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+            pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
+# endif
+            *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
+
+            pgmUnlock(pVM);
+            return VINF_SUCCESS;
+        }
+
+        AssertRC(rc);
+    }
+
+    pgmUnlock(pVM);
+    *ppPdpt = NULL;
+    return rc;
+}
+
+
+/**
+ * Performs the lazy mapping / updating of a PAE guest PD.
+ *
- * @returns Pointer to the mapping.
- * @param   pPGM        The PGM instance data.
- */
-PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM)
-{
-    Assert(!pPGM->CTX_SUFF(pGst32BitPd));
-    PVM pVM = PGMCPU2VM(pPGM);
+ * @returns VBox status code.
+ * @param   pVCpu       The current CPU.
+ * @param   iPdpt       Which PD entry to map (0..3).
+ * @param   ppPd        Where to return the pointer to the mapping.  This is
+ *                      always set.
+ */
+int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
+{
+    PVM             pVM         = pVCpu->CTX_SUFF(pVM);
     pgmLock(pVM);
 
-    PPGMPAGE    pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
-    AssertReturn(pPage, NULL);
-
-    RTHCPTR     HCPtrGuestCR3;
-    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3);
-    AssertRCReturn(rc, NULL);
-
-    pPGM->pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
-# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
-    pPGM->pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
-# endif
-
-    pgmUnlock(pVM);
-    return pPGM->CTX_SUFF(pGst32BitPd);
-}
-
-
-/**
- * Performs the lazy mapping of the PAE guest PDPT.
- *
- * @returns Pointer to the mapping.
- * @param   pPGM        The PGM instance data.
- */
-PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM)
-{
-    Assert(!pPGM->CTX_SUFF(pGstPaePdpt));
-    PVM pVM = PGMCPU2VM(pPGM);
-    pgmLock(pVM);
-
-    PPGMPAGE    pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
-    AssertReturn(pPage, NULL);
-
-    RTHCPTR     HCPtrGuestCR3;
-    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAE_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysR3 masking isn't necessary. */
-    AssertRCReturn(rc, NULL);
-
-    pPGM->pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
-# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
-    pPGM->pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
-# endif
-
-    pgmUnlock(pVM);
-    return pPGM->CTX_SUFF(pGstPaePdpt);
-}
-
-
-/**
- * Performs the lazy mapping / updating of a PAE guest PD.
- *
- * @returns Pointer to the mapping.
- * @param   pPGM        The PGM instance data.
- * @param   iPdpt       Which PD entry to map (0..3).
- */
-PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt)
-{
-    PVM             pVM         = PGMCPU2VM(pPGM);
-    pgmLock(pVM);
-
-    PX86PDPT        pGuestPDPT  = pPGM->CTX_SUFF(pGstPaePdpt);
+    PX86PDPT        pGuestPDPT  = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
     Assert(pGuestPDPT);
     Assert(pGuestPDPT->a[iPdpt].n.u1Present);
-    RTGCPHYS        GCPhys      = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
-    bool const      fChanged    = pPGM->aGCPhysGstPaePDs[iPdpt] != GCPhys;
-
-    PPGMPAGE        pPage       = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
-    if (RT_LIKELY(pPage))
-    {
-        int         rc          = VINF_SUCCESS;
+    RTGCPHYS        GCPhys      = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK_FULL;
+    bool const      fChanged    = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
+
+    PPGMPAGE        pPage;
+    int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
+    if (RT_SUCCESS(rc))
+    {
         RTRCPTR     RCPtr       = NIL_RTRCPTR;
         RTHCPTR     HCPtr       = NIL_RTHCPTR;
@@ -1464,60 +1495,74 @@
         if (RT_SUCCESS(rc))
         {
-            pPGM->apGstPaePDsR3[iPdpt]          = (R3PTRTYPE(PX86PDPAE))HCPtr;
+            pVCpu->pgm.s.apGstPaePDsR3[iPdpt]          = (R3PTRTYPE(PX86PDPAE))HCPtr;
 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
-            pPGM->apGstPaePDsR0[iPdpt]          = (R0PTRTYPE(PX86PDPAE))HCPtr;
+            pVCpu->pgm.s.apGstPaePDsR0[iPdpt]          = (R0PTRTYPE(PX86PDPAE))HCPtr;
 # endif
             if (fChanged)
             {
-                pPGM->aGCPhysGstPaePDs[iPdpt]   = GCPhys;
-                pPGM->apGstPaePDsRC[iPdpt]      = (RCPTRTYPE(PX86PDPAE))RCPtr;
+                pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt]   = GCPhys;
+                pVCpu->pgm.s.apGstPaePDsRC[iPdpt]      = (RCPTRTYPE(PX86PDPAE))RCPtr;
             }
 
+            *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
             pgmUnlock(pVM);
-            return pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
+            return VINF_SUCCESS;
         }
     }
 
     /* Invalid page or some failure, invalidate the entry. */
-    pPGM->aGCPhysGstPaePDs[iPdpt]   = NIL_RTGCPHYS;
-    pPGM->apGstPaePDsR3[iPdpt]      = 0;
+    pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt]   = NIL_RTGCPHYS;
+    pVCpu->pgm.s.apGstPaePDsR3[iPdpt]      = 0;
 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
-    pPGM->apGstPaePDsR0[iPdpt]      = 0;
+    pVCpu->pgm.s.apGstPaePDsR0[iPdpt]      = 0;
 # endif
-    pPGM->apGstPaePDsRC[iPdpt]      = 0;
+    pVCpu->pgm.s.apGstPaePDsRC[iPdpt]      = 0;
 
     pgmUnlock(pVM);
-    return NULL;
-}
+    return rc;
+}
+
 #endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
-
 #if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+
 /**
- * Performs the lazy mapping of the 32-bit guest PD.
+ * Performs the lazy mapping of the AMD64 guest PML4.
  *
- * @returns Pointer to the mapping.
- * @param   pPGM        The PGM instance data.
- */
-PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM)
-{
-    Assert(!pPGM->CTX_SUFF(pGstAmd64Pml4));
-    PVM pVM = PGMCPU2VM(pPGM);
+ * @returns VBox status code.
+ * @param   pVCpu       The current CPU.
+ * @param   ppPml4      Where to return the pointer to the mapping.  This will
+ *                      always be set.
+ */
+int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
+{
+    Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
+    PVM         pVM = pVCpu->CTX_SUFF(pVM);
     pgmLock(pVM);
 
-    PPGMPAGE    pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
-    AssertReturn(pPage, NULL);
-
-    RTHCPTR     HCPtrGuestCR3;
-    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
-    AssertRCReturn(rc, NULL);
-
-    pPGM->pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
-# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
-    pPGM->pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
-# endif
+    RTGCPHYS    GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
+    PPGMPAGE    pPage;
+    int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage);
+    if (RT_SUCCESS(rc))
+    {
+        RTHCPTR HCPtrGuestCR3;
+        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
+        if (RT_SUCCESS(rc))
+        {
+            pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
+#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+            pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
+#  endif
+            *ppPml4 = (PX86PML4)HCPtrGuestCR3;
+
+            pgmUnlock(pVM);
+            return VINF_SUCCESS;
+        }
+    }
 
     pgmUnlock(pVM);
-    return pPGM->CTX_SUFF(pGstAmd64Pml4);
-}
+    *ppPml4 = NULL;
+    return rc;
+}
+
 #endif
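The lazy-map functions now return a status code and hand the mapping back
through an out parameter, so callers can propagate failures (e.g.
VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS) instead of asserting on NULL.  A sketch
of the new call pattern:

    PX86PML4 pGuestPml4;
    int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
    if (RT_FAILURE(rc))
        return rc;                  /* pGuestPml4 is NULL on failure */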
 
@@ -1532,5 +1577,5 @@
 {
     Assert(iPdpt <= 3);
-    return pgmGstGetPaePDPTPtr(&pVCpu->pgm.s)->a[iPdpt & 3];
+    return pgmGstGetPaePDPTPtr(pVCpu)->a[iPdpt & 3];
 }
 
@@ -1684,4 +1729,107 @@
         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
     LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
+
+    /*
+     * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
+     */
+    int rc = VINF_SUCCESS;
+    RTGCPHYS GCPhysCR3;
+    switch (pVCpu->pgm.s.enmGuestMode)
+    {
+        case PGMMODE_PAE:
+        case PGMMODE_PAE_NX:
+            GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
+            break;
+        case PGMMODE_AMD64:
+        case PGMMODE_AMD64_NX:
+            GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
+            break;
+        default:
+            GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
+            break;
+    }
+
+    if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
+    {
+        RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
+        pVCpu->pgm.s.GCPhysCR3  = GCPhysCR3;
+        rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
+        if (RT_LIKELY(rc == VINF_SUCCESS))
+        {
+            if (pgmMapAreMappingsFloating(&pVM->pgm.s))
+                pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
+        }
+        else
+        {
+            AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
+            Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
+            pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
+            pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
+            if (pgmMapAreMappingsFloating(&pVM->pgm.s))
+                pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
+        }
+
+        if (fGlobal)
+            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
+        else
+            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
+    }
+    else
+    {
+# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
+        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+        if (pPool->cDirtyPages)
+        {
+            pgmLock(pVM);
+            pgmPoolResetDirtyPages(pVM);
+            pgmUnlock(pVM);
+        }
+# endif
+        /*
+         * Check if we have a pending update of the CR3 monitoring.
+         */
+        if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
+        {
+            pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
+            Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
+        }
+        if (fGlobal)
+            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
+        else
+            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
+    }
+
+    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
+    return rc;
+}
+
+
+/**
+ * Performs and schedules necessary updates following a CR3 load or reload when
+ * using nested or extended paging.
+ *
+ * This API is an alternative to PGMFlushTLB that avoids actually flushing the
+ * TLB and triggering a SyncCR3.
+ *
+ * This will normally involve mapping the guest PD or nPDPT.
+ *
+ * @returns VBox status code.
+ * @retval  VINF_SUCCESS.
+ * @retval  (If called when not in nested mode: VINF_PGM_SYNC_CR3 if monitoring
+ *          requires a CR3 sync. This can safely be ignored and overridden since
+ *          the FF will be set then too.)
+ * @param   pVCpu       VMCPU handle.
+ * @param   cr3         The new cr3.
+ */
+VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+
+    LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
+
+    /* We assume we're only called in nested paging mode. */
+    Assert(HWACCMIsNestedPagingActive(pVM) || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
+    Assert(pVM->pgm.s.fMappingsDisabled);
+    Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
 
     /*
@@ -1704,107 +1852,4 @@
         break;
     }
-
-    if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
-    {
-        RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
-        pVCpu->pgm.s.GCPhysCR3  = GCPhysCR3;
-        rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
-        if (RT_LIKELY(rc == VINF_SUCCESS))
-        {
-            if (pgmMapAreMappingsFloating(&pVM->pgm.s))
-                pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
-        }
-        else
-        {
-            AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
-            Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
-            pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
-            pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
-            if (pgmMapAreMappingsFloating(&pVM->pgm.s))
-                pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
-        }
-
-        if (fGlobal)
-            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
-        else
-            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
-    }
-    else
-    {
-# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
-        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
-        if (pPool->cDirtyPages)
-        {
-            pgmLock(pVM);
-            pgmPoolResetDirtyPages(pVM);
-            pgmUnlock(pVM);
-        }
-# endif
-        /*
-         * Check if we have a pending update of the CR3 monitoring.
-         */
-        if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
-        {
-            pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
-            Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
-        }
-        if (fGlobal)
-            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
-        else
-            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
-    }
-
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
-    return rc;
-}
-
-
-/**
- * Performs and schedules necessary updates following a CR3 load or reload when
- * using nested or extended paging.
- *
- * This API is an alterantive to PDMFlushTLB that avoids actually flushing the
- * TLB and triggering a SyncCR3.
- *
- * This will normally involve mapping the guest PD or nPDPT
- *
- * @returns VBox status code.
- * @retval  VINF_SUCCESS.
- * @retval  (If applied when not in nested mode: VINF_PGM_SYNC_CR3 if monitoring
- *          requires a CR3 sync. This can safely be ignored and overridden since
- *          the FF will be set too then.)
- * @param   pVCpu       VMCPU handle.
- * @param   cr3         The new cr3.
- */
-VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
-{
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-
-    LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
-
-    /* We assume we're only called in nested paging mode. */
-    Assert(HWACCMIsNestedPagingActive(pVM) || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
-    Assert(pVM->pgm.s.fMappingsDisabled);
-    Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
-
-    /*
-     * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
-     */
-    int rc = VINF_SUCCESS;
-    RTGCPHYS GCPhysCR3;
-    switch (pVCpu->pgm.s.enmGuestMode)
-    {
-    case PGMMODE_PAE:
-    case PGMMODE_PAE_NX:
-        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
-        break;
-    case PGMMODE_AMD64:
-    case PGMMODE_AMD64_NX:
-        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
-        break;
-    default:
-        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
-        break;
-    }
     if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
     {
@@ -2113,5 +2158,33 @@
 VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
 {
-    /* later */
+    Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
+    if (fNxe)
+    {
+        /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
+        pVCpu->pgm.s.fGstPaeMbzPteMask       &= ~X86_PTE_PAE_NX;
+        pVCpu->pgm.s.fGstPaeMbzPdeMask       &= ~X86_PDE_PAE_NX;
+        pVCpu->pgm.s.fGstPaeMbzBigPdeMask    &= ~X86_PDE2M_PAE_NX;
+        /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
+        pVCpu->pgm.s.fGstAmd64MbzPteMask     &= ~X86_PTE_PAE_NX;
+        pVCpu->pgm.s.fGstAmd64MbzPdeMask     &= ~X86_PDE_PAE_NX;
+        pVCpu->pgm.s.fGstAmd64MbzBigPdeMask  &= ~X86_PDE2M_PAE_NX;
+        pVCpu->pgm.s.fGstAmd64MbzPdpeMask    &= ~X86_PDPE_LM_NX;
+        pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
+        pVCpu->pgm.s.fGstAmd64MbzPml4eMask   &= ~X86_PML4E_NX;
+    }
+    else
+    {
+        /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
+        pVCpu->pgm.s.fGstPaeMbzPteMask       |= X86_PTE_PAE_NX;
+        pVCpu->pgm.s.fGstPaeMbzPdeMask       |= X86_PDE_PAE_NX;
+        pVCpu->pgm.s.fGstPaeMbzBigPdeMask    |= X86_PDE2M_PAE_NX;
+        /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
+        pVCpu->pgm.s.fGstAmd64MbzPteMask     |= X86_PTE_PAE_NX;
+        pVCpu->pgm.s.fGstAmd64MbzPdeMask     |= X86_PDE_PAE_NX;
+        pVCpu->pgm.s.fGstAmd64MbzBigPdeMask  |= X86_PDE2M_PAE_NX;
+        pVCpu->pgm.s.fGstAmd64MbzPdpeMask    |= X86_PDPE_LM_NX;
+        pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
+        pVCpu->pgm.s.fGstAmd64MbzPml4eMask   |= X86_PML4E_NX;
+    }
 }
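Bit 63 is only reserved while EFER.NXE is clear, so the invariant maintained
here is that the NX bit sits in the MBZ masks exactly when fNxe is false
(sketch):

    Assert(RT_BOOL(pVCpu->pgm.s.fGstPaeMbzPteMask & X86_PTE_PAE_NX) == !fNxe);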
 
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 30888)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 30889)
@@ -3,9 +3,18 @@
  * VBox - Page Manager, Shadow+Guest Paging Template - All context code.
  *
- * This file is a big challenge!
+ * @remarks The nested page tables on AMD make use of PGM_SHW_TYPE in
+ *          {PGM_TYPE_AMD64, PGM_TYPE_PAE and PGM_TYPE_32BIT} and PGM_GST_TYPE
+ *          set to PGM_TYPE_PROT.  Half of the code in this file is not
+ *          exercised with PGM_SHW_TYPE set to PGM_TYPE_NESTED.
+ *
+ * @remarks Extended page tables (Intel) are built with PGM_GST_TYPE set to
+ *          PGM_TYPE_PROT (and PGM_SHW_TYPE set to PGM_TYPE_EPT).
+ *
+ * @remarks This file is one big \#ifdef-orgy!
+ *
  */
 
 /*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2010 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ -17,4 +26,5 @@
  * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
  */
+
 
 /*******************************************************************************
@@ -40,5 +50,8 @@
 
 
-/* Filter out some illegal combinations of guest and shadow paging, so we can remove redundant checks inside functions. */
+/*
+ * Filter out some illegal combinations of guest and shadow paging, so we can
+ * remove redundant checks inside functions.
+ */
 #if      PGM_GST_TYPE == PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
 # error "Invalid combination; PAE guest implies PAE shadow"
@@ -83,5 +96,6 @@
 # endif
 
-# if  (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
+# if  (   PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT \
+       || PGM_GST_TYPE == PGM_TYPE_PAE   || PGM_GST_TYPE == PGM_TYPE_AMD64) \
     && PGM_SHW_TYPE != PGM_TYPE_NESTED    \
     && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
@@ -106,5 +120,5 @@
 #   if PGM_GST_TYPE == PGM_TYPE_32BIT
     const unsigned  iPDSrc = pvFault >> GST_PD_SHIFT;
-    PGSTPD          pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
+    PGSTPD          pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
 
 #   elif PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
@@ -113,13 +127,13 @@
     unsigned        iPDSrc = 0;                 /* initialized to shut up gcc */
     X86PDPE         PdpeSrc;
-    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVCpu->pgm.s, pvFault, &iPDSrc, &PdpeSrc);
+    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(pVCpu, pvFault, &iPDSrc, &PdpeSrc);
 
 #    elif PGM_GST_TYPE == PGM_TYPE_AMD64
     unsigned        iPDSrc = 0;                 /* initialized to shut up gcc */
-    PX86PML4E       pPml4eSrc;
+    PX86PML4E       pPml4eSrc = NULL;           /* ditto */
     X86PDPE         PdpeSrc;
     PGSTPD          pPDSrc;
 
-    pPDSrc = pgmGstGetLongModePDPtr(&pVCpu->pgm.s, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc);
+    pPDSrc = pgmGstGetLongModePDPtr(pVCpu, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc);
     Assert(pPml4eSrc);
 #    endif
@@ -158,5 +172,10 @@
 # endif
 
-    /* First check for a genuine guest page fault. */
+    /*
+     * First check for a genuine guest page fault.
+     */
+    /** @todo This duplicates the page table walk we're doing below. Need to
+     *        find some way to avoid this double work, probably by caching
+     *        the data. */
 #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
@@ -165,6 +184,5 @@
     if (rc == VINF_EM_RAW_GUEST_TRAP)
     {
-        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
-                    = &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
+        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
         return rc;
     }
@@ -175,5 +193,7 @@
     pgmLock(pVM);
 
-    /* Fetch the guest PDE */
+    /*
+     * Fetch the guest PDE, PDPE and PML4E.
+     */
 #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
@@ -283,5 +303,5 @@
     if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
         &&  !pPDDst->a[iPDDst].n.u1Present
-    )
+       )
     {
         STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
@@ -390,5 +410,5 @@
 #   endif
             )
-            GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc)
+            GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc)
                     | ((RTGCPHYS)pvFault & (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK));
         else
@@ -1098,5 +1118,5 @@
      */
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
-    PGSTPD          pPDSrc      = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
+    PGSTPD          pPDSrc      = pgmGstGet32bitPDPtr(pVCpu);
     const unsigned  iPDSrc      = GCPtrPage >> GST_PD_SHIFT;
     GSTPDE          PdeSrc      = pPDSrc->a[iPDSrc];
@@ -1105,9 +1125,9 @@
 #  if PGM_GST_TYPE == PGM_TYPE_PAE
     X86PDPE         PdpeSrc;
-    PX86PDPAE       pPDSrc      = pgmGstGetPaePDPtr(&pVCpu->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
+    PX86PDPAE       pPDSrc      = pgmGstGetPaePDPtr(pVCpu, GCPtrPage, &iPDSrc, &PdpeSrc);
 #  else /* AMD64 */
     PX86PML4E       pPml4eSrc;
     X86PDPE         PdpeSrc;
-    PX86PDPAE       pPDSrc      = pgmGstGetLongModePDPtr(&pVCpu->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
+    PX86PDPAE       pPDSrc      = pgmGstGetLongModePDPtr(pVCpu, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
 #  endif
     GSTPDE          PdeSrc;
@@ -1224,5 +1244,5 @@
             /* Before freeing the page, check if anything really changed. */
             PPGMPOOLPAGE    pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
-            RTGCPHYS        GCPhys   = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
+            RTGCPHYS        GCPhys   = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc);
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
             /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
@@ -1384,9 +1404,74 @@
 
 /**
+ * Modifies a shadow PTE to account for access handlers.
+ *
+ * @param   pVM         The VM handle.
+ * @param   pPage       The page in question.
+ * @param   fPteSrc     The flags of the source PTE.
+ * @param   pPteDst     The shadow PTE (output).
+ */
+DECLINLINE(void) PGM_BTH_NAME(SyncHandlerPte)(PVM pVM, PCPGMPAGE pPage, uint32_t fPteSrc, PSHWPTE pPteDst)
+{
+    /** @todo r=bird: Are we actually handling dirty and access bits for pages with access handlers correctly? No.
+     *  Update: \#PF should deal with this before or after calling the handlers. It has all the info to do the job efficiently. */
+    if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
+    {
+#if PGM_SHW_TYPE == PGM_TYPE_EPT
+        pPteDst->u             = PGM_PAGE_GET_HCPHYS(pPage);
+        pPteDst->n.u1Present   = 1;
+        pPteDst->n.u1Execute   = 1;
+        pPteDst->n.u1IgnorePAT = 1;
+        pPteDst->n.u3EMT       = VMX_EPT_MEMTYPE_WB;
+        /* PteDst.n.u1Write = 0 && PteDst.n.u1Size = 0 */
+#else
+        pPteDst->u = (fPteSrc & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
+                   | PGM_PAGE_GET_HCPHYS(pPage);
+#endif
+    }
+#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
+# if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
+    else if (   PGM_PAGE_IS_MMIO(pPage)
+#  if PGM_SHW_TYPE != PGM_TYPE_EPT
+             && (       (fPteSrc & (X86_PTE_RW /*| X86_PTE_D | X86_PTE_A*/ | X86_PTE_US )) /* #PF handles D & A first. */
+                    ==             (X86_PTE_RW /*| X86_PTE_D | X86_PTE_A*/)
+                 || BTH_IS_NP_ACTIVE(pVM) )
+#  endif
+#  if PGM_SHW_TYPE == PGM_TYPE_AMD64
+             && pVM->pgm.s.fLessThan52PhysicalAddressBits
+#  endif
+            )
+    {
+        LogFlow(("SyncHandlerPte: MMIO page -> invalid \n"));
+#  if PGM_SHW_TYPE == PGM_TYPE_EPT
+        /* 25.2.3.1: Reserved physical address bit -> EPT Misconfiguration (exit 49) */
+        pPteDst->u = pVM->pgm.s.HCPhysInvMmioPg;
+        /* 25.2.3.1: bits 2:0 = 010b -> EPT Misconfiguration (exit 49) */
+        pPteDst->n.u1Present = 0;
+        pPteDst->n.u1Write   = 1;
+        pPteDst->n.u1Execute = 0;
+        /* 25.2.3.1: leaf && 2:0 != 0 && u3Emt in {2, 3, 7} -> EPT Misconfiguration */
+        pPteDst->n.u3EMT     = 7;
+#  else
+        /* Set high page frame bits that MBZ (bits 63:52 on PAE, CPU dependent on AMD64). */
+        pPteDst->u = pVM->pgm.s.HCPhysInvMmioPg | X86_PTE_PAE_MBZ_MASK_NO_NX | X86_PTE_P;
+#  endif
+    }
+# endif
+#endif /* PGM_WITH_MMIO_OPTIMIZATIONS */
+    else
+    {
+        LogFlow(("SyncHandlerPte: monitored page (%R[pgmpage]) -> mark not present\n", pPage));
+        pPteDst->u = 0;
+    }
+    /** @todo count these kinds of entries. */
+}
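Two independent properties of the EPT entry built above force an EPT
misconfiguration VM-exit (exit 49) rather than an ordinary EPT violation:
bits 2:0 = 010b is an illegal access combination, and EMT=7 is a reserved
memory type for a leaf entry.  On the PAE/AMD64 shadow side the same effect
comes from the must-be-zero physical address bits, which turn an access into
a #PF with X86_TRAP_PF_RSVD set.  A hypothetical fast path in the #PF handler
could then dispatch MMIO without walking anything (sketch; the helper name is
assumed, not part of this change set):

    if ((uErr & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
        rc = pgmHandleMmioMarkerFault(pVCpu, uErr, pCtxCore, pvFault); /* assumed helper */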
+
+
+/**
  * Creates a 4K shadow page for a guest page.
  *
  * For 4M pages the caller must convert the PDE4M to a PTE, this includes adjusting the
- * physical address. The PdeSrc argument only the flags are used. No page structured
- * will be mapped in this function.
+ * physical address.  Only the flags of the PdeSrc argument are used.  No page
+ * structures will be mapped in this function.
  *
  * @param   pVCpu       The VMCPU handle.
@@ -1400,13 +1485,15 @@
  * @remark  Not used for 2/4MB pages!
  */
-DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVMCPU pVCpu, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst)
+DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVMCPU pVCpu, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc,
+                                              PPGMPOOLPAGE pShwPage, unsigned iPTDst)
 {
-    if (PteSrc.n.u1Present)
+    if (   PteSrc.n.u1Present
+        && GST_IS_PTE_VALID(pVCpu, PteSrc))
     {
         PVM pVM = pVCpu->CTX_SUFF(pVM);
 
-# if    defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT)                            \
-     && PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)                         \
-     && (PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64)
+# if defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) \
+  && PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) \
+  && (PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64)
         if (pShwPage->fDirty)
         {
@@ -1425,10 +1512,15 @@
         if (RT_SUCCESS(rc))
         {
-            /* Ignore ballooned pages. Don't return errors or use a fatal assert here as part of a shadow sync range might included ballooned pages. */
+            /* Ignore ballooned pages.
+               Don't return errors or use a fatal assert here, as part of a
+               shadow sync range might include ballooned pages. */
             if (PGM_PAGE_IS_BALLOONED(pPage))
+            {
+                Assert(!pPteDst->n.u1Present); /** @todo user tracking needs updating if this triggers. */
                 return;
+            }
 
 #ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
-            /* Try to make the page writable if necessary. */
+            /* Make the page writable if necessary. */
             if (    PGM_PAGE_GET_TYPE(pPage)  == PGMPAGETYPE_RAM
                 &&  (   PGM_PAGE_IS_ZERO(pPage)
@@ -1450,5 +1542,4 @@
 #endif
 
-            /** @todo investiage PWT, PCD and PAT. */
             /*
              * Make page table entry.
@@ -1456,27 +1547,8 @@
             SHWPTE PteDst;
             if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
-            {
-                /** @todo r=bird: Are we actually handling dirty and access bits for pages with access handlers correctly? No. */
-                if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
-                {
-#if PGM_SHW_TYPE == PGM_TYPE_EPT
-                    PteDst.u             = PGM_PAGE_GET_HCPHYS(pPage);
-                    PteDst.n.u1Present   = 1;
-                    PteDst.n.u1Execute   = 1;
-                    PteDst.n.u1IgnorePAT = 1;
-                    PteDst.n.u3EMT       = VMX_EPT_MEMTYPE_WB;
-                    /* PteDst.n.u1Write = 0 && PteDst.n.u1Size = 0 */
-#else
-                    PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
-                             | PGM_PAGE_GET_HCPHYS(pPage);
-#endif
-                }
-                else
-                {
-                    LogFlow(("SyncPageWorker: monitored page (%RHp) -> mark not present\n", PGM_PAGE_GET_HCPHYS(pPage)));
-                    PteDst.u = 0;
-                }
-                /** @todo count these two kinds. */
-            }
+                PGM_BTH_NAME(SyncHandlerPte)(pVM, pPage,
+                                             PteSrc.u & ~(  X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT
+                                                          | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW),
+                                             &PteDst);
             else
             {
@@ -1492,10 +1564,9 @@
                     PteDst.u = 0;
                 }
-                else
                 /*
                  * If the page is not flagged as dirty and is writable, then make it read-only, so we can set the dirty bit
                  * when the page is modified.
                  */
-                if (!PteSrc.n.u1Dirty && (PdeSrc.n.u1Write & PteSrc.n.u1Write))
+                else if (!PteSrc.n.u1Dirty && (PdeSrc.n.u1Write & PteSrc.n.u1Write))
                 {
                     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPage));
@@ -1521,17 +1592,17 @@
 #endif
                 }
-            }
-
-            /*
-             * Make sure only allocated pages are mapped writable.
-             */
-            if (    PteDst.n.u1Write
-                &&  PteDst.n.u1Present
-                &&  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
-            {
-                /* Still applies to shared pages. */
-                Assert(!PGM_PAGE_IS_ZERO(pPage));
-                PteDst.n.u1Write = 0;   /** @todo this isn't quite working yet. */
-                Log3(("SyncPageWorker: write-protecting %RGp pPage=%R[pgmpage]at iPTDst=%d\n", (RTGCPHYS)(PteSrc.u & X86_PTE_PAE_PG_MASK), pPage, iPTDst));
+
+                /*
+                 * Make sure only allocated pages are mapped writable.
+                 */
+                if (    PteDst.n.u1Write
+                    &&  PteDst.n.u1Present
+                    &&  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
+                {
+                    /* Still applies to shared pages. */
+                    Assert(!PGM_PAGE_IS_ZERO(pPage));
+                    PteDst.n.u1Write = 0;   /** @todo this isn't quite working yet. Why isn't it? */
+                    Log3(("SyncPageWorker: write-protecting %RGp pPage=%R[pgmpage]at iPTDst=%d\n", (RTGCPHYS)(PteSrc.u & X86_PTE_PAE_PG_MASK), pPage, iPTDst));
+                }
             }
 
@@ -1564,23 +1635,25 @@
 #endif
             ASMAtomicWriteSize(pPteDst, PteDst.u);
+            return;
         }
-        /* else MMIO or invalid page, we must handle them manually in the #PF handler. */
-        /** @todo count these. */
-    }
+
+        /** @todo count these three different kinds. */
+        Log2(("SyncPageWorker: invalid address in Pte\n"));
+    }
+    else if (!PteSrc.n.u1Present)
+        Log2(("SyncPageWorker: page not present in Pte\n"));
     else
-    {
-        /*
-         * Page not-present.
-         */
-        Log2(("SyncPageWorker: page not present in Pte\n"));
-        /* Keep user track up to date. */
-        if (pPteDst->n.u1Present)
-        {
-            Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
-            PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK, iPTDst);
-        }
-        ASMAtomicWriteSize(pPteDst, 0);
-        /** @todo count these. */
-    }
+        Log2(("SyncPageWorker: invalid Pte\n"));
+
+    /*
+     * The page is not present or the PTE is bad. Replace the shadow PTE by
+     * an empty entry, making sure to keep the user tracking up to date.
+     */
+    if (pPteDst->n.u1Present)
+    {
+        Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
+        PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK, iPTDst);
+    }
+    ASMAtomicWriteSize(pPteDst, 0);
 }
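GST_IS_PTE_VALID presumably checks the per-VCPU must-be-zero mask introduced
in PGMInternal.h above; its definition lives in the per-mode guest headers,
which are not part of this hunk.  A sketch of the assumed AMD64 shape:

    #define GST_IS_PTE_VALID(pVCpu, Pte) \
        ( !( (Pte).u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask ) )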
 
@@ -1591,4 +1664,7 @@
  * There are no conflicts at this point, neither is there any need for
  * page table allocations.
+ *
+ * When called in PAE or AMD64 guest mode, the guest PDPE shall be valid.
+ * When called in AMD64 guest mode, the guest PML4E shall be valid.
  *
  * @returns VBox status code.
@@ -1686,8 +1762,9 @@
      */
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
-    const bool      fBigPage = PdeSrc.b.u1Size && CPUMIsGuestPageSizeExtEnabled(pVCpu);
+    const bool      fBigPage  = PdeSrc.b.u1Size && CPUMIsGuestPageSizeExtEnabled(pVCpu);
 # else
-    const bool      fBigPage = PdeSrc.b.u1Size;
-# endif
+    const bool      fBigPage  = PdeSrc.b.u1Size;
+# endif
+    const bool      fPdeValid = !fBigPage ? GST_IS_PDE_VALID(pVCpu, PdeSrc) : GST_IS_BIG_PDE_VALID(pVCpu, PdeSrc);
     RTGCPHYS        GCPhys;
     if (!fBigPage)
@@ -1701,5 +1778,5 @@
     else
     {
-        GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
+        GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc);
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
         /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
@@ -1707,7 +1784,8 @@
 # endif
     }
-    if (    pShwPage->GCPhys == GCPhys
+    if (    fPdeValid
+        &&  pShwPage->GCPhys == GCPhys
         &&  PdeSrc.n.u1Present
-        &&  (PdeSrc.n.u1User == PdeDst.n.u1User)
+        &&  PdeSrc.n.u1User == PdeDst.n.u1User
         &&  (PdeSrc.n.u1Write == PdeDst.n.u1Write || !PdeDst.n.u1Write)
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
@@ -1819,5 +1897,5 @@
                  */
                 /* Calculate the GC physical address of this 4KB shadow page. */
-                GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc) | (GCPtrPage & GST_BIG_PAGE_OFFSET_MASK);
+                GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc) | (GCPtrPage & GST_BIG_PAGE_OFFSET_MASK);
                 /* Find ram range. */
                 PPGMPAGE pPage;
@@ -1852,13 +1930,12 @@
                      */
                     SHWPTE PteDst;
-                    PteDst.u = (PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
-                             | PGM_PAGE_GET_HCPHYS(pPage);
                     if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
-                    {
-                        if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
-                            PteDst.n.u1Write = 0;
-                        else
-                            PteDst.u = 0;
-                    }
+                        PGM_BTH_NAME(SyncHandlerPte)(pVM, pPage,
+                                                     PdeSrc.u & ~(  X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK
+                                                                  | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT),
+                                                     &PteDst);
+                    else
+                        PteDst.u = (PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
+                                 | PGM_PAGE_GET_HCPHYS(pPage);
 
                     const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
@@ -1909,5 +1986,8 @@
                 }
                 else
+                {
                     LogFlow(("PGM_GCPHYS_2_PTR %RGp (big) failed with %Rrc\n", GCPhys, rc));
+                    /** @todo must wipe the shadow page table in this case. */
+                }
             }
 # if defined(IN_RC)
@@ -1917,7 +1997,8 @@
             return VINF_SUCCESS;
         }
+
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPagePDNAs));
     }
-    else
+    else if (fPdeValid)
     {
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPagePDOutOfSync));
@@ -1925,7 +2006,13 @@
               GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, GCPhys));
     }
+    else
+    {
+/// @todo        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPagePDOutOfSync));
+        Log2(("SyncPage: Bad PDE at %RGp PdeSrc=%RX64 PdeDst=%RX64 (GCPhys %RGp vs %RGp)\n",
+              GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, GCPhys));
+    }
 
     /*
-     * Mark the PDE not present. Restart the instruction and let #PF call SyncPT.
+     * Mark the PDE not present.  Restart the instruction and let #PF call SyncPT.
      * Yea, I'm lazy.
      */
@@ -1939,4 +2026,5 @@
     PGM_INVL_VCPU_TLBS(pVCpu);
     return VINF_PGM_SYNCPAGE_MODIFIED_PDE;
+
 
 #elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
@@ -1949,11 +2037,11 @@
      * Get the shadow PDE, find the shadow page table in the pool.
      */
-# if PGM_SHW_TYPE == PGM_TYPE_32BIT
+#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
     X86PDE          PdeDst = pgmShwGet32BitPDE(&pVCpu->pgm.s, GCPtrPage);
 
-# elif PGM_SHW_TYPE == PGM_TYPE_PAE
+#  elif PGM_SHW_TYPE == PGM_TYPE_PAE
     X86PDEPAE       PdeDst = pgmShwGetPaePDE(&pVCpu->pgm.s, GCPtrPage);
 
-# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
+#  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
     const unsigned  iPDDst   = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
     const unsigned  iPdpt    = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; NOREF(iPdpt);
@@ -1966,5 +2054,5 @@
     Assert(pPDDst && pPdptDst);
     PdeDst = pPDDst->a[iPDDst];
-# elif PGM_SHW_TYPE == PGM_TYPE_EPT
+#  elif PGM_SHW_TYPE == PGM_TYPE_EPT
     const unsigned  iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
     PEPTPD          pPDDst;
@@ -1979,5 +2067,5 @@
     Assert(pPDDst);
     PdeDst = pPDDst->a[iPDDst];
-# endif
+#  endif
     /* In the guest SMP case we could have blocked while another VCPU reused this page table. */
     if (!PdeDst.n.u1Present)
@@ -2024,7 +2112,6 @@
             if (!pPTDst->a[iPTDst].n.u1Present)
             {
-                GSTPTE PteSrc;
-
                 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
+                GSTPTE  PteSrc;
 
                 /* Fake the page table entry */
@@ -2056,7 +2143,7 @@
 # endif /* PGM_SYNC_N_PAGES */
     {
-        GSTPTE PteSrc;
-        const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
-        RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
+        const unsigned  iPTDst       = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
+        RTGCPTR         GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
+        GSTPTE          PteSrc;
 
         /* Fake the page table entry */
@@ -2087,11 +2174,83 @@
 
 #if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+
 /**
- * Investigate page fault and handle write protection page faults caused by
- * dirty bit tracking.
+ * CheckPageFault helper for returning a page fault indicating a non-present
+ * (NP) entry in the page translation structures.
+ *
+ * @returns VINF_EM_RAW_GUEST_TRAP.
+ * @param   pVCpu           The virtual CPU to operate on.
+ * @param   uErr            The error code of the shadow fault.  Corrections to
+ *                          TRPM's copy will be made if necessary.
+ * @param   GCPtrPage       For logging.
+ * @param   uPageFaultLevel For logging.
+ */
+DECLINLINE(int) PGM_BTH_NAME(CheckPageFaultReturnNP)(PVMCPU pVCpu, uint32_t uErr, RTGCPTR GCPtrPage, unsigned uPageFaultLevel)
+{
+    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
+    AssertMsg(!(uErr & X86_TRAP_PF_P), ("%#x\n", uErr));
+    AssertMsg(!(uErr & X86_TRAP_PF_RSVD), ("%#x\n", uErr));
+    if (uErr & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
+        TRPMSetErrorCode(pVCpu, uErr & ~(X86_TRAP_PF_RSVD | X86_TRAP_PF_P));
+
+    Log(("CheckPageFault: real page fault (notp) at %RGv (%d)\n", GCPtrPage, uPageFaultLevel));
+    return VINF_EM_RAW_GUEST_TRAP;
+}
+
+
+/**
+ * CheckPageFault helper for returning a page fault indicating a reserved bit
+ * (RSVD) error in the page translation structures.
+ *
+ * @returns VINF_EM_RAW_GUEST_TRAP.
+ * @param   pVCpu           The virtual CPU to operate on.
+ * @param   uErr            The error code of the shadow fault.  Corrections to
+ *                          TRPM's copy will be made if necessary.
+ * @param   GCPtrPage       For logging.
+ * @param   uPageFaultLevel For logging.
+ */
+DECLINLINE(int) PGM_BTH_NAME(CheckPageFaultReturnRSVD)(PVMCPU pVCpu, uint32_t uErr, RTGCPTR GCPtrPage, unsigned uPageFaultLevel)
+{
+    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
+    if ((uErr & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
+        TRPMSetErrorCode(pVCpu, uErr | X86_TRAP_PF_RSVD | X86_TRAP_PF_P);
+
+    Log(("CheckPageFault: real page fault (rsvd) at %RGv (%d)\n", GCPtrPage, uPageFaultLevel));
+    return VINF_EM_RAW_GUEST_TRAP;
+}
+
+
+/**
+ * CheckPageFault helper for returning a page protection fault (P).
+ *
+ * @returns VINF_EM_RAW_GUEST_TRAP.
+ * @param   pVCpu           The virtual CPU to operate on.
+ * @param   uErr            The error code of the shadow fault.  Corrections to
+ *                          TRPM's copy will be made if necessary.
+ * @param   GCPtrPage       For logging.
+ * @param   uPageFaultLevel For logging.
+ */
+DECLINLINE(int) PGM_BTH_NAME(CheckPageFaultReturnProt)(PVMCPU pVCpu, uint32_t uErr, RTGCPTR GCPtrPage, unsigned uPageFaultLevel)
+{
+    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
+    AssertMsg(uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID), ("%#x\n", uErr));
+    if ((uErr & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) != X86_TRAP_PF_P)
+        TRPMSetErrorCode(pVCpu, (uErr & ~X86_TRAP_PF_RSVD) | X86_TRAP_PF_P);
+
+    Log(("CheckPageFault: real page fault (prot) at %RGv (%d)\n", GCPtrPage, uPageFaultLevel));
+    return VINF_EM_RAW_GUEST_TRAP;
+}
+
+
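
The three helpers above differ only in how they correct the error code handed back through TRPM: a not-present fault must not carry P or RSVD, a reserved-bit fault must carry both, and a protection fault must carry P but not RSVD. Condensed into one standalone sketch (bit values assumed from the x86 #PF error code layout):

    #include <stdint.h>

    #define PF_P    UINT32_C(0x01)   /* page was present   */
    #define PF_RSVD UINT32_C(0x08)   /* reserved bit fault */

    typedef enum { FAULT_NP, FAULT_RSVD, FAULT_PROT } MYFAULT;

    /* Return the error code the guest should see for the given fault kind. */
    static uint32_t myFixupErrCode(uint32_t uErr, MYFAULT enmKind)
    {
        switch (enmKind)
        {
            case FAULT_NP:   return uErr & ~(PF_RSVD | PF_P);
            case FAULT_RSVD: return uErr |   PF_RSVD | PF_P;
            default:         return (uErr & ~PF_RSVD) | PF_P;
        }
    }
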
+/**
+ * Investigate a page fault to identify ones targeted at the guest and to
+ * handle write protection page faults caused by dirty bit tracking.
+ *
+ * This will also detect invalid entries and raise X86_TRAP_PF_RSVD.
  *
  * @returns VBox status code.
  * @param   pVCpu       The VMCPU handle.
- * @param   uErr        Page fault error code.
+ * @param   uErr        Page fault error code.  The X86_TRAP_PF_RSVD flag
+ *                      cannot be trusted as it is used for MMIO optimizations.
  * @param   pPdeSrc     Guest page directory entry.
  * @param   GCPtrPage   Guest context page address.
@@ -2099,214 +2258,185 @@
 PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
 {
-    bool fUserLevelFault      = !!(uErr & X86_TRAP_PF_US);
-    bool fWriteFault          = !!(uErr & X86_TRAP_PF_RW);
-    bool fMaybeWriteProtFault = fWriteFault && (fUserLevelFault || CPUMIsGuestR0WriteProtEnabled(pVCpu));
+    bool        fUserLevelFault      = !!(uErr & X86_TRAP_PF_US);
+    bool        fWriteFault          = !!(uErr & X86_TRAP_PF_RW);
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
-    bool fMaybeNXEFault       = (uErr & X86_TRAP_PF_ID) && CPUMIsGuestNXEnabled(pVCpu);
-# endif
-    unsigned uPageFaultLevel;
-    int rc;
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    bool        fMaybeNXEFault       =   (uErr & X86_TRAP_PF_ID) && CPUMIsGuestNXEnabled(pVCpu);
+# endif
+    bool        fMaybeWriteProtFault = fWriteFault && (fUserLevelFault || CPUMIsGuestR0WriteProtEnabled(pVCpu));
+    PVM         pVM                  = pVCpu->CTX_SUFF(pVM);
+    int         rc;
 
     LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));
 
-# if    PGM_GST_TYPE == PGM_TYPE_PAE \
-     || PGM_GST_TYPE == PGM_TYPE_AMD64
-
-#  if PGM_GST_TYPE == PGM_TYPE_AMD64
-    PX86PML4E    pPml4eSrc;
-    PX86PDPE     pPdpeSrc;
-
-    pPdpeSrc = pgmGstGetLongModePDPTPtr(&pVCpu->pgm.s, GCPtrPage, &pPml4eSrc);
-    Assert(pPml4eSrc);
-
+    /*
+     * Note! For PAE it is safe to assume that bad guest physical addresses
+     *       (which read as all FFs) in the translation tables will cause
+     *       #PF(RSVD).  The same will be the case for long mode provided the
+     *       physical address width is less than 52 bits - this we ASSUME.
+     *
+     * Note! No convenient shortcuts here, we have to validate everything!
+     */
+
+# if PGM_GST_TYPE == PGM_TYPE_AMD64
     /*
      * Real page fault? (PML4E level)
      */
-    if (    (uErr & X86_TRAP_PF_RSVD)
-        ||  !pPml4eSrc->n.u1Present
-        ||  (fMaybeWriteProtFault && !pPml4eSrc->n.u1Write)
-        ||  (fMaybeNXEFault && pPml4eSrc->n.u1NoExecute)
-        ||  (fUserLevelFault && !pPml4eSrc->n.u1User)
-       )
-    {
-        uPageFaultLevel = 0;
-        goto l_UpperLevelPageFault;
-    }
-    Assert(pPdpeSrc);
-
-#  else  /* PAE */
-    PX86PDPE pPdpeSrc = pgmGstGetPaePDPEPtr(&pVCpu->pgm.s, GCPtrPage);
-#  endif /* PAE */
+    PX86PML4    pPml4Src  = pgmGstGetLongModePML4Ptr(pVCpu);
+    if (RT_UNLIKELY(!pPml4Src))
+        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 0);
+
+    PX86PML4E   pPml4eSrc = &pPml4Src->a[(GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK];
+    if (!pPml4eSrc->n.u1Present)
+        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 0);
+    if (RT_UNLIKELY(!GST_IS_PML4E_VALID(pVCpu, *pPml4eSrc)))
+        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 0);
+    if (   (fMaybeWriteProtFault && !pPml4eSrc->n.u1Write)
+        || (fMaybeNXEFault       &&  pPml4eSrc->n.u1NoExecute)
+        || (fUserLevelFault      && !pPml4eSrc->n.u1User) )
+        return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 0);
 
     /*
      * Real page fault? (PDPE level)
      */
-    if (    (uErr & X86_TRAP_PF_RSVD)
-        ||  !pPdpeSrc->n.u1Present
-# if PGM_GST_TYPE == PGM_TYPE_AMD64 /* NX, r/w, u/s bits in the PDPE are long mode only */
-        ||  (fMaybeWriteProtFault && !pPdpeSrc->lm.u1Write)
-        ||  (fMaybeNXEFault && pPdpeSrc->lm.u1NoExecute)
-        ||  (fUserLevelFault && !pPdpeSrc->lm.u1User)
-# endif
-       )
-    {
-        uPageFaultLevel = 1;
-        goto l_UpperLevelPageFault;
-    }
-# endif
+    PX86PDPT pPdptSrc;
+    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4eSrc->u & X86_PML4E_PG_MASK, &pPdptSrc);
+    if (RT_FAILURE(rc))
+    {
+        AssertMsgReturn(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc), rc);
+        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
+    }
+
+    PX86PDPE pPdpeSrc = &pPdptSrc->a[(GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64];
+    if (!pPdpeSrc->n.u1Present)
+        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 1);
+    if (!GST_IS_PDPE_VALID(pVCpu, *pPdpeSrc))
+        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
+    if (   (fMaybeWriteProtFault && !pPdpeSrc->lm.u1Write)
+        || (fMaybeNXEFault       &&  pPdpeSrc->lm.u1NoExecute)
+        || (fUserLevelFault      && !pPdpeSrc->lm.u1User) )
+        return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 1);
+
+# elif PGM_GST_TYPE == PGM_TYPE_PAE
+    /*
+     * Real page fault? (PDPE level)
+     */
+    PX86PDPT pPdptSrc = pgmGstGetPaePDPTPtr(pVCpu);
+    if (RT_UNLIKELY(!pPdptSrc))
+        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
+/** @todo Handle bad CR3 address. */
+    PX86PDPE pPdpeSrc = pgmGstGetPaePDPEPtr(pVCpu, GCPtrPage);
+    if (!pPdpeSrc->n.u1Present)
+        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 1);
+    if (!GST_IS_PDPE_VALID(pVCpu, *pPdpeSrc))
+        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
+# endif /* PGM_GST_TYPE == PGM_TYPE_PAE */
 
     /*
      * Real page fault? (PDE level)
      */
-    if (    (uErr & X86_TRAP_PF_RSVD)
-        ||  !pPdeSrc->n.u1Present
-        ||  (fMaybeWriteProtFault && !pPdeSrc->n.u1Write)
+    if (!pPdeSrc->n.u1Present)
+        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 2);
+# if PGM_GST_TYPE == PGM_TYPE_32BIT
+    bool const fBigPage = pPdeSrc->b.u1Size && CPUMIsGuestPageSizeExtEnabled(pVCpu);
+# else
+    bool const fBigPage = pPdeSrc->b.u1Size;
+# endif
+    if (!fBigPage ? !GST_IS_PDE_VALID(pVCpu, *pPdeSrc) : !GST_IS_BIG_PDE_VALID(pVCpu, *pPdeSrc))
+        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 2);
+    if (   (fMaybeWriteProtFault && !pPdeSrc->n.u1Write)
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
-        ||  (fMaybeNXEFault && pPdeSrc->n.u1NoExecute)
-# endif
-        ||  (fUserLevelFault && !pPdeSrc->n.u1User) )
-    {
-        uPageFaultLevel = 2;
-        goto l_UpperLevelPageFault;
-    }
+        || (fMaybeNXEFault       &&  pPdeSrc->n.u1NoExecute)
+# endif
+        || (fUserLevelFault      && !pPdeSrc->n.u1User) )
+        return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 2);
 
     /*
-     * First check the easy case where the page directory has been marked read-only to track
-     * the dirty bit of an emulated BIG page
-     */
-    if (   pPdeSrc->b.u1Size
-#  if PGM_GST_TYPE == PGM_TYPE_32BIT
-        && CPUMIsGuestPageSizeExtEnabled(pVCpu)
-#  endif
-       )
+     * First check the easy case where the page directory has been marked
+     * read-only to track the dirty bit of an emulated BIG page.
+     */
+    if (fBigPage)
     {
         /* Mark guest page directory as accessed */
-#  if PGM_GST_TYPE == PGM_TYPE_AMD64
+# if PGM_GST_TYPE == PGM_TYPE_AMD64
         pPml4eSrc->n.u1Accessed = 1;
         pPdpeSrc->lm.u1Accessed = 1;
-#  endif
+# endif
         pPdeSrc->b.u1Accessed   = 1;
 
+        /* Mark the guest PDE as dirty if it's a write access. */
+        if (fWriteFault)
+            pPdeSrc->b.u1Dirty = 1;
+    }
+    else
+    {
         /*
-         * Only write protection page faults are relevant here.
+         * Map the guest page table.
+         */
+        PGSTPT  pPTSrc;
+        PGSTPTE pPteSrc;
+        GSTPTE  PteSrc;
+        rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
+        if (RT_SUCCESS(rc))
+        {
+            pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
+            PteSrc.u = pPteSrc->u;
+        }
+        else if (rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS)
+        {
+            /* All bits in the PTE are set. */
+# if PGM_GST_TYPE == PGM_TYPE_32BIT
+            PteSrc.u = UINT32_MAX;
+# else
+            PteSrc.u = UINT64_MAX;
+# endif
+            pPteSrc = &PteSrc;
+        }
+        else
+            AssertRCReturn(rc, rc);
+
+        /*
+         * Real page fault?
+         */
+        if (!PteSrc.n.u1Present)
+            return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 3);
+        if (!GST_IS_PTE_VALID(pVCpu, PteSrc))
+            return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 3);
+        if (   (fMaybeWriteProtFault && !PteSrc.n.u1Write)
+# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
+            || (fMaybeNXEFault       &&  PteSrc.n.u1NoExecute)
+# endif
+            || (fUserLevelFault      && !PteSrc.n.u1User) )
+            return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 3);
+
+        LogFlow(("CheckPageFault: page fault at %RGv PteSrc.u=%08x\n", GCPtrPage, PteSrc.u));
+
+        /*
+         * Set the accessed bits in the page directory and the page table.
+         */
+# if PGM_GST_TYPE == PGM_TYPE_AMD64
+        pPml4eSrc->n.u1Accessed = 1;
+        pPdpeSrc->lm.u1Accessed = 1;
+# endif
+        pPdeSrc->n.u1Accessed   = 1;
+        pPteSrc->n.u1Accessed   = 1;
+
+        /*
+         * Set the dirty flag in the PTE if it's a write access.
          */
         if (fWriteFault)
         {
-            /* Mark guest page directory as dirty (BIG page only). */
-            pPdeSrc->b.u1Dirty = 1;
-        }
-        return VINF_SUCCESS;
-    }
-    /* else: 4KB page table */
-
-    /*
-     * Map the guest page table.
-     */
-    PGSTPT pPTSrc;
-    rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
-    if (RT_SUCCESS(rc))
-    {
-        /*
-         * Real page fault?
-         */
-        PGSTPTE        pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
-        const GSTPTE   PteSrc = *pPteSrc;
-        if (    !PteSrc.n.u1Present
-            ||  (fMaybeWriteProtFault && !PteSrc.n.u1Write)
-#  if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
-            ||  (fMaybeNXEFault && PteSrc.n.u1NoExecute)
-#  endif
-            ||  (fUserLevelFault && !PteSrc.n.u1User)
-           )
-        {
-            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
-            LogFlow(("CheckPageFault: real page fault at %RGv PteSrc.u=%08x (2)\n", GCPtrPage, PteSrc.u));
-
-            /* Check the present bit as the shadow tables can cause different error codes by being out of sync.
-             * See the 2nd case above as well.
-             */
-            if (pPdeSrc->n.u1Present && pPteSrc->n.u1Present)
-                TRPMSetErrorCode(pVCpu, uErr | X86_TRAP_PF_P); /* page-level protection violation */
-
-            return VINF_EM_RAW_GUEST_TRAP;
-        }
-        LogFlow(("CheckPageFault: page fault at %RGv PteSrc.u=%08x\n", GCPtrPage, PteSrc.u));
-
-        /*
-         * Set the accessed bits in the page directory and the page table.
-         */
-#  if PGM_GST_TYPE == PGM_TYPE_AMD64
-        pPml4eSrc->n.u1Accessed = 1;
-        pPdpeSrc->lm.u1Accessed = 1;
-#  endif
-        pPdeSrc->n.u1Accessed   = 1;
-        pPteSrc->n.u1Accessed   = 1;
-
-        /*
-         * Only write protection page faults are relevant here.
-         */
-        if (fWriteFault)
-        {
-            /* Write access, so mark guest entry as dirty. */
-#  ifdef VBOX_WITH_STATISTICS
+# ifdef VBOX_WITH_STATISTICS
             if (!pPteSrc->n.u1Dirty)
                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtiedPage));
             else
                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageAlreadyDirty));
-#  endif
+# endif
 
             pPteSrc->n.u1Dirty = 1;
         }
-        return VINF_SUCCESS;
-    }
-    AssertRC(rc);
-    return rc;
-
-
-l_UpperLevelPageFault:
-    /*
-     * Pagefault detected while checking the PML4E, PDPE or PDE.
-     * Single exit handler to get rid of duplicate code paths.
-     */
-    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
-    Log(("CheckPageFault: real page fault at %RGv (%d)\n", GCPtrPage, uPageFaultLevel));
-
-    if (    1
-#  if PGM_GST_TYPE == PGM_TYPE_AMD64
-         && pPml4eSrc->n.u1Present
-#  endif
-#  if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
-         && pPdpeSrc->n.u1Present
-#  endif
-         && pPdeSrc->n.u1Present)
-    {
-        /* Check the present bit as the shadow tables can cause different error codes by being out of sync. */
-        if (   pPdeSrc->b.u1Size
-#  if PGM_GST_TYPE == PGM_TYPE_32BIT
-            && CPUMIsGuestPageSizeExtEnabled(pVCpu)
-#  endif
-           )
-        {
-            TRPMSetErrorCode(pVCpu, uErr | X86_TRAP_PF_P); /* page-level protection violation */
-        }
-        else
-        {
-            /*
-             * Map the guest page table.
-             */
-            PGSTPT pPTSrc2;
-            rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc2);
-            if (RT_SUCCESS(rc))
-            {
-                PGSTPTE pPteSrc = &pPTSrc2->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
-                if (pPteSrc->n.u1Present)
-                    TRPMSetErrorCode(pVCpu, uErr | X86_TRAP_PF_P); /* page-level protection violation */
-            }
-            AssertRC(rc);
-        }
-    }
-    return VINF_EM_RAW_GUEST_TRAP;
+    }
+    return VINF_SUCCESS;
 }
+
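
Two ideas from the rewritten CheckPageFault deserve a side-by-side illustration: an unmappable guest physical address is emulated as an entry that reads all FFs, and the GST_IS_*_VALID checks then reject it because must-be-zero (MBZ) bits are set. A sketch under the assumption that the validity macros reduce to an MBZ-mask test:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical MBZ mask for a PAE PTE without NX (an assumption). */
    #define MY_PTE_MBZ_MASK UINT64_C(0xfff0000000000000)

    static bool myIsPteValid(uint64_t uPte)
    {
        return !(uPte & MY_PTE_MBZ_MASK);
    }

    /* A bad physical address reads as all FFs, like the fallback above. */
    static uint64_t myReadPte(bool fBadGCPhys, uint64_t uRealPte)
    {
        return fBadGCPhys ? UINT64_MAX : uRealPte;
    }
    /* myIsPteValid(myReadPte(true, 0)) yields false -> raise #PF(RSVD). */
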
 
 /**
@@ -2323,13 +2453,16 @@
 {
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
-    const bool fBigPagesSupported = CPUMIsGuestPageSizeExtEnabled(pVCpu);
+    const bool  fBigPagesSupported = CPUMIsGuestPageSizeExtEnabled(pVCpu);
 # else
-    const bool fBigPagesSupported = true;
-# endif
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+    const bool  fBigPagesSupported = true;
+# endif
+    PVM         pVM   = pVCpu->CTX_SUFF(pVM);
+    PPGMPOOL    pPool = pVM->pgm.s.CTX_SUFF(pPool);
 
     Assert(PGMIsLockOwner(pVM));
 
+    /*
+     * Handle big page.
+     */
     if (pPdeSrc->b.u1Size && fBigPagesSupported)
     {
@@ -2343,5 +2476,5 @@
 
             /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
-             *       fault again and take this path to only invalidate the entry.
+             *       fault again and take this path to only invalidate the entry (see below).
              */
             PdeDst.n.u1Write      = 1;
@@ -2352,6 +2485,6 @@
             return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
         }
+
 # ifdef IN_RING0
-        else
         /* Check for stale TLB entry; only applies to the SMP guest case. */
         if (    pVM->cCpus > 1
@@ -2383,103 +2516,108 @@
     PGSTPT pPTSrc;
     int rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
-    if (RT_SUCCESS(rc))
-    {
-        if (pPdeDst->n.u1Present)
+    if (RT_FAILURE(rc))
+    {
+        AssertRC(rc);
+        return rc;
+    }
+
+    if (pPdeDst->n.u1Present)
+    {
+        PGSTPTE        pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
+        const GSTPTE   PteSrc = *pPteSrc;
+
+#ifndef IN_RING0
+        /* Bail out here as pgmPoolGetPage will return NULL and we'll crash below.
+         * Our individual shadow handlers will provide more information and force a fatal exit.
+         */
+        if (MMHyperIsInsideArea(pVM, (RTGCPTR)GCPtrPage))
         {
-            PGSTPTE        pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
-            const GSTPTE   PteSrc = *pPteSrc;
-#ifndef IN_RING0
-            /* Bail out here as pgmPoolGetPageByHCPhys will return NULL and we'll crash below.
-             * Our individual shadow handlers will provide more information and force a fatal exit.
-             */
-            if (MMHyperIsInsideArea(pVM, (RTGCPTR)GCPtrPage))
+            LogRel(("CheckPageFault: write to hypervisor region %RGv\n", GCPtrPage));
+            return VINF_PGM_NO_DIRTY_BIT_TRACKING;
+        }
+#endif
+        /*
+         * Map shadow page table.
+         */
+        PPGMPOOLPAGE    pShwPage = pgmPoolGetPage(pPool, pPdeDst->u & SHW_PDE_PG_MASK);
+        if (pShwPage)
+        {
+            PSHWPT      pPTDst   = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
+            PSHWPTE     pPteDst  = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
+            if (pPteDst->n.u1Present)    /** @todo Optimize accessed bit emulation? */
             {
-                LogRel(("CheckPageFault: write to hypervisor region %RGv\n", GCPtrPage));
-                return VINF_PGM_NO_DIRTY_BIT_TRACKING;
-            }
-#endif
-            /*
-             * Map shadow page table.
-             */
-            PPGMPOOLPAGE    pShwPage = pgmPoolGetPage(pPool, pPdeDst->u & SHW_PDE_PG_MASK);
-            if (pShwPage)
-            {
-                PSHWPT      pPTDst   = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
-                PSHWPTE     pPteDst  = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
-                if (pPteDst->n.u1Present)    /** @todo Optimize accessed bit emulation? */
+                if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY)
                 {
-                    if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY)
+                    PPGMPAGE pPage  = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
+                    SHWPTE   PteDst = *pPteDst;
+
+                    LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
+                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
+
+                    Assert(pPteSrc->n.u1Write);
+
+                    /* Note: No need to invalidate this entry on other VCPUs as a stale TLB
+                     *       entry will not harm; write access will simply fault again and
+                     *       take this path to only invalidate the entry.
+                     */
+                    if (RT_LIKELY(pPage))
                     {
-                        PPGMPAGE pPage  = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
-                        SHWPTE   PteDst = *pPteDst;
-
-                        LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
-                        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
-
-                        Assert(pPteSrc->n.u1Write);
-
-                        /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
-                         *       fault again and take this path to only invalidate the entry.
-                         */
-                        if (RT_LIKELY(pPage))
+                        if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
                         {
-                            if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
+                            AssertMsgFailed(("%R[pgmpage] - we don't set PGM_PTFLAGS_TRACK_DIRTY for these pages\n", pPage));
+                            Assert(!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage));
+                            /* Assuming write handlers here as the PTE is present (otherwise we wouldn't be here). */
+                            PteDst.n.u1Write = 0;
+                        }
+                        else
+                        {
+                            if (   PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
+                                && PGM_PAGE_GET_TYPE(pPage)  == PGMPAGETYPE_RAM)
                             {
-                                /* Assuming write handlers here as the PTE is present (otherwise we wouldn't be here). */
+                                rc = pgmPhysPageMakeWritable(pVM, pPage, pPteSrc->u & GST_PTE_PG_MASK);
+                                AssertRC(rc);
+                            }
+                            if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
+                                PteDst.n.u1Write = 1;
+                            else
+                            {
+                                /* Still applies to shared pages. */
+                                Assert(!PGM_PAGE_IS_ZERO(pPage));
                                 PteDst.n.u1Write = 0;
                             }
-                            else
-                            {
-                                if (   PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
-                                    && PGM_PAGE_GET_TYPE(pPage)  == PGMPAGETYPE_RAM)
-                                {
-                                    rc = pgmPhysPageMakeWritable(pVM, pPage, pPteSrc->u & GST_PTE_PG_MASK);
-                                    AssertRC(rc);
-                                }
-                                if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
-                                {
-                                    PteDst.n.u1Write = 1;
-                                }
-                                else
-                                {
-                                    /* Still applies to shared pages. */
-                                    Assert(!PGM_PAGE_IS_ZERO(pPage));
-                                    PteDst.n.u1Write = 0;
-                                }
-                            }
                         }
-                        else
-                            PteDst.n.u1Write = 1;
-
-                        PteDst.n.u1Dirty    = 1;
-                        PteDst.n.u1Accessed = 1;
-                        PteDst.au32[0]     &= ~PGM_PTFLAGS_TRACK_DIRTY;
-                        ASMAtomicWriteSize(pPteDst, PteDst.u);
-                        PGM_INVL_PG(pVCpu, GCPtrPage);
-                        return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
                     }
+                    else
+                        PteDst.n.u1Write = 1;  /** @todo r=bird: This doesn't make sense to me. */
+
+                    PteDst.n.u1Dirty    = 1;
+                    PteDst.n.u1Accessed = 1;
+                    PteDst.au32[0]     &= ~PGM_PTFLAGS_TRACK_DIRTY;
+                    ASMAtomicWriteSize(pPteDst, PteDst.u);
+                    PGM_INVL_PG(pVCpu, GCPtrPage);
+                    return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
+                }
+
 # ifdef IN_RING0
-                    else
-                    /* Check for stale TLB entry; only applies to the SMP guest case. */
-                    if (    pVM->cCpus > 1
-                        &&  pPteDst->n.u1Write == 1
-                        &&  pPteDst->n.u1Accessed == 1)
-                    {
-                        /* Stale TLB entry. */
-                        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
-                        PGM_INVL_PG(pVCpu, GCPtrPage);
-                        return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
-                    }
-# endif
+                /* Check for stale TLB entry; only applies to the SMP guest case. */
+                if (    pVM->cCpus > 1
+                    &&  pPteDst->n.u1Write == 1
+                    &&  pPteDst->n.u1Accessed == 1)
+                {
+                    /* Stale TLB entry. */
+                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
+                    PGM_INVL_PG(pVCpu, GCPtrPage);
+                    return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
                 }
+# endif
             }
-            else
-                AssertMsgFailed(("pgmPoolGetPageByHCPhys %RGp failed!\n", pPdeDst->u & SHW_PDE_PG_MASK));
         }
-        return VINF_PGM_NO_DIRTY_BIT_TRACKING;
-    }
-    AssertRC(rc);
-    return rc;
+        else
+            AssertMsgFailed(("pgmPoolGetPage %RGp failed!\n", pPdeDst->u & SHW_PDE_PG_MASK));
+    }
+
+    return VINF_PGM_NO_DIRTY_BIT_TRACKING;
 }
+
 #endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
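
For context, the function above is the core of the dirty-bit emulation: the shadow PTE stays read-only and carries a software tag (PGM_PTFLAGS_TRACK_DIRTY) until the first write faults; the fault handler then sets the guest D bit, makes the shadow entry writable and restarts the instruction. A minimal sketch of the transition (flag bits are simplified stand-ins):

    #include <stdbool.h>
    #include <stdint.h>

    #define MY_RW          UINT64_C(0x002)  /* writable (assumed bits)    */
    #define MY_A           UINT64_C(0x020)  /* accessed                   */
    #define MY_D           UINT64_C(0x040)  /* dirty                      */
    #define MY_TRACK_DIRTY UINT64_C(0x200)  /* software "track dirty" tag */

    /* Handle a write fault on a shadow PTE; returns true if it was ours. */
    static bool myHandleDirtyFault(uint64_t *puShwPte, uint64_t *puGstPte)
    {
        if (!(*puShwPte & MY_TRACK_DIRTY))
            return false;                   /* a genuine guest fault      */
        *puGstPte |= MY_D | MY_A;           /* record dirty in the guest  */
        *puShwPte  = (*puShwPte | MY_RW | MY_D | MY_A) & ~MY_TRACK_DIRTY;
        return true;                        /* restart the instruction    */
    }

On SMP the fault may also hit an entry another VCPU already fixed; the ring-0 path above treats a writable, accessed PTE as a stale TLB entry and merely invalidates the page.
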
 
@@ -2500,6 +2638,6 @@
 PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage)
 {
-    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
-    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+    PVM             pVM      = pVCpu->CTX_SUFF(pVM);
+    PPGMPOOL        pPool    = pVM->pgm.s.CTX_SUFF(pPool);
 
     STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
@@ -2517,5 +2655,5 @@
     && PGM_SHW_TYPE != PGM_TYPE_EPT
 
-    int             rc = VINF_SUCCESS;
+    int             rc       = VINF_SUCCESS;
 
     /*
@@ -2532,6 +2670,6 @@
 
 # elif PGM_SHW_TYPE == PGM_TYPE_PAE
-    const unsigned  iPDDst  = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
-    PPGMPOOLPAGE    pShwPde = NULL;
+    const unsigned  iPDDst   = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
+    PPGMPOOLPAGE    pShwPde  = NULL;
     PX86PDPAE       pPDDst;
     PSHWPDE         pPdeDst;
@@ -2553,11 +2691,11 @@
     AssertRCSuccessReturn(rc, rc);
     Assert(pPDDst);
-    PSHWPDE         pPdeDst = &pPDDst->a[iPDDst];
-# endif
-    SHWPDE          PdeDst = *pPdeDst;
+    PSHWPDE         pPdeDst  = &pPDDst->a[iPDDst];
+# endif
+    SHWPDE          PdeDst   = *pPdeDst;
 
 # if PGM_GST_TYPE == PGM_TYPE_AMD64
     /* Fetch the pgm pool shadow descriptor. */
-    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
+    PPGMPOOLPAGE    pShwPde  = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
     Assert(pShwPde);
 # endif
@@ -2566,6 +2704,6 @@
     /*
      * Check for conflicts.
-     * GC: In case of a conflict we'll go to Ring-3 and do a full SyncCR3.
-     * HC: Simply resolve the conflict.
+     * RC: In case of a conflict we'll go to Ring-3 and do a full SyncCR3.
+     * R3: Simply resolve the conflict.
      */
     if (PdeDst.u & PGM_PDFLAGS_MAPPING)
@@ -2576,5 +2714,6 @@
         STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
         return VERR_ADDRESS_CONFLICT;
-#  else
+
+#  else  /* IN_RING3 */
         PPGMMAPPING pMapping = pgmGetMapping(pVM, (RTGCPTR)GCPtrPage);
         Assert(pMapping);
@@ -2592,5 +2731,5 @@
         }
         PdeDst = *pPdeDst;
-#  endif
+#  endif /* IN_RING3 */
     }
 # endif /* !PGM_WITHOUT_MAPPINGS */
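
The RC/R3 comment rewrite above reflects an asymmetric conflict policy: the raw-mode context cannot relocate a hypervisor mapping, so it punts to ring-3 for a full sync, while ring-3 resolves the conflict on the spot. The dispatch in isolation (illustrative stand-ins only):

    #include <stdbool.h>

    static void myRelocateMapping(void *pMapping) { (void)pMapping; }

    /* RC: report the conflict; R3: relocate the mapping and carry on. */
    static int myHandleMappingConflict(bool fRing3, void *pMapping)
    {
        if (!fRing3)
            return -1;                  /* think VERR_ADDRESS_CONFLICT */
        myRelocateMapping(pMapping);
        return 0;                       /* think VINF_SUCCESS          */
    }
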
@@ -2637,5 +2776,5 @@
 # endif
 
-            GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
+            GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc);
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
             /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
@@ -2648,5 +2787,5 @@
                     enmAccess = (fNoExecute) ? PGMPOOLACCESS_USER_RW_NX : PGMPOOLACCESS_USER_RW;
                 else
-                    enmAccess = (fNoExecute) ? PGMPOOLACCESS_USER_R_NX : PGMPOOLACCESS_USER_R;
+                    enmAccess = (fNoExecute) ? PGMPOOLACCESS_USER_R_NX  : PGMPOOLACCESS_USER_R;
             }
             else
@@ -2655,5 +2794,5 @@
                     enmAccess = (fNoExecute) ? PGMPOOLACCESS_SUPERVISOR_RW_NX : PGMPOOLACCESS_SUPERVISOR_RW;
                 else
-                    enmAccess = (fNoExecute) ? PGMPOOLACCESS_SUPERVISOR_R_NX : PGMPOOLACCESS_SUPERVISOR_R;
+                    enmAccess = (fNoExecute) ? PGMPOOLACCESS_SUPERVISOR_R_NX  : PGMPOOLACCESS_SUPERVISOR_R;
             }
             rc = pgmPoolAllocEx(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, enmAccess, pShwPde->idx, iPDDst, &pShwPage);
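
The enmAccess selection just above is a pure function of three guest attribute bits. The same mapping written as a standalone helper (the enum values are illustrative; only the branching structure mirrors the code):

    #include <stdbool.h>

    typedef enum {
        MY_SUP_R,  MY_SUP_R_NX,  MY_SUP_RW,  MY_SUP_RW_NX,
        MY_USER_R, MY_USER_R_NX, MY_USER_RW, MY_USER_RW_NX
    } MYACCESS;

    static MYACCESS myPoolAccess(bool fUser, bool fWrite, bool fNoExecute)
    {
        if (fUser)
            return fWrite ? (fNoExecute ? MY_USER_RW_NX : MY_USER_RW)
                          : (fNoExecute ? MY_USER_R_NX  : MY_USER_R);
        return fWrite ? (fNoExecute ? MY_SUP_RW_NX : MY_SUP_RW)
                      : (fNoExecute ? MY_SUP_R_NX  : MY_SUP_R);
    }
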
@@ -2765,5 +2904,5 @@
                     const GSTPTE   PteSrc = pPTSrc->a[iPTSrc];
 
-                    if (PteSrc.n.u1Present) /* we've already cleared it above */
+                    if (PteSrc.n.u1Present)
                     {
 # ifndef IN_RING0
@@ -2790,4 +2929,5 @@
                               (RTGCPHYS)((PdeSrc.u & GST_PDE_PG_MASK) + iPTSrc*sizeof(PteSrc)) ));
                     }
+                    /* else: the page table was cleared by the pool */
                 } /* for PTEs */
             }
@@ -2897,6 +3037,5 @@
                                 PteDst.u = 0;
                         }
-                        else
-                        if (PGM_PAGE_IS_BALLOONED(pPage))
+                        else if (PGM_PAGE_IS_BALLOONED(pPage))
                         {
                             /* Skip ballooned pages. */
@@ -3041,8 +3180,6 @@
     Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
 
-# if defined(PGM_WITH_LARGE_PAGES) && (PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE)
-#  if  (PGM_SHW_TYPE != PGM_TYPE_EPT)   /* PGM_TYPE_EPT implies nested paging */
-    if (HWACCMIsNestedPagingActive(pVM))
-#  endif
+# if defined(PGM_WITH_LARGE_PAGES) && PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
+    if (BTH_IS_NP_ACTIVE(pVM))
     {
         PPGMPAGE pPage;
@@ -3060,6 +3197,5 @@
                 HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
             }
-            else
-            if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
+            else if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
             {
                 /* Recheck the entire 2 MB range to see if we can use it again as a large page. */
@@ -3072,6 +3208,5 @@
                 }
             }
-            else
-            if (PGMIsUsingLargePages(pVM))
+            else if (PGMIsUsingLargePages(pVM))
             {
                 rc = pgmPhysAllocLargePage(pVM, GCPtrPage);
@@ -3174,6 +3309,12 @@
 PGM_BTH_DECL(int, PrefetchPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage)
 {
-#if   (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
-    && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
+#if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
+     || PGM_GST_TYPE == PGM_TYPE_REAL \
+     || PGM_GST_TYPE == PGM_TYPE_PROT \
+     || PGM_GST_TYPE == PGM_TYPE_PAE \
+     || PGM_GST_TYPE == PGM_TYPE_AMD64 ) \
+ && PGM_SHW_TYPE != PGM_TYPE_NESTED \
+ && PGM_SHW_TYPE != PGM_TYPE_EPT
+
     /*
      * Check that all Guest levels thru the PDE are present, getting the
@@ -3184,9 +3325,9 @@
 #  if PGM_GST_TYPE == PGM_TYPE_32BIT
     const unsigned  iPDSrc = GCPtrPage >> GST_PD_SHIFT;
-    PGSTPD          pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
+    PGSTPD          pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
 #  elif PGM_GST_TYPE == PGM_TYPE_PAE
     unsigned        iPDSrc;
     X86PDPE         PdpeSrc;
-    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVCpu->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
+    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(pVCpu, GCPtrPage, &iPDSrc, &PdpeSrc);
     if (!pPDSrc)
         return VINF_SUCCESS; /* not present */
@@ -3195,5 +3336,5 @@
     PX86PML4E       pPml4eSrc;
     X86PDPE         PdpeSrc;
-    PGSTPD          pPDSrc = pgmGstGetLongModePDPtr(&pVCpu->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
+    PGSTPD          pPDSrc = pgmGstGetLongModePDPtr(pVCpu, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
     if (!pPDSrc)
         return VINF_SUCCESS; /* not present */
@@ -3269,12 +3410,13 @@
             if (!PdeDst.n.u1Present)
             {
-                /** r=bird: This guy will set the A bit on the PDE, probably harmless. */
+                /** @todo r=bird: This guy will set the A bit on the PDE,
+                 *    probably harmless. */
                 rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
             }
             else
             {
-                /** @note We used to sync PGM_SYNC_NR_PAGES pages, which triggered assertions in CSAM, because
-                 *        R/W attributes of nearby pages were reset. Not sure how that could happen. Anyway, it
-                 *        makes no sense to prefetch more than one page.
+                /* Note! We used to sync PGM_SYNC_NR_PAGES pages, which triggered assertions in CSAM, because
+                 *       R/W attributes of nearby pages were reset. Not sure how that could happen. Anyway, it
+                 *       makes no sense to prefetch more than one page.
                  */
                 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, 1, 0);
@@ -3289,4 +3431,6 @@
 #elif PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
     return VINF_SUCCESS; /* ignore */
+#else
+    AssertCompile(0);
 #endif
 }
@@ -3303,4 +3447,6 @@
  * @param   fPage       The effective guest page flags.
  * @param   uErr        The trap error code.
+ * @remarks This will normally never be called on invalid guest page
+ *          translation entries.
  */
 PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr)
@@ -3311,6 +3457,11 @@
 
     Assert(!HWACCMIsNestedPagingActive(pVM));
-#if   (PGM_GST_TYPE == PGM_TYPE_32BIT ||  PGM_GST_TYPE == PGM_TYPE_REAL ||  PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_TYPE_AMD64) \
-    && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
+#if   (   PGM_GST_TYPE == PGM_TYPE_32BIT \
+       || PGM_GST_TYPE == PGM_TYPE_REAL \
+       || PGM_GST_TYPE == PGM_TYPE_PROT \
+       || PGM_GST_TYPE == PGM_TYPE_PAE \
+       || PGM_GST_TYPE == PGM_TYPE_AMD64 ) \
+    && PGM_SHW_TYPE != PGM_TYPE_NESTED \
+    && PGM_SHW_TYPE != PGM_TYPE_EPT
 
 # ifndef IN_RING0
@@ -3329,24 +3480,27 @@
      * Get guest PD and index.
      */
+    /** @todo Performance: We've done all this a jiffy ago in the
+     *        PGMGstGetPage call. */
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
 #  if PGM_GST_TYPE == PGM_TYPE_32BIT
     const unsigned  iPDSrc = GCPtrPage >> GST_PD_SHIFT;
-    PGSTPD          pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
+    PGSTPD          pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
+
 #  elif PGM_GST_TYPE == PGM_TYPE_PAE
-    unsigned        iPDSrc  = 0;
+    unsigned        iPDSrc = 0;
     X86PDPE         PdpeSrc;
-    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVCpu->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
-
-    if (pPDSrc)
+    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(pVCpu, GCPtrPage, &iPDSrc, &PdpeSrc);
+    if (RT_UNLIKELY(!pPDSrc))
     {
         Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
         return VINF_EM_RAW_GUEST_TRAP;
     }
+
 #  elif PGM_GST_TYPE == PGM_TYPE_AMD64
     unsigned        iPDSrc;
     PX86PML4E       pPml4eSrc;
     X86PDPE         PdpeSrc;
-    PGSTPD          pPDSrc = pgmGstGetLongModePDPtr(&pVCpu->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
-    if (!pPDSrc)
+    PGSTPD          pPDSrc = pgmGstGetLongModePDPtr(pVCpu, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
+    if (RT_UNLIKELY(!pPDSrc))
     {
         Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
@@ -3354,8 +3508,9 @@
     }
 #  endif
-# else
+
+# else  /* !PGM_WITH_PAGING */
     PGSTPD          pPDSrc = NULL;
     const unsigned  iPDSrc = 0;
-# endif
+# endif /* !PGM_WITH_PAGING */
     int             rc = VINF_SUCCESS;
 
@@ -3367,4 +3522,5 @@
 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
     PX86PDE         pPdeDst = pgmShwGet32BitPDEPtr(&pVCpu->pgm.s, GCPtrPage);
+
 # elif PGM_SHW_TYPE == PGM_TYPE_PAE
     PX86PDEPAE      pPdeDst;
@@ -3372,7 +3528,6 @@
     PX86PDPAE       pPDDst;
 #   if PGM_GST_TYPE != PGM_TYPE_PAE
+    /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
     X86PDPE         PdpeSrc;
-
-    /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
     PdpeSrc.u  = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
 #   endif
@@ -3393,12 +3548,10 @@
 
 #  if PGM_GST_TYPE == PGM_TYPE_PROT
-    /* AMD-V nested paging */
+    /* AMD-V nested paging: Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
     X86PML4E        Pml4eSrc;
     X86PDPE         PdpeSrc;
     PX86PML4E       pPml4eSrc = &Pml4eSrc;
-
-    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
     Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
-    PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
+    PdpeSrc.u  = X86_PDPE_P  | X86_PDPE_RW  | X86_PDPE_US  | X86_PDPE_A;
 #  endif
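
With nested paging the guest has no PML4/PDPT of its own, so fully permissive fake entries are synthesized just to keep the shared sync code happy; real access control happens at the page table level. The gist of it (bit values assumed from the standard long-mode entry layout; note that for PAE guests the fake PDPTE carries only the present bit, since RW/US are reserved there):

    #include <stdint.h>

    #define MY_P  UINT64_C(0x01)   /* present  */
    #define MY_RW UINT64_C(0x02)   /* writable */
    #define MY_US UINT64_C(0x04)   /* user     */
    #define MY_A  UINT64_C(0x20)   /* accessed */

    /* Fake top-level entries that never restrict access (nested paging). */
    static const uint64_t g_uFakePml4e = MY_P | MY_RW | MY_US | MY_A;
    static const uint64_t g_uFakePdpe  = MY_P | MY_RW | MY_US | MY_A;
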
 
@@ -3440,8 +3593,9 @@
         Log(("PGMVerifyAccess: success (dirty)\n"));
     else
-    {
-        GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
+# endif
+    {
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+        GSTPDE PdeSrc       = pPDSrc->a[iPDSrc];
 # else
-    {
         GSTPDE PdeSrc;
         PdeSrc.u            = 0; /* faked so we don't have to #ifdef everything */
@@ -3450,6 +3604,6 @@
         PdeSrc.n.u1Accessed = 1;
         PdeSrc.n.u1User     = 1;
-
-# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+# endif
+
         Assert(rc != VINF_EM_RAW_GUEST_TRAP);
         if (uErr & X86_TRAP_PF_US)
@@ -3467,5 +3621,5 @@
         else
         {
-            Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", GCPtrPage, rc));
+            Log(("PGMVerifyAccess: access violation for %RGv rc=%Rrc\n", GCPtrPage, rc));
             rc = VINF_EM_RAW_GUEST_TRAP;
         }
@@ -3478,9 +3632,9 @@
     return rc;
 
-#else /* PGM_GST_TYPE != PGM_TYPE_32BIT */
+#else  /* PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_NESTED */
 
     AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
     return VERR_INTERNAL_ERROR;
-#endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
+#endif /* PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_NESTED */
 }
 
@@ -3652,5 +3806,5 @@
     AssertMsgReturn(HCPhys == HCPhysShw, ("HCPhys=%RHp HCPhyswShw=%RHp (cr3)\n", HCPhys, HCPhysShw), false);
 #  if PGM_GST_TYPE == PGM_TYPE_32BIT && defined(IN_RING3)
-    pgmGstGet32bitPDPtr(pPGM);
+    pgmGstGet32bitPDPtr(pVCpu);
     RTGCPHYS GCPhys;
     rc = PGMR3DbgR3Ptr2GCPhys(pVM, pPGM->pGst32BitPdR3, &GCPhys);
@@ -3692,5 +3846,5 @@
         RTGCPHYS        GCPhysPdptSrc;
 
-        pPml4eSrc     = pgmGstGetLongModePML4EPtr(&pVCpu->pgm.s, iPml4);
+        pPml4eSrc     = pgmGstGetLongModePML4EPtr(pVCpu, iPml4);
         pPml4eDst     = pgmShwGetLongModePML4EPtr(&pVCpu->pgm.s, iPml4);
 
@@ -3748,5 +3902,5 @@
 #  if PGM_GST_TYPE == PGM_TYPE_PAE
             X86PDPE         PdpeSrc;
-            PGSTPD          pPDSrc    = pgmGstGetPaePDPtr(&pVCpu->pgm.s, GCPtr, &iPDSrc, &PdpeSrc);
+            PGSTPD          pPDSrc    = pgmGstGetPaePDPtr(pVCpu, GCPtr, &iPDSrc, &PdpeSrc);
             PX86PDPT        pPdptDst  = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
 #  else
@@ -3755,5 +3909,5 @@
             PX86PDPT        pPdptDst;
             PX86PDPAE       pPDDst;
-            PGSTPD          pPDSrc    = pgmGstGetLongModePDPtr(&pVCpu->pgm.s, GCPtr, &pPml4eSrcIgn, &PdpeSrc, &iPDSrc);
+            PGSTPD          pPDSrc    = pgmGstGetLongModePDPtr(pVCpu, GCPtr, &pPml4eSrcIgn, &PdpeSrc, &iPDSrc);
 
             rc = pgmShwGetLongModePDPtr(pVCpu, GCPtr, NULL, &pPdptDst, &pPDDst);
@@ -3815,5 +3969,5 @@
 # endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
-            GSTPD const    *pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
+            GSTPD const    *pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
 #  if PGM_SHW_TYPE == PGM_TYPE_32BIT
             PCX86PD         pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);
@@ -3902,5 +4056,5 @@
                         }
 # endif
-                        GCPhysGst = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
+                        GCPhysGst = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc);
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                         GCPhysGst |= GCPtr & RT_BIT(X86_PAGE_2M_SHIFT);
@@ -4072,5 +4226,9 @@
                                 else
                                 {
-                                    if (PteDst.n.u1Present)
+                                    if (   PteDst.n.u1Present
+# if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
+                                        && !PGM_PAGE_IS_MMIO(pPhysPage)
+# endif
+                                       )
                                     {
                                         AssertMsgFailed(("ALL access flagged at %RGv but the page is present! pPhysPage=%R[pgmpage] PteSrc=%#RX64 PteDst=%#RX64\n",
@@ -4303,5 +4461,9 @@
                                 else
                                 {
-                                    if (PteDst.n.u1Present)
+                                    if (   PteDst.n.u1Present
+# if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
+                                        && !PGM_PAGE_IS_MMIO(pPhysPage)
+# endif
+                                        )
                                     {
                                         AssertMsgFailed(("ALL access flagged at %RGv but the page is present! pPhysPage=%R[pgmpage] PdeSrc=%#RX64 PteDst=%#RX64\n",
@@ -4412,5 +4574,5 @@
              * Map the 4 PDs too.
              */
-            PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVCpu->pgm.s);
+            PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
             RTGCPTR  GCPtr      = pVM->pgm.s.GCPtrCR3Mapping + PAGE_SIZE;
             for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++, GCPtr += PAGE_SIZE)
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllGst.h	(revision 30888)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllGst.h	(revision 30889)
@@ -5,5 +5,5 @@
 
 /*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2010 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ -21,11 +21,187 @@
 *******************************************************************************/
 RT_C_DECLS_BEGIN
-PGM_GST_DECL(int, GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
-PGM_GST_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
-PGM_GST_DECL(int, GetPDE)(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPDE);
+#if PGM_GST_TYPE == PGM_TYPE_32BIT \
+ || PGM_GST_TYPE == PGM_TYPE_PAE \
+ || PGM_GST_TYPE == PGM_TYPE_AMD64
+PGM_GST_DECL(int,  Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
+#endif
+PGM_GST_DECL(int,  GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
+PGM_GST_DECL(int,  ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
+PGM_GST_DECL(int,  GetPDE)(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPDE);
 PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4);
 RT_C_DECLS_END
 
 
+#if PGM_GST_TYPE == PGM_TYPE_32BIT \
+ || PGM_GST_TYPE == PGM_TYPE_PAE \
+ || PGM_GST_TYPE == PGM_TYPE_AMD64
+
+
+DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPU pVCpu, PGSTPTWALK pWalk, int iLevel)
+{
+    NOREF(iLevel);
+    pWalk->Core.fNotPresent     = true;
+    pWalk->Core.uLevel          = (uint8_t)iLevel;
+    return VERR_PAGE_TABLE_NOT_PRESENT;
+}
+
+DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPU pVCpu, PGSTPTWALK pWalk, int iLevel, int rc)
+{
+    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
+    pWalk->Core.fBadPhysAddr    = true;
+    pWalk->Core.uLevel          = (uint8_t)iLevel;
+    return VERR_PAGE_TABLE_NOT_PRESENT;
+}
+
+DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPU pVCpu, PGSTPTWALK pWalk, int iLevel)
+{
+    pWalk->Core.fRsvdError      = true;
+    pWalk->Core.uLevel          = (uint8_t)iLevel;
+    return VERR_PAGE_TABLE_NOT_PRESENT;
+}
+
+
+/**
+ * Performs a guest page table walk.
+ *
+ * @returns VBox status code.
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure.  Check pWalk for details.
+ *
+ * @param   pVCpu       The current CPU.
+ * @param   GCPtr       The guest virtual address to walk by.
+ * @param   pWalk       Where to return the walk result. This is always set.
+ */
+PGM_GST_DECL(int, Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)
+{
+    int rc;
+
+    /*
+     * Init the walking structure.
+     */
+    RT_ZERO(*pWalk);
+    pWalk->Core.GCPtr = GCPtr;
+
+# if PGM_GST_TYPE == PGM_TYPE_32BIT \
+  || PGM_GST_TYPE == PGM_TYPE_PAE
+    /*
+     * Boundary check for PAE and 32-bit (prevents trouble further down).
+     */
+    if (RT_UNLIKELY(GCPtr >= _4G))
+        return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 8);
+# endif
+
+    {
+# if PGM_GST_TYPE == PGM_TYPE_AMD64
+        /*
+         * The PML4E.
+         */
+        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pWalk->pPml4);
+        if (RT_FAILURE(rc))
+            return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
+
+        PX86PML4  register  pPml4 = pWalk->pPml4;
+        X86PML4E  register  Pml4e;
+        PX86PML4E register  pPml4e;
+
+        pWalk->pPml4e  = pPml4e  = &pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
+        pWalk->Pml4e.u = Pml4e.u = pPml4e->u;
+        if (!Pml4e.n.u1Present)
+            return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);
+        if (RT_UNLIKELY(!GST_IS_PML4E_VALID(pVCpu, Pml4e)))
+            return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);
+
+        /*
+         * The PDPE.
+         */
+        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & X86_PML4E_PG_MASK_FULL, &pWalk->pPdpt);
+        if (RT_FAILURE(rc))
+            return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
+
+# elif PGM_GST_TYPE == PGM_TYPE_PAE
+        rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pWalk->pPdpt);
+        if (RT_FAILURE(rc))
+            return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
+# endif
+    }
+    {
+# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
+        PX86PDPT register   pPdpt = pWalk->pPdpt;
+        PX86PDPE register   pPdpe;
+        X86PDPE  register   Pdpe;
+
+        pWalk->pPdpe  = pPdpe  = &pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
+        pWalk->Pdpe.u = Pdpe.u = pPdpe->u;
+        if (!Pdpe.n.u1Present)
+            return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3);
+        if (RT_UNLIKELY(!GST_IS_PDPE_VALID(pVCpu, Pdpe)))
+            return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3);
+
+        /*
+         * The PDE.
+         */
+        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pdpe.u & X86_PDPE_PG_MASK_FULL, &pWalk->pPd);
+        if (RT_FAILURE(rc))
+            return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);
+# elif PGM_GST_TYPE == PGM_TYPE_32BIT
+        rc = pgmGstGet32bitPDPtrEx(pVCpu, &pWalk->pPd);
+        if (RT_FAILURE(rc))
+            return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
+# endif
+    }
+    {
+        PGSTPD  register    pPd = pWalk->pPd;
+        PGSTPDE register    pPde;
+        GSTPDE  register    Pde;
+
+        pWalk->pPde  = pPde  = &pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
+        pWalk->Pde.u = Pde.u = pPde->u;
+        if (!Pde.n.u1Present)
+            return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2);
+        if (Pde.n.u1Size && GST_IS_PSE_ACTIVE(pVCpu))
+        {
+            if (RT_UNLIKELY(!GST_IS_BIG_PDE_VALID(pVCpu, Pde)))
+                return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
+
+            pWalk->Core.GCPhys     = GST_GET_PDE_BIG_PG_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
+                                   | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
+            pWalk->Core.fBigPage   = true;
+            pWalk->Core.fSucceeded = true;
+            return VINF_SUCCESS;
+        }
+
+        if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
+            return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
+
+        /*
+         * The PTE.
+         */
+        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pde.u & GST_PDE_PG_MASK, &pWalk->pPt);
+        if (RT_FAILURE(rc))
+            return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
+    }
+    {
+        PGSTPT  register    pPt = pWalk->pPt;
+        PGSTPTE register    pPte;
+        GSTPTE  register    Pte;
+
+        pWalk->pPte  = pPte  = &pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
+        pWalk->Pte.u = Pte.u = pPte->u;
+        if (!Pte.n.u1Present)
+            return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1);
+        if (RT_UNLIKELY(!GST_IS_PTE_VALID(pVCpu, Pte)))
+            return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1);
+
+        /*
+         * We're done.
+         */
+        pWalk->Core.GCPhys     = (Pte.u & GST_PDE_PG_MASK)
+                               | (GCPtr & PAGE_OFFSET_MASK);
+        pWalk->Core.fSucceeded = true;
+        return VINF_SUCCESS;
+    }
+}
+
+#endif /* 32BIT, PAE, AMD64 */
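
The shape of the new Walk routine is an early-return cascade: per level, fetch the entry, check present, check reserved (MBZ) bits, then descend. Reduced to a standalone two-level walk over toy tables (structures, shifts and masks are simplified assumptions; the real code also maps each next-level table from the entry's physical address rather than taking it as a parameter):

    #include <stdbool.h>
    #include <stdint.h>

    #define MY_P   UINT64_C(0x01)
    #define MY_MBZ UINT64_C(0xfff0000000000000)  /* assumed reserved bits */

    typedef struct { uint64_t GCPhys; bool fNotPresent, fRsvdError; int iLevel; } MYWALK;

    static int myWalk(const uint64_t *pauPd, const uint64_t *pauPt,
                      uint64_t GCPtr, MYWALK *pWalk)
    {
        uint64_t Pde = pauPd[(GCPtr >> 21) & 0x1ff];
        if (!(Pde & MY_P))
        {   pWalk->fNotPresent = true; pWalk->iLevel = 2; return -1; }
        if (Pde & MY_MBZ)
        {   pWalk->fRsvdError  = true; pWalk->iLevel = 2; return -1; }

        uint64_t Pte = pauPt[(GCPtr >> 12) & 0x1ff];
        if (!(Pte & MY_P))
        {   pWalk->fNotPresent = true; pWalk->iLevel = 1; return -1; }
        if (Pte & MY_MBZ)
        {   pWalk->fRsvdError  = true; pWalk->iLevel = 1; return -1; }

        pWalk->GCPhys = (Pte & UINT64_C(0x000ffffffffff000)) | (GCPtr & 0xfff);
        return 0;
    }
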
 
 /**
@@ -57,100 +233,65 @@
     return VINF_SUCCESS;
 
-#elif PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
-
-# if PGM_GST_TYPE != PGM_TYPE_AMD64
-    /* Boundary check. */
-    if (GCPtr >= _4G)
-        return VERR_PAGE_TABLE_NOT_PRESENT;
-# endif
-
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    /*
-     * Get the PDE.
-     */
-# if PGM_GST_TYPE == PGM_TYPE_32BIT
-    X86PDE      Pde = pgmGstGet32bitPDE(&pVCpu->pgm.s, GCPtr);
-
-#elif PGM_GST_TYPE == PGM_TYPE_PAE
-    /* pgmGstGetPaePDE will return 0 if the PDPTE is marked as not present.
-     * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx). */
-    X86PDEPAE   Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr);
-
-#elif PGM_GST_TYPE == PGM_TYPE_AMD64
-    PX86PML4E   pPml4e;
-    X86PDPE     Pdpe;
-    X86PDEPAE   Pde = pgmGstGetLongModePDEEx(&pVCpu->pgm.s, GCPtr, &pPml4e, &Pdpe);
-
-    Assert(pPml4e);
-    if (!(pPml4e->n.u1Present & Pdpe.n.u1Present))
-        return VERR_PAGE_TABLE_NOT_PRESENT;
-
-    /* Merge accessed, write, user and no-execute bits into the PDE. */
-    Pde.n.u1Accessed  &= pPml4e->n.u1Accessed & Pdpe.lm.u1Accessed;
-    Pde.n.u1Write     &= pPml4e->n.u1Write & Pdpe.lm.u1Write;
-    Pde.n.u1User      &= pPml4e->n.u1User & Pdpe.lm.u1User;
-    Pde.n.u1NoExecute |= pPml4e->n.u1NoExecute | Pdpe.lm.u1NoExecute;
-# endif
-
-    /*
-     * Lookup the page.
-     */
-    if (!Pde.n.u1Present)
-        return VERR_PAGE_TABLE_NOT_PRESENT;
-
-    if (    !Pde.b.u1Size
-# if PGM_GST_TYPE == PGM_TYPE_32BIT
-        ||  !CPUMIsGuestPageSizeExtEnabled(pVCpu)
-# endif
-        )
-    {
-        PGSTPT pPT;
-        int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
-        if (RT_FAILURE(rc))
-            return rc;
-
-        /*
-         * Get PT entry and check presence.
-         */
-        const GSTPTE Pte = pPT->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
-        if (!Pte.n.u1Present)
-            return VERR_PAGE_NOT_PRESENT;
-
-        /*
-         * Store the result.
-         * RW and US flags depend on all levels (bitwise AND) - except for legacy PAE
-         * where the PDPE is simplified.
-         */
-        if (pfFlags)
-        {
-            *pfFlags = (Pte.u & ~GST_PTE_PG_MASK)
-                     & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
+#elif PGM_GST_TYPE == PGM_TYPE_32BIT \
+   || PGM_GST_TYPE == PGM_TYPE_PAE \
+   || PGM_GST_TYPE == PGM_TYPE_AMD64
+
+    GSTPTWALK Walk;
+    int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    if (pGCPhys)
+        *pGCPhys = Walk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
+
+    if (pfFlags)
+    {
+        /* The RW and US flags are determined via bitwise AND across all levels. */
+        uint64_t fUpperRwUs = (X86_PTE_RW | X86_PTE_US)
+#  if PGM_GST_TYPE == PGM_TYPE_AMD64
+                            & Walk.Pml4e.u
+                            & Walk.Pdpe.u
+#  endif
+                            & Walk.Pde.u;
+        fUpperRwUs |= ~(uint64_t)(X86_PTE_RW | X86_PTE_US);
+
+        /* The NX bit is determined via bitwise OR across all levels. */
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
-            /* The NX bit is determined by a bitwise OR between the PT and PD */
-            if (((Pte.u | Pde.u) & X86_PTE_PAE_NX) && CPUMIsGuestNXEnabled(pVCpu))
+        bool    fUpperNx    = 0
+#  if PGM_GST_TYPE == PGM_TYPE_AMD64
+                            | Walk.Pml4e.n.u1NoExecute
+                            | Walk.Pdpe.lm.u1NoExecute
+#  endif
+                            | Walk.Pde.n.u1NoExecute;
+# endif
+
+        if (!Walk.Core.fBigPage)
+        {
+            *pfFlags = (Walk.Pte.u & ~GST_PTE_PG_MASK) & fUpperRwUs;
+# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
+            if (Walk.Pte.n.u1NoExecute || fUpperNx)
+            {
+                Assert(GST_IS_NX_ACTIVE(pVCpu)); /* should trigger RSVD error otherwise. */
                 *pfFlags |= X86_PTE_PAE_NX;
-# endif
-        }
-        if (pGCPhys)
-            *pGCPhys = Pte.u & GST_PTE_PG_MASK;
-    }
-    else
-    {
-        /*
-         * Map big to 4k PTE and store the result
-         */
-        if (pfFlags)
-        {
-            *pfFlags = (Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
-                     | ((Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT);
+            }
+# endif
+        }
+        else
+        {
+            *pfFlags = (  (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
+                        | ((Walk.Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT))
+                     & fUpperRwUs;
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
-            if ((Pde.u & X86_PTE_PAE_NX) && CPUMIsGuestNXEnabled(pVCpu))
+            if (fUpperNx)
+            {
+                Assert(GST_IS_NX_ACTIVE(pVCpu)); /* should trigger RSVD error otherwise. */
                 *pfFlags |= X86_PTE_PAE_NX;
-# endif
-        }
-        if (pGCPhys)
-            *pGCPhys = GST_GET_PDE_BIG_PG_GCPHYS(Pde) | (GCPtr & (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK));
-    }
+            }
+# endif
+        }
+    }
+
     return VINF_SUCCESS;
+
 #else
 # error "shouldn't be here!"
@@ -175,68 +316,30 @@
 PGM_GST_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
 {
+    Assert((cb & PAGE_OFFSET_MASK) == 0);
+
 #if PGM_GST_TYPE == PGM_TYPE_32BIT \
  || PGM_GST_TYPE == PGM_TYPE_PAE \
  || PGM_GST_TYPE == PGM_TYPE_AMD64
-
-    Assert((cb & PAGE_OFFSET_MASK) == 0);
-
-# if PGM_GST_TYPE != PGM_TYPE_AMD64
-    /* Boundary check. */
-    if (GCPtr >= _4G)
-        return VERR_PAGE_TABLE_NOT_PRESENT;
-# endif
-
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
     for (;;)
     {
-        /*
-         * Get the PD entry.
-         */
-# if PGM_GST_TYPE == PGM_TYPE_32BIT
-        PX86PDE pPde = pgmGstGet32bitPDEPtr(&pVCpu->pgm.s, GCPtr);
-
-# elif PGM_GST_TYPE == PGM_TYPE_PAE
-        /* pgmGstGetPaePDEPtr will return 0 if the PDPTE is marked as not present
-         * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx)
-         */
-        PX86PDEPAE pPde = pgmGstGetPaePDEPtr(&pVCpu->pgm.s, GCPtr);
-        Assert(pPde);
-        if (!pPde)
-            return VERR_PAGE_TABLE_NOT_PRESENT;
-# elif PGM_GST_TYPE == PGM_TYPE_AMD64
-        /** @todo Setting the r/w, u/s & nx bits might have no effect depending on the pdpte & pml4 values */
-        PX86PDEPAE pPde = pgmGstGetLongModePDEPtr(&pVCpu->pgm.s, GCPtr);
-        Assert(pPde);
-        if (!pPde)
-            return VERR_PAGE_TABLE_NOT_PRESENT;
-# endif
-        GSTPDE Pde = *pPde;
-        Assert(Pde.n.u1Present);
-        if (!Pde.n.u1Present)
-            return VERR_PAGE_TABLE_NOT_PRESENT;
-
-        if (    !Pde.b.u1Size
-# if PGM_GST_TYPE == PGM_TYPE_32BIT
-            ||  !CPUMIsGuestPageSizeExtEnabled(pVCpu)
-# endif
-            )
+        GSTPTWALK Walk;
+        int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
+        if (RT_FAILURE(rc))
+            return rc;
+
+        if (!Walk.Core.fBigPage)
         {
             /*
-             * 4KB Page table
+             * 4KB page table.
              *
-             * Walk page tables and pages till we're done.
+             * Walk pages till we're done.
              */
-            PGSTPT pPT;
-            int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
-            if (RT_FAILURE(rc))
-                return rc;
-
             unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
-            while (iPTE < RT_ELEMENTS(pPT->a))
+            while (iPTE < RT_ELEMENTS(Walk.pPt->a))
             {
-                GSTPTE Pte = pPT->a[iPTE];
+                GSTPTE Pte = Walk.pPt->a[iPTE];
                 Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                       | (fFlags & ~GST_PTE_PG_MASK);
-                pPT->a[iPTE] = Pte;
+                Walk.pPt->a[iPTE] = Pte;
 
                 /* next page */
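The PTE update expression above is a classic read-modify-write over a flag mask: everything selected by fMask, plus the page-frame address, survives; fFlags then supplies the new flag bits. Reduced to a standalone helper (mask value illustrative only):

#include <stdint.h>

#define DEMO_PG_MASK UINT64_C(0x000ffffffffff000)   /* address bits, never touched */

/* Keeps the bits selected by fMask plus the page-frame address,
   then ORs in the new flag bits from fFlags. */
static uint64_t demoModifyEntry(uint64_t uEntry, uint64_t fFlags, uint64_t fMask)
{
    return (uEntry & (fMask | DEMO_PG_MASK))
         | (fFlags & ~DEMO_PG_MASK);
}

Clearing the writable bit, say, is demoModifyEntry(u, 0, ~UINT64_C(0x2)): every bit except bit 1 is kept, and nothing new is set.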
@@ -253,12 +356,13 @@
              * 4MB Page table
              */
+            GSTPDE PdeNew;
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
-            Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
+            PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
 # else
-            Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
-# endif
-                  | (fFlags & ~GST_PTE_PG_MASK)
-                  | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
-            *pPde = Pde;
+            PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
+# endif
+                     | (fFlags & ~GST_PTE_PG_MASK)
+                     | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
+            *Walk.pPde = PdeNew;
 
             /* advance */
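The PAT shift in the big-page branch compensates for layout: in a 4K PTE the PAT attribute sits at bit 7, but in a 2M/4M PDE bit 7 is the PS bit, so the attribute moves up to bit 12. A sketch with stand-in names (the real code expresses this via the X86_PDE4M_PAT macros above):

#include <stdint.h>

#define DEMO_PTE_PAT            UINT64_C(0x80)  /* PAT at bit 7 in a 4K PTE */
#define DEMO_PDE4M_PAT_SHIFT    5               /* bit 7 -> bit 12 in a big PDE */

/* Relocates the PAT attribute when 4K-style flags are applied to a big PDE. */
static uint64_t demoRelocatePat(uint64_t fFlags4K)
{
    return (fFlags4K & ~DEMO_PTE_PAT)
         | ((fFlags4K & DEMO_PTE_PAT) << DEMO_PDE4M_PAT_SHIFT);
}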
@@ -279,10 +383,10 @@
 
 /**
- * Retrieve guest PDE information
+ * Retrieve guest PDE information.
  *
  * @returns VBox status code.
  * @param   pVCpu       The VMCPU handle.
- * @param   GCPtr       Guest context pointer
- * @param   pPDE        Pointer to guest PDE structure
+ * @param   GCPtr       Guest context pointer.
+ * @param   pPDE        Pointer to guest PDE structure.
  */
 PGM_GST_DECL(int, GetPDE)(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPDE)
@@ -294,18 +398,30 @@
 # if PGM_GST_TYPE != PGM_TYPE_AMD64
     /* Boundary check. */
-    if (GCPtr >= _4G)
+    if (RT_UNLIKELY(GCPtr >= _4G))
         return VERR_PAGE_TABLE_NOT_PRESENT;
 # endif
 
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
-    X86PDE    Pde = pgmGstGet32bitPDE(&pVCpu->pgm.s, GCPtr);
+    unsigned    iPd = (GCPtr >> GST_PD_SHIFT) & GST_PD_MASK;
+    PX86PD      pPd = pgmGstGet32bitPDPtr(pVCpu);
+
 # elif PGM_GST_TYPE == PGM_TYPE_PAE
-    X86PDEPAE Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr);
+    unsigned    iPd;
+    PCX86PDPAE  pPd = pgmGstGetPaePDPtr(pVCpu, GCPtr, &iPd, NULL);
+
 # elif PGM_GST_TYPE == PGM_TYPE_AMD64
-    X86PDEPAE Pde = pgmGstGetLongModePDE(&pVCpu->pgm.s, GCPtr);
-# endif
-
-    pPDE->u = (X86PGPAEUINT)Pde.u;
+    PX86PML4E   pPml4eIgn;
+    X86PDPE     PdpeIgn;
+    unsigned    iPd;
+    PCX86PDPAE  pPd = pgmGstGetLongModePDPtr(pVCpu, GCPtr, &pPml4eIgn, &PdpeIgn, &iPd);
+    /* Note! We do not return an effective PDE here as we do for the PTE in the GetPage method. */
+# endif
+
+    if (RT_LIKELY(pPd))
+        pPDE->u = (X86PGPAEUINT)pPd->a[iPd].u;
+    else
+        pPDE->u = 0;
     return VINF_SUCCESS;
+
 #else
     AssertFailed();
@@ -333,14 +449,14 @@
     Assert(pCur->enmType != PGMVIRTHANDLERTYPE_HYPERVISOR);
 
-#if PGM_GST_TYPE == PGM_TYPE_32BIT
-    PX86PD          pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
-#endif
+# if PGM_GST_TYPE == PGM_TYPE_32BIT
+    PX86PD          pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
+# endif
 
     RTGCPTR         GCPtr  = pCur->Core.Key;
-#if PGM_GST_TYPE != PGM_TYPE_AMD64
+# if PGM_GST_TYPE != PGM_TYPE_AMD64
     /* skip all stuff above 4GB if not AMD64 mode. */
-    if (GCPtr >= _4G)
+    if (RT_UNLIKELY(GCPtr >= _4G))
         return 0;
-#endif
+# endif
 
     unsigned        offPage = GCPtr & PAGE_OFFSET_MASK;
@@ -348,18 +464,22 @@
     while (iPage < pCur->cPages)
     {
-#if PGM_GST_TYPE == PGM_TYPE_32BIT
+# if PGM_GST_TYPE == PGM_TYPE_32BIT
         X86PDE      Pde = pPDSrc->a[GCPtr >> X86_PD_SHIFT];
-#elif PGM_GST_TYPE == PGM_TYPE_PAE
-        X86PDEPAE   Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr);
-#elif PGM_GST_TYPE == PGM_TYPE_AMD64
-        X86PDEPAE   Pde = pgmGstGetLongModePDE(&pVCpu->pgm.s, GCPtr);
-#endif
-        if (Pde.n.u1Present)
-        {
-            if (    !Pde.b.u1Size
+# elif PGM_GST_TYPE == PGM_TYPE_PAE
+        X86PDEPAE   Pde = pgmGstGetPaePDE(pVCpu, GCPtr);
+# elif PGM_GST_TYPE == PGM_TYPE_AMD64
+        X86PDEPAE   Pde = pgmGstGetLongModePDE(pVCpu, GCPtr);
+# endif
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
-                ||  !(pState->cr4 & X86_CR4_PSE)
-# endif
-                )
+        bool const  fBigPage = Pde.b.u1Size && (pState->cr4 & X86_CR4_PSE);
+# else
+        bool const  fBigPage = Pde.b.u1Size;
+# endif
+        if (    Pde.n.u1Present
+            &&  (  !fBigPage
+                 ? GST_IS_PDE_VALID(pVCpu, Pde)
+                 : GST_IS_BIG_PDE_VALID(pVCpu, Pde)) )
+        {
+            if (!fBigPage)
             {
                 /*
@@ -448,5 +568,5 @@
         else
         {
-            /* not-present. */
+            /* not-present / invalid. */
             for (unsigned cPages = (GST_PT_MASK + 1) - ((GCPtr >> GST_PT_SHIFT) & GST_PT_MASK);
                  cPages && iPage < pCur->cPages;
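The GST_IS_PDE_VALID / GST_IS_BIG_PDE_VALID gating added above boils down to reserved-bit (must-be-zero) tests whose mask depends on the paging mode and whether NX is enabled. The shape of such a predicate, hedged to a one-liner with a caller-supplied mask (the value is purely illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Returns true if no must-be-zero bit is set in the entry.  fMbzMask is
   selected per paging mode and NX setting; the caller supplies it. */
static bool demoIsEntryValid(uint64_t uEntry, uint64_t fMbzMask)
{
    return (uEntry & fMbzMask) == 0;
}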
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp	(revision 30888)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp	(revision 30889)
@@ -292,5 +292,5 @@
                     else
                     {
-                        PX86PDPE pGstPdpe = pgmGstGetPaePDPEPtr(&pVCpu->pgm.s, iPdPt << X86_PDPT_SHIFT);
+                        PX86PDPE pGstPdpe = pgmGstGetPaePDPEPtr(pVCpu, iPdPt << X86_PDPT_SHIFT);
                         if (pGstPdpe)
                             GstPdpe = *pGstPdpe;
@@ -721,5 +721,5 @@
          * Resolve the page directory.
          */
-        PX86PD pPD = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
+        PX86PD pPD = pgmGstGet32bitPDPtr(pVCpu);
         Assert(pPD);
 
@@ -759,5 +759,5 @@
             while (iPT-- > 0)
             {
-                X86PDEPAE Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr);
+                X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr);
 
                 if (   Pde.n.u1Present
@@ -810,5 +810,5 @@
          * Resolve the page directory.
          */
-        PX86PD pPD = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
+        PX86PD pPD = pgmGstGet32bitPDPtr(pVCpu);
         Assert(pPD);
 
@@ -862,5 +862,5 @@
             while (iPT-- > 0)
             {
-                X86PDEPAE Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr);
+                X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr);
 
                 if (   Pde.n.u1Present
Index: /trunk/src/VBox/VMM/VMMR0/PGMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/PGMR0.cpp	(revision 30888)
+++ /trunk/src/VBox/VMM/VMMR0/PGMR0.cpp	(revision 30889)
@@ -5,5 +5,5 @@
 
 /*
- * Copyright (C) 2007 Oracle Corporation
+ * Copyright (C) 2007-2010 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ -30,5 +30,8 @@
 #include <iprt/mem.h>
 
-RT_C_DECLS_BEGIN
+
+/*
+ * Instantiate the ring-0 header/code templates.
+ */
 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_PROT(name)
 #include "PGMR0Bth.h"
@@ -46,6 +49,4 @@
 #include "PGMR0Bth.h"
 #undef PGM_BTH_NAME
-
-RT_C_DECLS_END
 
 
@@ -162,4 +163,5 @@
 }
 
+
 /**
  * Worker function for PGMR3PhysAllocateLargeHandyPage
@@ -186,4 +188,5 @@
     return rc;
 }
+
 
 /**
@@ -208,5 +211,7 @@
 
     /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
-    AssertMsg(enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE || enmShwPagingMode == PGMMODE_PAE_NX || enmShwPagingMode == PGMMODE_AMD64 || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT, ("enmShwPagingMode=%d\n", enmShwPagingMode));
+    AssertMsg(   enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE      || enmShwPagingMode == PGMMODE_PAE_NX
+              || enmShwPagingMode == PGMMODE_AMD64  || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT,
+              ("enmShwPagingMode=%d\n", enmShwPagingMode));
 
 #ifdef VBOX_WITH_STATISTICS
@@ -253,28 +258,28 @@
      * Call the worker.
      *
-     * We pretend the guest is in protected mode without paging, so we can use existing code to build the
-     * nested page tables.
+     * Note! We pretend the guest is in protected mode without paging, so we
+     *       can use existing code to build the nested page tables.
      */
     bool fLockTaken = false;
     switch(enmShwPagingMode)
     {
-    case PGMMODE_32_BIT:
-        rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
-        break;
-    case PGMMODE_PAE:
-    case PGMMODE_PAE_NX:
-        rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
-        break;
-    case PGMMODE_AMD64:
-    case PGMMODE_AMD64_NX:
-        rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
-        break;
-    case PGMMODE_EPT:
-        rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
-        break;
-    default:
-        AssertFailed();
-        rc = VERR_INVALID_PARAMETER;
-        break;
+        case PGMMODE_32_BIT:
+            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
+            break;
+        case PGMMODE_PAE:
+        case PGMMODE_PAE_NX:
+            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
+            break;
+        case PGMMODE_AMD64:
+        case PGMMODE_AMD64_NX:
+            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
+            break;
+        case PGMMODE_EPT:
+            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
+            break;
+        default:
+            AssertFailed();
+            rc = VERR_INVALID_PARAMETER;
+            break;
     }
     if (fLockTaken)
@@ -283,15 +288,16 @@
         pgmUnlock(pVM);
     }
+
     if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
         rc = VINF_SUCCESS;
-    else
     /* Note: hack alert for difficult to reproduce problem. */
-    if (    rc == VERR_PAGE_NOT_PRESENT                 /* SMP only ; disassembly might fail. */
-        ||  rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
-        ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
-        ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
+    else if (   rc == VERR_PAGE_NOT_PRESENT                 /* SMP only; disassembly might fail. */
+             || rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
+             || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
+             || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
     {
         Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
-        /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
+        /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about
+           single VCPU VMs though. */
         rc = VINF_SUCCESS;
     }
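The rc normalization above is deliberately forgiving: a handful of transient walk failures are mapped to success so the faulting instruction is simply re-executed. In isolation the pattern looks like this (status values are stand-ins, not the IPRT codes):

/* Collapses transient page-walk failures into success so the guest retries
   the instruction; every other status is propagated unchanged. */
static int demoNormalizeTrapStatus(int rc)
{
    switch (rc)
    {
        case -601:  /* stand-in for VERR_PAGE_NOT_PRESENT */
        case -602:  /* stand-in for VERR_PAGE_TABLE_NOT_PRESENT */
        case -603:  /* stand-in for VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT */
            return 0;   /* stand-in for VINF_SUCCESS: retry the instruction */
        default:
            return rc;
    }
}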
Index: /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 30888)
+++ /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 30889)
@@ -1309,6 +1309,6 @@
 }
 
-
 #ifdef LOG_ENABLED
+
 /**
  * Disables flushing of the ring-0 debug log.
@@ -1335,5 +1335,6 @@
         pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
 }
-#endif
+
+#endif /* LOG_ENABLED */
 
 /**
