Index: /trunk/include/VBox/vmm/pgm.h
===================================================================
--- /trunk/include/VBox/vmm/pgm.h	(revision 92425)
+++ /trunk/include/VBox/vmm/pgm.h	(revision 92426)
@@ -34,4 +34,5 @@
 #include <VBox/vmm/vmapi.h>
 #include <VBox/vmm/gmm.h>               /* for PGMMREGISTERSHAREDMODULEREQ */
+#include <VBox/vmm/hm_vmx.h>
 #include <iprt/x86.h>
 #include <VBox/param.h>
@@ -295,4 +296,228 @@
 } PGMSLAT;
 
+
+/** @name PGMPTATTRS - PGM page-table attributes.
+ *
+ * This is VirtualBox's combined page table attributes. It combines regular page
+ * table and Intel EPT attributes. It's 64-bit in size so there's ample room for
+ * bits added in the future to EPT or regular page tables (for e.g. Protection Key).
+ *
+ * The following bits map 1:1 (shifted by PGM_PTATTRS_EPT_SHIFT) to the Intel EPT
+ * attributes as these are unique to EPT and fit within 64-bits despite the shift:
+ *   - EPT_R         : Read access.
+ *   - EPT_W         : Write access.
+ *   - EPT_X_SUPER   : Execute or execute for supervisor-mode linear addr access.
+ *   - EPT_MEMTYPE   : EPT memory type.
+ *   - EPT_IGNORE_PAT: Ignore PAT memory type.
+ *   - EPT_X_USER    : Execute access for user-mode linear addresses.
+ *
+ * For regular page tables, the R bit is always 1 (same as P bit).
+ * For Intel EPT, the EPT_R and EPT_W bits are copied to R and W bits respectively.
+ *
+ * The following EPT attributes are mapped to the following positions because they
+ * exist in the regular page tables at these positions OR are exclusive to EPT and
+ * have been mapped to arbitrarily chosen positions:
+ *   - EPT_A               : Accessed                (EPT bit  8 maps to bit  5).
+ *   - EPT_D               : Dirty                   (EPT bit  9 maps to bit  6).
+ *   - EPT_SUPER_SHW_STACK : Supervisor Shadow Stack (EPT bit 60 maps to bit 25).
+ *   - EPT_SUPPRESS_VE_XCPT: Suppress \#VE exception (EPT bit 63 maps to bit 26).
+ *
+ * Bits 12, 11:9 and 24 are deliberately kept unused (correspond to bit PS and bits
+ * 11:9 in the regular page-table structures and to bit 11 in the EPT structures
+ * respectively) as bit 12 is the page-size bit and bits 11:9 are reserved for
+ * use by software and we may want to use/preserve them in the future.
+ *
+ * @{ */
+typedef uint64_t PGMPTATTRS;
+/** Pointer to a PGMPTATTRS type. */
+typedef PGMPTATTRS *PPGMPTATTRS;
+
+/** Read bit (always 1 for regular PT, copy of EPT_R for EPT). */
+#define PGM_PTATTRS_R_SHIFT                         0
+#define PGM_PTATTRS_R_MASK                          RT_BIT_64(PGM_PTATTRS_R_SHIFT)
+/** Write access bit (aka read/write bit for regular PT). */
+#define PGM_PTATTRS_W_SHIFT                         1
+#define PGM_PTATTRS_W_MASK                          RT_BIT_64(PGM_PTATTRS_W_SHIFT)
+/** User-mode access bit. */
+#define PGM_PTATTRS_US_SHIFT                        2
+#define PGM_PTATTRS_US_MASK                         RT_BIT_64(PGM_PTATTRS_US_SHIFT)
+/** Write through cache bit. */
+#define PGM_PTATTRS_PWT_SHIFT                       3
+#define PGM_PTATTRS_PWT_MASK                        RT_BIT_64(PGM_PTATTRS_PWT_SHIFT)
+/** Cache disabled bit. */
+#define PGM_PTATTRS_PCD_SHIFT                       4
+#define PGM_PTATTRS_PCD_MASK                        RT_BIT_64(PGM_PTATTRS_PCD_SHIFT)
+/** Accessed bit. */
+#define PGM_PTATTRS_A_SHIFT                         5
+#define PGM_PTATTRS_A_MASK                          RT_BIT_64(PGM_PTATTRS_A_SHIFT)
+/** Dirty bit. */
+#define PGM_PTATTRS_D_SHIFT                         6
+#define PGM_PTATTRS_D_MASK                          RT_BIT_64(PGM_PTATTRS_D_SHIFT)
+/** The PAT bit. */
+#define PGM_PTATTRS_PAT_SHIFT                       7
+#define PGM_PTATTRS_PAT_MASK                        RT_BIT_64(PGM_PTATTRS_PAT_SHIFT)
+/** The global bit. */
+#define PGM_PTATTRS_G_SHIFT                         8
+#define PGM_PTATTRS_G_MASK                          RT_BIT_64(PGM_PTATTRS_G_SHIFT)
+/** Reserved (bits 12:9) unused. */
+#define PGM_PTATTRS_RSVD_12_9_SHIFT                 9
+#define PGM_PTATTRS_RSVD_12_9_MASK                  UINT64_C(0x0000000000001e00)
+/** Read access bit - EPT only. */
+#define PGM_PTATTRS_EPT_R_SHIFT                     13
+#define PGM_PTATTRS_EPT_R_MASK                      RT_BIT_64(PGM_PTATTRS_EPT_R_SHIFT)
+/** Write access bit - EPT only. */
+#define PGM_PTATTRS_EPT_W_SHIFT                     14
+#define PGM_PTATTRS_EPT_W_MASK                      RT_BIT_64(PGM_PTATTRS_EPT_W_SHIFT)
+/** Execute or execute access for supervisor-mode linear addresses - EPT only. */
+#define PGM_PTATTRS_EPT_X_SUPER_SHIFT               15
+#define PGM_PTATTRS_EPT_X_SUPER_MASK                RT_BIT_64(PGM_PTATTRS_EPT_X_SUPER_SHIFT)
+/** EPT memory type - EPT only. */
+#define PGM_PTATTRS_EPT_MEMTYPE_SHIFT               16
+#define PGM_PTATTRS_EPT_MEMTYPE_MASK                UINT64_C(0x0000000000070000)
+/** Ignore PAT memory type - EPT only. */
+#define PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT            19
+#define PGM_PTATTRS_EPT_IGNORE_PAT_MASK             RT_BIT_64(PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT)
+/** Reserved (bits 22:20) unused. */
+#define PGM_PTATTRS_RSVD_22_20_SHIFT                20
+#define PGM_PTATTRS_RSVD_22_20_MASK                 UINT64_C(0x0000000000700000)
+/** Execute access for user-mode linear addresses - EPT only. */
+#define PGM_PTATTRS_EPT_X_USER_SHIFT                23
+#define PGM_PTATTRS_EPT_X_USER_MASK                 RT_BIT_64(PGM_PTATTRS_EPT_X_USER_SHIFT)
+/** Reserved (bit 24) - unused. */
+#define PGM_PTATTRS_RSVD_24_SHIFT                   24
+#define PGM_PTATTRS_RSVD_24_MASK                    UINT64_C(0x0000000001000000)
+/** Supervisor shadow stack - EPT only. */
+#define PGM_PTATTRS_EPT_SUPER_SHW_STACK_SHIFT       25
+#define PGM_PTATTRS_EPT_SUPER_SHW_STACK_MASK        RT_BIT_64(PGM_PTATTRS_EPT_SUPER_SHW_STACK_SHIFT)
+/** Suppress \#VE exception - EPT only. */
+#define PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT_SHIFT      26
+#define PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT_MASK       RT_BIT_64(PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT_SHIFT)
+/** Reserved (bits 62:27) - unused. */
+#define PGM_PTATTRS_RSVD_62_27_SHIFT                27
+#define PGM_PTATTRS_RSVD_62_27_MASK                 UINT64_C(0x7ffffffff8000000)
+/** No-execute bit. */
+#define PGM_PTATTRS_NX_SHIFT                        63
+#define PGM_PTATTRS_NX_MASK                         RT_BIT_64(PGM_PTATTRS_NX_SHIFT)
+
+RT_BF_ASSERT_COMPILE_CHECKS(PGM_PTATTRS_, UINT64_C(0), UINT64_MAX,
+                            (R, W, US, PWT, PCD, A, D, PAT, G, RSVD_12_9, EPT_R, EPT_W, EPT_X_SUPER, EPT_MEMTYPE, EPT_IGNORE_PAT,
+                             RSVD_22_20, EPT_X_USER, RSVD_24, EPT_SUPER_SHW_STACK, EPT_SUPPRESS_VE_XCPT, RSVD_62_27, NX));
+
+/** The bit position where the EPT specific attributes begin. */
+#define PGM_PTATTRS_EPT_SHIFT                       PGM_PTATTRS_EPT_R_SHIFT
+/** The mask of EPT bits (bits 26:13, i.e. 26:PGM_PTATTRS_EPT_SHIFT). In the future
+ *  we might choose to use higher unused bits for something else, in that case adjust this mask. */
+#define PGM_PTATTRS_EPT_MASK                        UINT64_C(0x0000000007ffe000)
+
+/** The mask of all PGM page attribute bits for regular page-tables. */
+#define PGM_PTATTRS_PT_VALID_MASK                   (  PGM_PTATTRS_R_MASK \
+                                                     | PGM_PTATTRS_W_MASK \
+                                                     | PGM_PTATTRS_US_MASK \
+                                                     | PGM_PTATTRS_PWT_MASK \
+                                                     | PGM_PTATTRS_PCD_MASK \
+                                                     | PGM_PTATTRS_A_MASK \
+                                                     | PGM_PTATTRS_D_MASK \
+                                                     | PGM_PTATTRS_PAT_MASK \
+                                                     | PGM_PTATTRS_G_MASK \
+                                                     | PGM_PTATTRS_NX_MASK)
+
+/** The mask of all PGM page attribute bits for EPT. */
+#define PGM_PTATTRS_EPT_VALID_MASK                  (  PGM_PTATTRS_R_MASK \
+                                                     | PGM_PTATTRS_W_MASK \
+                                                     | PGM_PTATTRS_A_MASK \
+                                                     | PGM_PTATTRS_D_MASK \
+                                                     | PGM_PTATTRS_EPT_R_MASK \
+                                                     | PGM_PTATTRS_EPT_W_MASK \
+                                                     | PGM_PTATTRS_EPT_X_SUPER_MASK \
+                                                     | PGM_PTATTRS_EPT_MEMTYPE_MASK \
+                                                     | PGM_PTATTRS_EPT_IGNORE_PAT_MASK \
+                                                     | PGM_PTATTRS_EPT_X_USER_MASK \
+                                                     | PGM_PTATTRS_EPT_SUPER_SHW_STACK_MASK \
+                                                     | PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT_MASK)
+
+/* The mask of all PGM page attribute bits (combined). */
+#define PGM_PTATTRS_VALID_MASK                      (PGM_PTATTRS_PT_VALID_MASK | PGM_PTATTRS_EPT_VALID_MASK)
+
+/* Verify bits match the regular PT bits. */
+AssertCompile(PGM_PTATTRS_W_SHIFT   == X86_PTE_BIT_RW);
+AssertCompile(PGM_PTATTRS_US_SHIFT  == X86_PTE_BIT_US);
+AssertCompile(PGM_PTATTRS_PWT_SHIFT == X86_PTE_BIT_PWT);
+AssertCompile(PGM_PTATTRS_PCD_SHIFT == X86_PTE_BIT_PCD);
+AssertCompile(PGM_PTATTRS_A_SHIFT   == X86_PTE_BIT_A);
+AssertCompile(PGM_PTATTRS_D_SHIFT   == X86_PTE_BIT_D);
+AssertCompile(PGM_PTATTRS_PAT_SHIFT == X86_PTE_BIT_PAT);
+AssertCompile(PGM_PTATTRS_G_SHIFT   == X86_PTE_BIT_G);
+AssertCompile(PGM_PTATTRS_W_MASK    == X86_PTE_RW);
+AssertCompile(PGM_PTATTRS_US_MASK   == X86_PTE_US);
+AssertCompile(PGM_PTATTRS_PWT_MASK  == X86_PTE_PWT);
+AssertCompile(PGM_PTATTRS_PCD_MASK  == X86_PTE_PCD);
+AssertCompile(PGM_PTATTRS_A_MASK    == X86_PTE_A);
+AssertCompile(PGM_PTATTRS_D_MASK    == X86_PTE_D);
+AssertCompile(PGM_PTATTRS_PAT_MASK  == X86_PTE_PAT);
+AssertCompile(PGM_PTATTRS_G_MASK    == X86_PTE_G);
+AssertCompile(PGM_PTATTRS_NX_MASK   == X86_PTE_PAE_NX);
+
+/* Verify those EPT bits that must map 1:1 (after shifting). */
+AssertCompile(PGM_PTATTRS_EPT_R_SHIFT          - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_READ);
+AssertCompile(PGM_PTATTRS_EPT_W_SHIFT          - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_WRITE);
+AssertCompile(PGM_PTATTRS_EPT_X_SUPER_SHIFT    - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_EXECUTE);
+AssertCompile(PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_IGNORE_PAT);
+AssertCompile(PGM_PTATTRS_EPT_X_USER_SHIFT     - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_USER_EXECUTE);
+/** @} */
+
+
+/**
+ * Page table walk information.
+ *
+ * This provides extensive information regarding page faults (or EPT
+ * violations/misconfigurations) while traversing page tables.
+ */
+typedef struct PGMPTWALK
+{
+    /** The linear address that is being resolved (input). */
+    RTGCPTR         GCPtr;
+
+    /** The second-level physical address (input/output).
+     *  @remarks only valid if fIsSlat is set. */
+    RTGCPHYS        GCPhysNested;
+
+    /** The physical address that is the result of the walk (output).
+     * @remarks This is page aligned and only valid if fSucceeded is set. */
+    RTGCPHYS        GCPhys;
+
+    /** Set if the walk succeeded. */
+    bool            fSucceeded;
+    /** Whether this is a second-level address translation. */
+    bool            fIsSlat;
+    /** Whether the linear address (GCPtr) caused the second-level
+     *  address translation. */
+    bool            fIsLinearAddrValid;
+    /** The level problem arrised at.
+     * PTE is level 1, PDE is level 2, PDPE is level 3, PML4 is level 4, CR3 is
+     * level 8.  This is 0 on success. */
+    uint8_t         uLevel;
+    /** Set if the page isn't present. */
+    bool            fNotPresent;
+    /** Encountered a bad physical address. */
+    bool            fBadPhysAddr;
+    /** Set if there was reserved bit violations. */
+    bool            fRsvdError;
+    /** Set if it involves a big page (2/4 MB). */
+    bool            fBigPage;
+    /** Set if it involves a gigantic page (1 GB). */
+    bool            fGigantPage;
+    /** Set if the second-level fault was caused by an EPT misconfiguration. */
+    bool            fEptMisconfig;
+    bool            afPadding[6];
+
+    /** The effective attributes, PGM_PTATTRS_XXX. */
+    PGMPTATTRS      fEffective;
+} PGMPTWALK;
+/** Pointer to page walk information. */
+typedef PGMPTWALK *PPGMPTWALK;
+/** Pointer to const page walk information. */
+typedef PGMPTWALK const *PCPGMPTWALK;
+
+
 /** Macro for checking if the guest is using paging.
  * @param enmMode   PGMMODE_*.
@@ -351,6 +576,5 @@
 #define PGM_MK_PG_IS_MMIO2           RT_BIT(1)
 /** @}*/
-VMMDECL(int)        PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
-VMMDECL(bool)       PGMGstIsPagePresent(PVMCPUCC pVCpu, RTGCPTR GCPtr);
+VMMDECL(int)        PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk);
 VMMDECL(int)        PGMGstSetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags);
 VMMDECL(int)        PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
Index: /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 92425)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 92426)
@@ -1426,8 +1426,8 @@
     }
 
-    RTGCPHYS    GCPhys;
-    uint64_t    fFlags;
-    int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
-    if (RT_SUCCESS(rc)) { /* probable */ }
+    PGMPTWALK Walk;
+    int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
+    if (RT_SUCCESS(rc))
+        Assert(Walk.fSucceeded); /* probable. */
     else
     {
@@ -1435,5 +1435,5 @@
         return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
     }
-    if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
+    if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
     else
     {
@@ -1441,5 +1441,5 @@
         return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
     }
-    if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
+    if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
     else
     {
@@ -1447,5 +1447,5 @@
         return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
     }
-    GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
+    RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & PAGE_OFFSET_MASK);
     /** @todo Check reserved bits and such stuff. PGM is better at doing
      *        that, so do it when implementing the guest virtual address
@@ -1744,7 +1744,6 @@
         {
             pVCpu->iem.s.CodeTlb.cTlbMisses++;
-            RTGCPHYS    GCPhys;
-            uint64_t    fFlags;
-            int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
+            PGMPTWALK Walk;
+            int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
             if (RT_FAILURE(rc))
             {
@@ -1754,7 +1753,9 @@
 
             AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
+            Assert(Walk.fSucceeded);
             pTlbe->uTag             = uTag;
-            pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
-            pTlbe->GCPhys           = GCPhys;
+            pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D))
+                                    | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
+            pTlbe->GCPhys           = Walk.GCPhys;
             pTlbe->pbMappingR3      = NULL;
         }
@@ -1961,7 +1962,6 @@
     Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
 
-    RTGCPHYS    GCPhys;
-    uint64_t    fFlags;
-    int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
+    PGMPTWALK Walk;
+    int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
     if (RT_FAILURE(rc))
     {
@@ -1969,15 +1969,15 @@
         return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
     }
-    if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
+    if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
     {
         Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
         return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
     }
-    if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
+    if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
     {
         Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
         return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
     }
-    GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
+    RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & PAGE_OFFSET_MASK);
     Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n",  GCPtrNext,  GCPhys,  pVCpu->iem.s.cbOpcode));
     /** @todo Check reserved bits and such stuff. PGM is better at doing
@@ -8137,7 +8137,6 @@
      *        iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
      *        here. */
-    RTGCPHYS    GCPhys;
-    uint64_t    fFlags;
-    int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
+    PGMPTWALK Walk;
+    int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
     if (RT_FAILURE(rc))
     {
@@ -8151,9 +8150,9 @@
     /* If the page is writable and does not have the no-exec bit set, all
        access is allowed.  Otherwise we'll have to check more carefully... */
-    if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
+    if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
     {
         /* Write to read only memory? */
         if (   (fAccess & IEM_ACCESS_TYPE_WRITE)
-            && !(fFlags & X86_PTE_RW)
+            && !(Walk.fEffective & X86_PTE_RW)
             && (   (    pVCpu->iem.s.uCpl == 3
                     && !(fAccess & IEM_ACCESS_WHAT_SYS))
@@ -8166,5 +8165,5 @@
 
         /* Kernel memory accessed by userland? */
-        if (   !(fFlags & X86_PTE_US)
+        if (   !(Walk.fEffective & X86_PTE_US)
             && pVCpu->iem.s.uCpl == 3
             && !(fAccess & IEM_ACCESS_WHAT_SYS))
@@ -8177,5 +8176,5 @@
         /* Executing non-executable memory? */
         if (   (fAccess & IEM_ACCESS_TYPE_EXEC)
-            && (fFlags & X86_PTE_PAE_NX)
+            && (Walk.fEffective & X86_PTE_PAE_NX)
             && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
         {
@@ -8193,5 +8192,5 @@
     /** @todo testcase: check when A and D bits are actually set by the CPU.  */
     uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
-    if ((fFlags & fAccessedDirty) != fAccessedDirty)
+    if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
     {
         int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
@@ -8199,5 +8198,5 @@
     }
 
-    GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
+    RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & PAGE_OFFSET_MASK);
     *pGCPhysMem = GCPhys;
     return VINF_SUCCESS;
Index: /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 92425)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 92426)
@@ -51,5 +51,6 @@
 DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
-static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALKGST pWalk);
+static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
+                          PPGMPTWALKGST pGstWalk);
 #endif
 static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
@@ -1723,15 +1724,15 @@
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
  * @param   GCPtr       Guest Context virtual address of the page.
- * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
- * @param   pGCPhys     Where to store the GC physical address of the page.
- *                      This is page aligned. The fact that the
- */
-VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
+ * @param   pWalk       Where to store the page walk information.
+ */
+VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
 {
     VMCPU_ASSERT_EMT(pVCpu);
+    Assert(pWalk);
+    RT_BZERO(pWalk, sizeof(*pWalk));
     uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
     AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
     AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
-    return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pfFlags, pGCPhys);
+    return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
 }
 
@@ -1753,6 +1754,7 @@
  * @param   pWalk       Where to return the walk result. This is valid for some
  *                      error codes as well.
- */
-int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
+ * @param   pGstWalk    The guest mode specific page walk information.
+ */
+int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
 {
     VMCPU_ASSERT_EMT(pVCpu);
@@ -1760,20 +1762,20 @@
     {
         case PGMMODE_32_BIT:
-            pWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
-            return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
+            return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);
 
         case PGMMODE_PAE:
         case PGMMODE_PAE_NX:
-            pWalk->enmType = PGMPTWALKGSTTYPE_PAE;
-            return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
+            return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);
 
         case PGMMODE_AMD64:
         case PGMMODE_AMD64_NX:
-            pWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
-            return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
+            return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);
 
         case PGMMODE_REAL:
         case PGMMODE_PROTECTED:
-            pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
             return VERR_PGM_NOT_USED_IN_MODE;
 
@@ -1784,5 +1786,5 @@
         default:
             AssertFailed();
-            pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
             return VERR_PGM_NOT_USED_IN_MODE;
     }
@@ -1813,18 +1815,21 @@
  * @param   pWalk               Where to return the walk result. This is valid for
  *                              some error codes as well.
+ * @param   pGstWalk            The second-level paging-mode specific walk
+ *                              information.
  */
 static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
-                          PPGMPTWALKGST pWalk)
-{
-    Assert(pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT);
+                          PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
+{
+    Assert(   pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
+           && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
     switch (pVCpu->pgm.s.enmGuestSlatMode)
     {
         case PGMSLAT_EPT:
-            pWalk->enmType = PGMPTWALKGSTTYPE_EPT;
-            return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, &pWalk->u.Ept);
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
+            return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
 
         default:
             AssertFailed();
-            pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
             return VERR_PGM_NOT_USED_IN_MODE;
     }
@@ -1851,6 +1856,7 @@
  *                      the result of this walk.  This is valid for some error
  *                      codes as well.
- */
-int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
+ * @param   pGstWalk    The guest-mode specific walk information.
+ */
+int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
 {
     /*
@@ -1858,14 +1864,14 @@
      * We also limit ourselves to the next page.
      */
-    if (   pWalk->u.Core.fSucceeded
-        && GCPtr - pWalk->u.Core.GCPtr == PAGE_SIZE)
-    {
-        Assert(pWalk->u.Core.uLevel == 0);
-        if (pWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
+    if (   pWalk->fSucceeded
+        && GCPtr - pWalk->GCPtr == PAGE_SIZE)
+    {
+        Assert(pWalk->uLevel == 0);
+        if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
         {
             /*
              * AMD64
              */
-            if (!pWalk->u.Core.fGigantPage && !pWalk->u.Core.fBigPage)
+            if (!pWalk->fGigantPage && !pWalk->fBigPage)
             {
                 /*
@@ -1878,48 +1884,47 @@
                                         | X86_PDE_PCD | X86_PDE_A  | X86_PDE_PAE_NX | X86_PDE_PS;
 
-                if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PD_PAE_SHIFT))
+                if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
                 {
-                    if (pWalk->u.Amd64.pPte)
+                    if (pGstWalk->u.Amd64.pPte)
                     {
                         X86PTEPAE Pte;
-                        Pte.u = pWalk->u.Amd64.pPte[1].u;
-                        if (   (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
+                        Pte.u = pGstWalk->u.Amd64.pPte[1].u;
+                        if (   (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
                             && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
                         {
-
-                            pWalk->u.Core.GCPtr  = GCPtr;
-                            pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
-                            pWalk->u.Amd64.Pte.u = Pte.u;
-                            pWalk->u.Amd64.pPte++;
+                            pWalk->GCPtr  = GCPtr;
+                            pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
+                            pGstWalk->u.Amd64.Pte.u = Pte.u;
+                            pGstWalk->u.Amd64.pPte++;
                             return VINF_SUCCESS;
                         }
                     }
                 }
-                else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PDPT_SHIFT))
+                else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
                 {
                     Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
-                    if (pWalk->u.Amd64.pPde)
+                    if (pGstWalk->u.Amd64.pPde)
                     {
                         X86PDEPAE Pde;
-                        Pde.u = pWalk->u.Amd64.pPde[1].u;
-                        if (   (Pde.u & fPdeSame) == (pWalk->u.Amd64.Pde.u & fPdeSame)
+                        Pde.u = pGstWalk->u.Amd64.pPde[1].u;
+                        if (   (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
                             && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
                         {
                             /* Get the new PTE and check out the first entry. */
                             int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
-                                                               &pWalk->u.Amd64.pPt);
+                                                               &pGstWalk->u.Amd64.pPt);
                             if (RT_SUCCESS(rc))
                             {
-                                pWalk->u.Amd64.pPte = &pWalk->u.Amd64.pPt->a[0];
+                                pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
                                 X86PTEPAE Pte;
-                                Pte.u = pWalk->u.Amd64.pPte->u;
-                                if (   (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
+                                Pte.u = pGstWalk->u.Amd64.pPte->u;
+                                if (   (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
                                     && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
                                 {
-                                    pWalk->u.Core.GCPtr  = GCPtr;
-                                    pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
-                                    pWalk->u.Amd64.Pte.u = Pte.u;
-                                    pWalk->u.Amd64.Pde.u = Pde.u;
-                                    pWalk->u.Amd64.pPde++;
+                                    pWalk->GCPtr  = GCPtr;
+                                    pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
+                                    pGstWalk->u.Amd64.Pte.u = Pte.u;
+                                    pGstWalk->u.Amd64.Pde.u = Pde.u;
+                                    pGstWalk->u.Amd64.pPde++;
                                     return VINF_SUCCESS;
                                 }
@@ -1929,10 +1934,10 @@
                 }
             }
-            else if (!pWalk->u.Core.fGigantPage)
+            else if (!pWalk->fGigantPage)
             {
-                if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_2M_BASE_MASK))
+                if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
                 {
-                    pWalk->u.Core.GCPtr   = GCPtr;
-                    pWalk->u.Core.GCPhys += PAGE_SIZE;
+                    pWalk->GCPtr   = GCPtr;
+                    pWalk->GCPhys += PAGE_SIZE;
                     return VINF_SUCCESS;
                 }
@@ -1940,8 +1945,8 @@
             else
             {
-                if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_1G_BASE_MASK))
+                if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
                 {
-                    pWalk->u.Core.GCPtr   = GCPtr;
-                    pWalk->u.Core.GCPhys += PAGE_SIZE;
+                    pWalk->GCPtr   = GCPtr;
+                    pWalk->GCPhys += PAGE_SIZE;
                     return VINF_SUCCESS;
                 }
@@ -1950,21 +1955,5 @@
     }
     /* Case we don't handle.  Do full walk. */
-    return pgmGstPtWalk(pVCpu, GCPtr, pWalk);
-}
-
-
-/**
- * Checks if the page is present.
- *
- * @returns true if the page is present.
- * @returns false if the page is not present.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   GCPtr       Address within the page.
- */
-VMMDECL(bool) PGMGstIsPagePresent(PVMCPUCC pVCpu, RTGCPTR GCPtr)
-{
-    VMCPU_ASSERT_EMT(pVCpu);
-    int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
-    return RT_SUCCESS(rc);
+    return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
 }
 
@@ -3179,13 +3168,9 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     /* Update the guest SLAT mode if it's a nested-guest. */
-    if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
-    {
-        if (PGMMODE_WITH_PAGING(enmGuestMode))
-            pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
-        else
-            pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
-    }
+    if (   CPUMIsGuestVmxEptPagingEnabled(pVCpu)
+        && PGMMODE_WITH_PAGING(enmGuestMode))
+        pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
     else
-        Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT);
+        pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
 #endif
 
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 92425)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 92426)
@@ -170,8 +170,8 @@
  *
  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
- * @param   pGstWalk        The guest page table walk result.
+ * @param   pWalk           The guest page table walk result.
  * @param   uErr            The error code.
  */
-PGM_BTH_DECL(VBOXSTRICTRC, Trap0eHandlerGuestFault)(PVMCPUCC pVCpu, PGSTPTWALK pGstWalk, RTGCUINT uErr)
+PGM_BTH_DECL(VBOXSTRICTRC, Trap0eHandlerGuestFault)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, RTGCUINT uErr)
 {
     /*
@@ -181,15 +181,15 @@
                      ? uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID)
                      : uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
-    if (   pGstWalk->Core.fRsvdError
-        || pGstWalk->Core.fBadPhysAddr)
+    if (   pWalk->fRsvdError
+        || pWalk->fBadPhysAddr)
     {
         uNewErr |= X86_TRAP_PF_RSVD | X86_TRAP_PF_P;
-        Assert(!pGstWalk->Core.fNotPresent);
-    }
-    else if (!pGstWalk->Core.fNotPresent)
+        Assert(!pWalk->fNotPresent);
+    }
+    else if (!pWalk->fNotPresent)
         uNewErr |= X86_TRAP_PF_P;
     TRPMSetErrorCode(pVCpu, uNewErr);
 
-    LogFlow(("Guest trap; cr2=%RGv uErr=%RGv lvl=%d\n", pGstWalk->Core.GCPtr, uErr, pGstWalk->Core.uLevel));
+    LogFlow(("Guest trap; cr2=%RGv uErr=%RGv lvl=%d\n", pWalk->GCPtr, uErr, pWalk->uLevel));
     STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2GuestTrap; });
     return VINF_EM_RAW_GUEST_TRAP;
@@ -211,5 +211,6 @@
  * @param   pvFault         The fault address.
  * @param   pPage           The guest page at @a pvFault.
- * @param   pGstWalk        The guest page table walk result.
+ * @param   pWalk           The guest page table walk result.
+ * @param   pGstWalk        The guest paging-mode specific walk information.
  * @param   pfLockTaken     PGM lock taken here or not (out).  This is true
  *                          when we're called.
@@ -218,4 +219,5 @@
                                                                 RTGCPTR pvFault, PPGMPAGE pPage, bool *pfLockTaken
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) || defined(DOXYGEN_RUNNING)
+                                                                , PPGMPTWALK pWalk
                                                                 , PGSTPTWALK pGstWalk
 # endif
@@ -234,5 +236,5 @@
          */
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-        const RTGCPHYS  GCPhysFault = pGstWalk->Core.GCPhys;
+        const RTGCPHYS  GCPhysFault = pWalk->GCPhys;
 # else
         const RTGCPHYS  GCPhysFault = PGM_A20_APPLY(pVCpu, (RTGCPHYS)pvFault);
@@ -277,6 +279,6 @@
                 && pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE
 #   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-                && (pGstWalk->Core.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK))
-                                              == PGM_PTATTRS_W_MASK  /** @todo Remove pGstWalk->Core.fEffectiveUS and X86_PTE_US further down in the sync code. */
+                && (pWalk->fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK))
+                                      == PGM_PTATTRS_W_MASK  /** @todo Remove the PGM_PTATTRS_US_MASK part of pWalk->fEffective (was pGstWalk->Core.fEffectiveUS) and X86_PTE_US further down in the sync code. */
 #   endif
                )
@@ -418,8 +420,9 @@
      * Walk the guest page translation tables and check if it's a guest fault.
      */
+    PGMPTWALK Walk;
     GSTPTWALK GstWalk;
-    rc = PGM_GST_NAME(Walk)(pVCpu, pvFault, &GstWalk);
+    rc = PGM_GST_NAME(Walk)(pVCpu, pvFault, &Walk, &GstWalk);
     if (RT_FAILURE_NP(rc))
-        return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));
+        return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &Walk, uErr));
 
     /* assert some GstWalk sanity. */
@@ -432,16 +435,16 @@
     /*AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u)); - ditto */
     /*AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u)); - ditto */
-    Assert(GstWalk.Core.fSucceeded);
+    Assert(Walk.fSucceeded);
 
     if (uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID))
     {
         if (    (   (uErr & X86_TRAP_PF_RW)
-                 && !(GstWalk.Core.fEffective & PGM_PTATTRS_W_MASK)
+                 && !(Walk.fEffective & PGM_PTATTRS_W_MASK)
                  && (   (uErr & X86_TRAP_PF_US)
                      || CPUMIsGuestR0WriteProtEnabled(pVCpu)) )
-            ||  ((uErr & X86_TRAP_PF_US) && !(GstWalk.Core.fEffective & PGM_PTATTRS_US_MASK))
-            ||  ((uErr & X86_TRAP_PF_ID) && (GstWalk.Core.fEffective & PGM_PTATTRS_NX_MASK))
+            ||  ((uErr & X86_TRAP_PF_US) && !(Walk.fEffective & PGM_PTATTRS_US_MASK))
+            ||  ((uErr & X86_TRAP_PF_ID) &&  (Walk.fEffective & PGM_PTATTRS_NX_MASK))
            )
-            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));
+            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &Walk, uErr));
     }
 
@@ -468,5 +471,5 @@
     }
 #   endif
-    if (GstWalk.Core.fBigPage)
+    if (Walk.fBigPage)
     {
         Assert(GstWalk.Pde.u & X86_PDE_PS);
@@ -521,6 +524,10 @@
         Assert(GstWalk.Pte.u == GstWalk.pPte->u);
     }
+#if 0
+    /* Disabling this since it's not reliable for SMP, see @bugref{10092#c22}. */
     AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u,
               ("%RX64 %RX64 pPte=%p pPde=%p Pte=%RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u, GstWalk.pPte, GstWalk.pPde, (uint64_t)GstWalk.pPte->u));
+#endif
+
 #  else  /* !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
     GSTPDE const PdeSrcDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A}; /** @todo eliminate this */
@@ -541,8 +548,8 @@
         PPGMPAGE pPage;
 #   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-        rc = pgmPhysGetPageEx(pVM, GstWalk.Core.GCPhys, &pPage);
+        rc = pgmPhysGetPageEx(pVM, Walk.GCPhys, &pPage);
         if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
             return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage,
-                                                                                 pfLockTaken, &GstWalk));
+                                                                                 pfLockTaken, &Walk, &GstWalk));
         rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr);
 #   else
@@ -618,5 +625,5 @@
 #ifdef DEBUG_bird
         AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u || pVM->cCpus > 1, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u)); // - triggers with smp w7 guests.
-        AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u || pVM->cCpus > 1, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u)); // - ditto.
+        AssertMsg(Walk.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u || pVM->cCpus > 1, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u)); // - ditto.
 #endif
     }
@@ -669,5 +676,5 @@
      */
 #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-    RTGCPHYS GCPhys = GstWalk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
+    RTGCPHYS GCPhys = Walk.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
 #  else
     RTGCPHYS GCPhys = PGM_A20_APPLY(pVCpu, (RTGCPHYS)pvFault & ~(RTGCPHYS)PAGE_OFFSET_MASK);
@@ -694,5 +701,5 @@
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
         return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, pfLockTaken,
-                                                                             &GstWalk));
+                                                                             &Walk, &GstWalk));
 # else
         return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, pfLockTaken));
@@ -778,5 +785,5 @@
              * Check to see if we need to emulate the instruction if CR0.WP=0.
              */
-            if (    !(GstWalk.Core.fEffective & PGM_PTATTRS_W_MASK)
+            if (    !(Walk.fEffective & PGM_PTATTRS_W_MASK)
                 &&  (CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG
                 &&  CPUMGetGuestCPL(pVCpu) < 3)
@@ -797,10 +804,10 @@
                  */
 #    if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) && 1
-                if (   (GstWalk.Core.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) == PGM_PTATTRS_US_MASK
-                    && (GstWalk.Core.fBigPage || (GstWalk.Pde.u & X86_PDE_RW))
+                if (   (Walk.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) == PGM_PTATTRS_US_MASK
+                    && (Walk.fBigPage || (GstWalk.Pde.u & X86_PDE_RW))
                     && pVM->cCpus == 1 /* Sorry, no go on SMP. Add CFGM option? */)
                 {
-                    Log(("PGM #PF: Netware WP0+RO+US hack: pvFault=%RGp uErr=%#x (big=%d)\n", pvFault, uErr, GstWalk.Core.fBigPage));
-                    rc = pgmShwMakePageSupervisorAndWritable(pVCpu, pvFault, GstWalk.Core.fBigPage, PGM_MK_PG_IS_WRITE_FAULT);
+                    Log(("PGM #PF: Netware WP0+RO+US hack: pvFault=%RGp uErr=%#x (big=%d)\n", pvFault, uErr, Walk.fBigPage));
+                    rc = pgmShwMakePageSupervisorAndWritable(pVCpu, pvFault, Walk.fBigPage, PGM_MK_PG_IS_WRITE_FAULT);
                     if (rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3)
                     {
@@ -817,5 +824,5 @@
                 /* Interpret the access. */
                 rc = VBOXSTRICTRC_TODO(PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault));
-                Log(("PGM #PF: WP0 emulation (pvFault=%RGp uErr=%#x cpl=%d fBig=%d fEffUs=%d)\n", pvFault, uErr, CPUMGetGuestCPL(pVCpu), GstWalk.Core.fBigPage, !!(GstWalk.Core.fEffective & PGM_PTATTRS_US_MASK)));
+                Log(("PGM #PF: WP0 emulation (pvFault=%RGp uErr=%#x cpl=%d fBig=%d fEffUs=%d)\n", pvFault, uErr, CPUMGetGuestCPL(pVCpu), Walk.fBigPage, !!(Walk.fEffective & PGM_PTATTRS_US_MASK)));
                 if (RT_SUCCESS(rc))
                     STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eWPEmulInRZ);
@@ -855,11 +862,11 @@
 #   endif
 #   ifdef VBOX_STRICT
-                RTGCPHYS GCPhys2 = RTGCPHYS_MAX;
-                uint64_t fPageGst = UINT64_MAX;
+                PGMPTWALK GstPageWalk;
+                GstPageWalk.GCPhys = RTGCPHYS_MAX;
                 if (!pVM->pgm.s.fNestedPaging)
                 {
-                    rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, &GCPhys2);
-                    AssertMsg(RT_SUCCESS(rc) && ((fPageGst & X86_PTE_RW) || ((CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG && CPUMGetGuestCPL(pVCpu) < 3)), ("rc=%Rrc fPageGst=%RX64\n", rc, fPageGst));
-                    LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GCPhys2, (uint64_t)fPageGst));
+                    rc = PGMGstGetPage(pVCpu, pvFault, &GstPageWalk);
+                    AssertMsg(RT_SUCCESS(rc) && ((GstPageWalk.fEffective & X86_PTE_RW) || ((CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG && CPUMGetGuestCPL(pVCpu) < 3)), ("rc=%Rrc fPageGst=%RX64\n", rc, GstPageWalk.fEffective));
+                    LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GstPageWalk.GCPhys, GstPageWalk.fEffective));
                 }
 #    if 0 /* Bogus! Triggers incorrectly with w7-64 and later for the SyncPage case: "Pde at %RGv changed behind our back?" */
@@ -867,5 +874,5 @@
                 rc = PGMShwGetPage(pVCpu, pvFault, &fPageShw, NULL);
                 AssertMsg((RT_SUCCESS(rc) && (fPageShw & X86_PTE_RW)) || pVM->cCpus > 1 /* new monitor can be installed/page table flushed between the trap exit and PGMTrap0eHandler */,
-                          ("rc=%Rrc fPageShw=%RX64 GCPhys2=%RGp fPageGst=%RX64 pvFault=%RGv\n", rc, fPageShw, GCPhys2, fPageGst, pvFault));
+                          ("rc=%Rrc fPageShw=%RX64 GCPhys2=%RGp fPageGst=%RX64 pvFault=%RGv\n", rc, fPageShw, GstPageWalk.GCPhys, GstPageWalk.fEffective, pvFault));
 #    endif
 #   endif /* VBOX_STRICT */
@@ -879,6 +886,6 @@
          * mode accesses the page again.
          */
-        else if (   (GstWalk.Core.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) == PGM_PTATTRS_US_MASK
-                 && (GstWalk.Core.fBigPage || (GstWalk.Pde.u & X86_PDE_RW))
+        else if (   (Walk.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) == PGM_PTATTRS_US_MASK
+                 && (Walk.fBigPage || (GstWalk.Pde.u & X86_PDE_RW))
                  &&  pVCpu->pgm.s.cNetwareWp0Hacks > 0
                  &&  (CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG
@@ -909,6 +916,6 @@
         {
             /* Get guest page flags. */
-            uint64_t fPageGst;
-            int rc2 = PGMGstGetPage(pVCpu, pvFault, &fPageGst, NULL);
+            PGMPTWALK GstPageWalk;
+            int rc2 = PGMGstGetPage(pVCpu, pvFault, &GstPageWalk);
             if (RT_SUCCESS(rc2))
             {
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllGst.h	(revision 92425)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllGst.h	(revision 92426)
@@ -24,7 +24,7 @@
  || PGM_GST_TYPE == PGM_TYPE_PAE \
  || PGM_GST_TYPE == PGM_TYPE_AMD64
-DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
-#endif
-PGM_GST_DECL(int,  GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
+DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk);
+#endif
+PGM_GST_DECL(int,  GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk);
 PGM_GST_DECL(int,  ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
 
@@ -76,25 +76,25 @@
 
 
-DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
+DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
 {
     NOREF(iLevel); NOREF(pVCpu);
-    pWalk->Core.fNotPresent     = true;
-    pWalk->Core.uLevel          = (uint8_t)iLevel;
+    pWalk->fNotPresent     = true;
+    pWalk->uLevel          = (uint8_t)iLevel;
     return VERR_PAGE_TABLE_NOT_PRESENT;
 }
 
-DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel, int rc)
+DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel, int rc)
 {
     AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
-    pWalk->Core.fBadPhysAddr    = true;
-    pWalk->Core.uLevel          = (uint8_t)iLevel;
+    pWalk->fBadPhysAddr    = true;
+    pWalk->uLevel          = (uint8_t)iLevel;
     return VERR_PAGE_TABLE_NOT_PRESENT;
 }
 
-DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
+DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
 {
     NOREF(pVCpu);
-    pWalk->Core.fRsvdError      = true;
-    pWalk->Core.uLevel          = (uint8_t)iLevel;
+    pWalk->fRsvdError      = true;
+    pWalk->uLevel          = (uint8_t)iLevel;
     return VERR_PAGE_TABLE_NOT_PRESENT;
 }
@@ -110,22 +110,37 @@
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
  * @param   GCPtr       The guest virtual address to walk by.
- * @param   pWalk       Where to return the walk result. This is always set.
- */
-DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)
+ * @param   pWalk       The common page walk information.
+ * @param   pGstWalk    The guest mode specific page walk information.
+ *
+ * @warning Callers must initialize @a pWalk and @a pGstWalk before calling this
+ *          function.
+ */
+DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk)
 {
     int rc;
 
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+/** @def PGM_GST_SLAT_WALK
+ * Macro to perform guest second-level address translation (EPT or Nested).
+ *
+ * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
+ * @param   a_GCPtrNested   The nested-guest linear address that caused the
+ *                          second-level translation.
+ * @param   a_GCPhysNested  The nested-guest physical address to translate.
+ * @param   a_GCPhysOut     Where to store the guest-physical address (result).
+ */
 # define PGM_GST_SLAT_WALK(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_GCPhysOut, a_pWalk) \
     do { \
         if ((a_pVCpu)->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT) \
         { \
-            PGMPTWALKGST SlatWalk; \
-            int const rcX = pgmGstSlatWalk(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, &SlatWalk); \
+            PGMPTWALK    SlatWalk; \
+            PGMPTWALKGST SlatGstWalk; \
+            int const rcX = pgmGstSlatWalk(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, &SlatWalk, \
+                                           &SlatGstWalk); \
             if (RT_SUCCESS(rcX)) \
-                (a_GCPhysOut) = SlatWalk.u.Core.GCPhys; \
+                (a_GCPhysOut) = SlatWalk.GCPhys; \
             else \
             { \
-                (a_pWalk)->Core = SlatWalk.u.Core; \
+                *(a_pWalk) = SlatWalk; \
                 return rcX; \
             } \
@@ -135,8 +150,9 @@
 
     /*
-     * Init the walking structure.
+     * Init the walking structures.
      */
     RT_ZERO(*pWalk);
-    pWalk->Core.GCPtr = GCPtr;
+    RT_ZERO(*pGstWalk);
+    pWalk->GCPtr = GCPtr;
 
 # if PGM_GST_TYPE == PGM_TYPE_32BIT \
@@ -155,12 +171,12 @@
          * The PML4 table.
          */
-        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pWalk->pPml4);
+        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGstWalk->pPml4);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
 
         PX86PML4E pPml4e;
-        pWalk->pPml4e  = pPml4e  = &pWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
+        pGstWalk->pPml4e  = pPml4e  = &pGstWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
         X86PML4E  Pml4e;
-        pWalk->Pml4e.u = Pml4e.u = pPml4e->u;
+        pGstWalk->Pml4e.u = Pml4e.u = pPml4e->u;
 
         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
@@ -170,6 +186,6 @@
         else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);
 
-        pWalk->Core.fEffective = fEffective = Pml4e.u & (  X86_PML4E_P   | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT
-                                                         | X86_PML4E_PCD | X86_PML4E_A  | X86_PML4E_NX);
+        pWalk->fEffective = fEffective = Pml4e.u & (  X86_PML4E_P   | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT
+                                                    | X86_PML4E_PCD | X86_PML4E_A  | X86_PML4E_NX);
 
         /*
@@ -180,10 +196,10 @@
         PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPdpt, GCPhysPdpt, pWalk);
 #endif
-        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pWalk->pPdpt);
+        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pGstWalk->pPdpt);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
 
 # elif PGM_GST_TYPE == PGM_TYPE_PAE
-        rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pWalk->pPdpt);
+        rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGstWalk->pPdpt);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
@@ -193,7 +209,7 @@
 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
         PX86PDPE pPdpe;
-        pWalk->pPdpe  = pPdpe  = &pWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
+        pGstWalk->pPdpe  = pPdpe  = &pGstWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
         X86PDPE  Pdpe;
-        pWalk->Pdpe.u = Pdpe.u = pPdpe->u;
+        pGstWalk->Pdpe.u = Pdpe.u = pPdpe->u;
 
         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpe)) { /* probable */ }
@@ -204,11 +220,11 @@
 
 # if PGM_GST_TYPE == PGM_TYPE_AMD64
-        pWalk->Core.fEffective = fEffective &= (Pdpe.u & (  X86_PDPE_P   | X86_PDPE_RW  | X86_PDPE_US
-                                                          | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A))
-                                             | (Pdpe.u & X86_PDPE_LM_NX);
+        pWalk->fEffective = fEffective &= (Pdpe.u & (  X86_PDPE_P   | X86_PDPE_RW  | X86_PDPE_US
+                                                     | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A))
+                                        | (Pdpe.u & X86_PDPE_LM_NX);
 # else
         /* NX in the legacy-mode PAE PDPE is reserved. The valid check above ensures the NX bit is not set. */
-        pWalk->Core.fEffective = fEffective  = X86_PDPE_P | X86_PDPE_RW  | X86_PDPE_US | X86_PDPE_A
-                                             | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD));
+        pWalk->fEffective = fEffective  = X86_PDPE_P | X86_PDPE_RW  | X86_PDPE_US | X86_PDPE_A
+                                        | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD));
 # endif
 
@@ -220,10 +236,10 @@
         PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPd, GCPhysPd, pWalk);
 # endif
-        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pWalk->pPd);
+        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pGstWalk->pPd);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);
 
 # elif PGM_GST_TYPE == PGM_TYPE_32BIT
-        rc = pgmGstGet32bitPDPtrEx(pVCpu, &pWalk->pPd);
+        rc = pgmGstGet32bitPDPtrEx(pVCpu, &pGstWalk->pPd);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
@@ -232,7 +248,7 @@
     {
         PGSTPDE pPde;
-        pWalk->pPde  = pPde  = &pWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
+        pGstWalk->pPde  = pPde  = &pGstWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
         GSTPDE  Pde;
-        pWalk->Pde.u = Pde.u = pPde->u;
+        pGstWalk->Pde.u = Pde.u = pPde->u;
         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2);
@@ -253,10 +269,10 @@
             fEffective |= Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
             fEffective |= (Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
-            pWalk->Core.fEffective = fEffective;
+            pWalk->fEffective = fEffective;
             Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
             Assert(fEffective & PGM_PTATTRS_R_MASK);
 
-            pWalk->Core.fBigPage   = true;
-            pWalk->Core.fSucceeded = true;
+            pWalk->fBigPage   = true;
+            pWalk->fSucceeded = true;
             RTGCPHYS GCPhysPde = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
                                | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
@@ -264,6 +280,6 @@
             PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPde, GCPhysPde, pWalk);
 # endif
-            pWalk->Core.GCPhys     = GCPhysPde;
-            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
+            pWalk->GCPhys     = GCPhysPde;
+            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
             return VINF_SUCCESS;
         }
@@ -272,10 +288,10 @@
             return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
-        pWalk->Core.fEffective = fEffective  = Pde.u & (  X86_PDE_P   | X86_PDE_RW  | X86_PDE_US
-                                                        | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
+        pWalk->fEffective = fEffective  = Pde.u & (  X86_PDE_P   | X86_PDE_RW  | X86_PDE_US
+                                                   | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
 # else
-        pWalk->Core.fEffective = fEffective &= (Pde.u & (  X86_PDE_P   | X86_PDE_RW  | X86_PDE_US
-                                                         | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A))
-                                             | (Pde.u & X86_PDE_PAE_NX);
+        pWalk->fEffective = fEffective &= (Pde.u & (  X86_PDE_P   | X86_PDE_RW  | X86_PDE_US
+                                                    | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A))
+                                        | (Pde.u & X86_PDE_PAE_NX);
 # endif
 
@@ -287,5 +303,5 @@
         PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPt, GCPhysPt, pWalk);
 # endif
-        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pWalk->pPt);
+        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pGstWalk->pPt);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
@@ -293,7 +309,7 @@
     {
         PGSTPTE pPte;
-        pWalk->pPte  = pPte  = &pWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
+        pGstWalk->pPte  = pPte  = &pGstWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
         GSTPTE  Pte;
-        pWalk->Pte.u = Pte.u = pPte->u;
+        pGstWalk->Pte.u = Pte.u = pPte->u;
 
         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
@@ -313,9 +329,9 @@
 # endif
         fEffective |= Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
-        pWalk->Core.fEffective = fEffective;
+        pWalk->fEffective = fEffective;
         Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
         Assert(fEffective & PGM_PTATTRS_R_MASK);
 
-        pWalk->Core.fSucceeded = true;
+        pWalk->fSucceeded = true;
         RTGCPHYS GCPhysPte = GST_GET_PTE_GCPHYS(Pte)
                            | (GCPtr & PAGE_OFFSET_MASK);
@@ -323,5 +339,5 @@
         PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPte, GCPhysPte, pWalk);
 # endif
-        pWalk->Core.GCPhys     = GCPhysPte;
+        pWalk->GCPhys     = GCPhysPte;
         return VINF_SUCCESS;
     }
@@ -341,9 +357,7 @@
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   GCPtr       Guest Context virtual address of the page.
- * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
- * @param   pGCPhys     Where to store the GC physical address of the page.
- *                      This is page aligned!
- */
-PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
+ * @param   pWalk       Where to store the page walk info.
+ */
+PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
 {
 #if PGM_GST_TYPE == PGM_TYPE_REAL \
@@ -352,8 +366,10 @@
      * Fake it.
      */
-    if (pfFlags)
-        *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
-    if (pGCPhys)
-        *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
+    RT_ZERO(*pWalk);
+    pWalk->fSucceeded = true;
+    pWalk->GCPtr      = GCPtr;
+    pWalk->GCPhys     = GCPtr & PAGE_BASE_GC_MASK;
+    pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
+    /* Note: GCPhys already assigned above (identity mapping when paging is disabled). */
     NOREF(pVCpu);
     return VINF_SUCCESS;
@@ -363,35 +379,34 @@
    || PGM_GST_TYPE == PGM_TYPE_AMD64
 
-    GSTPTWALK Walk;
-    int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
+    PGMPTWALK Walk;
+    GSTPTWALK GstWalk;
+    RT_ZERO(Walk);
+    RT_ZERO(GstWalk);
+    int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk, &GstWalk);
     if (RT_FAILURE(rc))
         return rc;
 
-    if (pGCPhys)
-        *pGCPhys = Walk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
-
-    if (pfFlags)
+    uint64_t fFlags;
+    if (!Walk.fBigPage)
+        fFlags = (GstWalk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US))                      /* NX not needed */
+               | (Walk.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK))
+# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
+               | (Walk.fEffective & PGM_PTATTRS_NX_MASK)
+# endif
+                 ;
+    else
     {
-        if (!Walk.Core.fBigPage)
-            *pfFlags = (Walk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US))                      /* NX not needed */
-                     | (Walk.Core.fEffective & (  PGM_PTATTRS_W_MASK
-                                                | PGM_PTATTRS_US_MASK))
+        fFlags = (GstWalk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS))   /* NX not needed */
+               | (Walk.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK | PGM_PTATTRS_PAT_MASK))
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
-                     | (Walk.Core.fEffective & PGM_PTATTRS_NX_MASK)
-# endif
-                     ;
-        else
-        {
-            *pfFlags = (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS))   /* NX not needed */
-                     | (Walk.Core.fEffective & (  PGM_PTATTRS_W_MASK
-                                                | PGM_PTATTRS_US_MASK
-                                                | PGM_PTATTRS_PAT_MASK))
-# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
-                     | (Walk.Core.fEffective & PGM_PTATTRS_NX_MASK)
-# endif
-                     ;
-        }
+               | (Walk.fEffective & PGM_PTATTRS_NX_MASK)
+# endif
+               ;
     }
 
+    pWalk->fSucceeded = true;
+    pWalk->GCPtr      = GCPtr;
+    pWalk->GCPhys     = Walk.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
+    pWalk->fEffective = fFlags;
     return VINF_SUCCESS;
 
@@ -425,10 +440,11 @@
     for (;;)
     {
-        GSTPTWALK Walk;
-        int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
+        PGMPTWALK Walk;
+        GSTPTWALK GstWalk;
+        int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk, &GstWalk);
         if (RT_FAILURE(rc))
             return rc;
 
-        if (!Walk.Core.fBigPage)
+        if (!Walk.fBigPage)
         {
             /*
@@ -438,10 +454,10 @@
              */
             unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
-            while (iPTE < RT_ELEMENTS(Walk.pPt->a))
+            while (iPTE < RT_ELEMENTS(GstWalk.pPt->a))
             {
-                GSTPTE Pte = Walk.pPt->a[iPTE];
+                GSTPTE Pte = GstWalk.pPt->a[iPTE];
                 Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                       | (fFlags & ~GST_PTE_PG_MASK);
-                Walk.pPt->a[iPTE] = Pte;
+                GstWalk.pPt->a[iPTE] = Pte;
 
                 /* next page */
@@ -460,11 +476,11 @@
             GSTPDE PdeNew;
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
-            PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
+            PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
 # else
-            PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
+            PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
 # endif
                      | (fFlags & ~GST_PTE_PG_MASK)
                      | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
-            *Walk.pPde = PdeNew;
+            *GstWalk.pPde = PdeNew;
 
             /* advance */
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h	(revision 92425)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h	(revision 92426)
@@ -17,27 +17,27 @@
 
 #if PGM_GST_TYPE == PGM_TYPE_EPT
-DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
+DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
 {
     NOREF(pVCpu);
-    pWalk->Core.fNotPresent     = true;
-    pWalk->Core.uLevel          = (uint8_t)iLevel;
+    pWalk->fNotPresent     = true;
+    pWalk->uLevel          = (uint8_t)iLevel;
     return VERR_PAGE_TABLE_NOT_PRESENT;
 }
 
 
-DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel, int rc)
+DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel, int rc)
 {
     AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
-    pWalk->Core.fBadPhysAddr    = true;
-    pWalk->Core.uLevel          = (uint8_t)iLevel;
+    pWalk->fBadPhysAddr    = true;
+    pWalk->uLevel          = (uint8_t)iLevel;
     return VERR_PAGE_TABLE_NOT_PRESENT;
 }
 
 
-DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
+DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
 {
     NOREF(pVCpu);
-    pWalk->Core.fRsvdError      = true;
-    pWalk->Core.uLevel          = (uint8_t)iLevel;
+    pWalk->fRsvdError      = true;
+    pWalk->uLevel          = (uint8_t)iLevel;
     return VERR_PAGE_TABLE_NOT_PRESENT;
 }
@@ -45,15 +45,17 @@
 
 DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(Walk)(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
-                                            PGSTPTWALK pWalk)
-{
+                                            PPGMPTWALK pWalk, PGSTPTWALK pGstWalk)
+{
+    /** @todo implement figuring out fEptMisconfig. */
     /*
-     * Init walk structure.
+     * Init walk structures.
      */
-    int rc;
     RT_ZERO(*pWalk);
-    pWalk->Core.GCPtr              = GCPtrNested;
-    pWalk->Core.GCPhysNested       = GCPhysNested;
-    pWalk->Core.fIsSlat            = true;
-    pWalk->Core.fIsLinearAddrValid = fIsLinearAddrValid;
+    RT_ZERO(*pGstWalk);
+
+    pWalk->GCPtr              = GCPtrNested;
+    pWalk->GCPhysNested       = GCPhysNested;
+    pWalk->fIsLinearAddrValid = fIsLinearAddrValid;
+    pWalk->fIsSlat            = true;
 
     /*
@@ -82,12 +84,12 @@
     uint64_t fEffective;
     {
-        rc = pgmGstGetEptPML4PtrEx(pVCpu, &pWalk->pPml4);
+        int rc = pgmGstGetEptPML4PtrEx(pVCpu, &pGstWalk->pPml4);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
 
         PEPTPML4E pPml4e;
-        pWalk->pPml4e = pPml4e = &pWalk->pPml4->a[(GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK];
+        pGstWalk->pPml4e = pPml4e = &pGstWalk->pPml4->a[(GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK];
         EPTPML4E  Pml4e;
-        pWalk->Pml4e.u = Pml4e.u = pPml4e->u;
+        pGstWalk->Pml4e.u = Pml4e.u = pPml4e->u;
 
         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
@@ -107,7 +109,7 @@
                    | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                    | fEffectiveEpt;
-        pWalk->Core.fEffective = fEffective;
-
-        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pWalk->pPdpt);
+        pWalk->fEffective = fEffective;
+
+        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pGstWalk->pPdpt);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
@@ -115,7 +117,7 @@
     {
         PEPTPDPTE pPdpte;
-        pWalk->pPdpte = pPdpte = &pWalk->pPdpt->a[(GCPhysNested >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
+        pGstWalk->pPdpte = pPdpte = &pGstWalk->pPdpt->a[(GCPhysNested >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
         EPTPDPTE  Pdpte;
-        pWalk->Pdpte.u = Pdpte.u = pPdpte->u;
+        pGstWalk->Pdpte.u = Pdpte.u = pPdpte->u;
 
         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpte)) { /* probable */ }
@@ -134,5 +136,5 @@
                         | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                         | (fEffectiveEpt & fCumulativeEpt);
-            pWalk->Core.fEffective = fEffective;
+            pWalk->fEffective = fEffective;
         }
         else if (GST_IS_BIG_PDPE_VALID(pVCpu, Pdpte))
@@ -151,11 +153,11 @@
             fEffective |= RT_BF_MAKE(PGM_PTATTRS_D,           fDirty)
                         | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
-            pWalk->Core.fEffective = fEffective;
-
-            pWalk->Core.fGigantPage  = true;
-            pWalk->Core.fSucceeded   = true;
-            pWalk->Core.GCPhys       = GST_GET_BIG_PDPE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pdpte)
+            pWalk->fEffective = fEffective;
+
+            pWalk->fGigantPage  = true;
+            pWalk->fSucceeded   = true;
+            pWalk->GCPhys       = GST_GET_BIG_PDPE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pdpte)
                                      | (GCPhysNested & GST_GIGANT_PAGE_OFFSET_MASK);
-            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
+            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
             return VINF_SUCCESS;
         }
@@ -164,9 +166,11 @@
     {
         PGSTPDE pPde;
-        pWalk->pPde  = pPde  = &pWalk->pPd->a[(GCPhysNested >> GST_PD_SHIFT) & GST_PD_MASK];
+        pGstWalk->pPde  = pPde  = &pGstWalk->pPd->a[(GCPhysNested >> GST_PD_SHIFT) & GST_PD_MASK];
         GSTPDE  Pde;
-        pWalk->Pde.u = Pde.u = pPde->u;
+        pGstWalk->Pde.u = Pde.u = pPde->u;
+
         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
         else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, 2);
+
         if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
         {
@@ -187,11 +191,11 @@
             fEffective |= RT_BF_MAKE(PGM_PTATTRS_D,           fDirty)
                         | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
-            pWalk->Core.fEffective = fEffective;
-
-            pWalk->Core.fBigPage     = true;
-            pWalk->Core.fSucceeded   = true;
-            pWalk->Core.GCPhys       = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
+            pWalk->fEffective = fEffective;
+
+            pWalk->fBigPage     = true;
+            pWalk->fSucceeded   = true;
+            pWalk->GCPhys       = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
                                      | (GCPhysNested & GST_BIG_PAGE_OFFSET_MASK);
-            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
+            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
             return VINF_SUCCESS;
         }
@@ -209,7 +213,7 @@
                     | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                     | (fEffectiveEpt & fCumulativeEpt);
-        pWalk->Core.fEffective = fEffective;
-
-        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pWalk->pPt);
+        pWalk->fEffective = fEffective;
+
+        int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pGstWalk->pPt);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
@@ -217,7 +221,7 @@
     {
         PGSTPTE pPte;
-        pWalk->pPte  = pPte  = &pWalk->pPt->a[(GCPhysNested >> GST_PT_SHIFT) & GST_PT_MASK];
+        pGstWalk->pPte  = pPte  = &pGstWalk->pPt->a[(GCPhysNested >> GST_PT_SHIFT) & GST_PT_MASK];
         GSTPTE  Pte;
-        pWalk->Pte.u = Pte.u = pPte->u;
+        pGstWalk->Pte.u = Pte.u = pPte->u;
 
         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
@@ -240,9 +244,8 @@
         fEffective |= RT_BF_MAKE(PGM_PTATTRS_D,           fDirty)
                     | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
-        pWalk->Core.fEffective = fEffective;
-
-        pWalk->Core.fSucceeded   = true;
-        pWalk->Core.GCPhys       = GST_GET_PTE_GCPHYS(Pte)
-                                 | (GCPhysNested & PAGE_OFFSET_MASK);
+        pWalk->fEffective = fEffective;
+
+        pWalk->fSucceeded   = true;
+        pWalk->GCPhys       = GST_GET_PTE_GCPHYS(Pte) | (GCPhysNested & PAGE_OFFSET_MASK);
         return VINF_SUCCESS;
     }
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 92425)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 92426)
@@ -2308,7 +2308,8 @@
 VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
 {
-    int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
+    PGMPTWALK Walk;
+    int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
     if (pGCPhys && RT_SUCCESS(rc))
-        *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
+        *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK);
     return rc;
 }
@@ -2327,9 +2328,9 @@
 VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
 {
-    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
-    RTGCPHYS GCPhys;
-    int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
+    PVMCC     pVM = pVCpu->CTX_SUFF(pVM);
+    PGMPTWALK Walk;
+    int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
     if (RT_SUCCESS(rc))
-        rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
+        rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
     return rc;
 }
@@ -3429,6 +3430,4 @@
 VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
 {
-    RTGCPHYS    GCPhys;
-    uint64_t    fFlags;
     int         rc;
     PVMCC       pVM = pVCpu->CTX_SUFF(pVM);
@@ -3449,10 +3448,11 @@
     {
         /* Convert virtual to physical address + flags */
-        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
+        PGMPTWALK Walk;
+        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
         AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
-        GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
+        RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
 
         /* mark the guest page as accessed. */
-        if (!(fFlags & X86_PTE_A))
+        if (!(Walk.fEffective & X86_PTE_A))
         {
             rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
@@ -3469,10 +3469,11 @@
     {
         /* Convert virtual to physical address + flags */
-        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
+        PGMPTWALK Walk;
+        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
         AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
-        GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
+        RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
 
         /* mark the guest page as accessed. */
-        if (!(fFlags & X86_PTE_A))
+        if (!(Walk.fEffective & X86_PTE_A))
         {
             rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
@@ -3520,6 +3521,4 @@
 VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
 {
-    RTGCPHYS    GCPhys;
-    uint64_t    fFlags;
     int         rc;
     PVMCC       pVM = pVCpu->CTX_SUFF(pVM);
@@ -3540,14 +3539,15 @@
     {
         /* Convert virtual to physical address + flags */
-        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
+        PGMPTWALK Walk;
+        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
         AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
-        GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
+        RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
 
         /* Mention when we ignore X86_PTE_RW... */
-        if (!(fFlags & X86_PTE_RW))
+        if (!(Walk.fEffective & X86_PTE_RW))
             Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
 
         /* Mark the guest page as accessed and dirty if necessary. */
-        if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
+        if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
         {
             rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
@@ -3564,14 +3564,15 @@
     {
         /* Convert virtual to physical address + flags */
-        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
+        PGMPTWALK Walk;
+        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
         AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
-        GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
+        RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
 
         /* Mention when we ignore X86_PTE_RW... */
-        if (!(fFlags & X86_PTE_RW))
+        if (!(Walk.fEffective & X86_PTE_RW))
             Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
 
         /* Mark the guest page as accessed and dirty if necessary. */
-        if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
+        if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
         {
             rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllShw.h	(revision 92425)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllShw.h	(revision 92426)
@@ -569,18 +569,17 @@
                      *        set instead of resolving the guest physical
                      *        address yet again. */
-                    RTGCPHYS GCPhys;
-                    uint64_t fGstPte;
-                    rc = PGMGstGetPage(pVCpu, GCPtr, &fGstPte, &GCPhys);
+                    PGMPTWALK GstWalk;
+                    rc = PGMGstGetPage(pVCpu, GCPtr, &GstWalk);
                     AssertRC(rc);
                     if (RT_SUCCESS(rc))
                     {
-                        Assert((fGstPte & X86_PTE_RW) || !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP /* allow netware hack */));
-                        PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
+                        Assert((GstWalk.fEffective & X86_PTE_RW) || !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP /* allow netware hack */));
+                        PPGMPAGE pPage = pgmPhysGetPage(pVM, GstWalk.GCPhys);
                         Assert(pPage);
                         if (pPage)
                         {
-                            rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
+                            rc = pgmPhysPageMakeWritable(pVM, pPage, GstWalk.GCPhys);
                             AssertRCReturn(rc, rc);
-                            Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GCPhys, pPage));
+                            Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GstWalk.GCPhys, pPage));
                         }
                     }
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 92425)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 92426)
@@ -7193,8 +7193,8 @@
 
         /* Check if the page at the fault-address is the APIC base. */
-        RTGCPHYS GCPhysPage;
-        int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, NULL /* pfFlags */, &GCPhysPage);
+        PGMPTWALK Walk;
+        int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, &Walk);
         if (   rc2 == VINF_SUCCESS
-            && GCPhysPage == GCPhysApicBase)
+            && Walk.GCPhys == GCPhysApicBase)
         {
             /* Only attempt to patch the instruction once. */
Index: /trunk/src/VBox/VMM/VMMR0/PGMR0SharedPage.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/PGMR0SharedPage.cpp	(revision 92425)
+++ /trunk/src/VBox/VMM/VMMR0/PGMR0SharedPage.cpp	(revision 92426)
@@ -74,11 +74,10 @@
         {
             /** @todo inefficient to fetch each guest page like this... */
-            RTGCPHYS GCPhys;
-            uint64_t fFlags;
-            rc = PGMGstGetPage(pVCpu, GCPtrPage, &fFlags, &GCPhys);
+            PGMPTWALK Walk;
+            rc = PGMGstGetPage(pVCpu, GCPtrPage, &Walk);
             if (    rc == VINF_SUCCESS
-                &&  !(fFlags & X86_PTE_RW)) /* important as we make assumptions about this below! */
+                &&  !(Walk.fEffective & X86_PTE_RW)) /* important as we make assumptions about this below! */
             {
-                PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
+                PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk.GCPhys);
                 Assert(!pPage || !PGM_PAGE_IS_BALLOONED(pPage));
                 if (    pPage
@@ -89,5 +88,5 @@
                     PageDesc.idPage = PGM_PAGE_GET_PAGEID(pPage);
                     PageDesc.HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
-                    PageDesc.GCPhys = GCPhys;
+                    PageDesc.GCPhys = Walk.GCPhys;
 
                     rc = GMMR0SharedModuleCheckPage(pGVM, pModule, idxRegion, idxPage, &PageDesc);
Index: /trunk/src/VBox/VMM/VMMR3/DBGFAddr.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/DBGFAddr.cpp	(revision 92425)
+++ /trunk/src/VBox/VMM/VMMR3/DBGFAddr.cpp	(revision 92426)
@@ -231,5 +231,8 @@
     VMCPU_ASSERT_EMT(pVCpu);
     /* This is just a wrapper because we cannot pass FlatPtr thru VMR3ReqCall directly. */
-    return PGMGstGetPage(pVCpu, pAddress->FlatPtr, NULL, pGCPhys);
+    PGMPTWALK Walk;
+    int const rc = PGMGstGetPage(pVCpu, pAddress->FlatPtr, &Walk);
+    *pGCPhys = Walk.GCPhys;
+    return rc;
 }
 
Index: /trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp	(revision 92425)
+++ /trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp	(revision 92426)
@@ -869,6 +869,6 @@
     RTGCPHYS        GCPhysPrev              = NIL_RTGCPHYS;
     bool            fFullWalk               = true;
-    PGMPTWALKGST    Walk;
-    RT_ZERO(Walk);
+    PGMPTWALK       Walk;
+    PGMPTWALKGST    WalkGst;
 
     PGM_LOCK_VOID(pVM);
@@ -877,16 +877,16 @@
         int rc;
         if (fFullWalk)
-            rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk);
+            rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk, &WalkGst);
         else
-            rc = pgmGstPtWalkNext(pVCpu, GCPtr, &Walk);
-        if (RT_SUCCESS(rc) && Walk.u.Core.fSucceeded)
+            rc = pgmGstPtWalkNext(pVCpu, GCPtr, &Walk, &WalkGst);
+        if (RT_SUCCESS(rc) && Walk.fSucceeded)
         {
             fFullWalk = false;
 
             /* Skip if same page as previous one (W10 optimization). */
-            if (   Walk.u.Core.GCPhys != GCPhysPrev
+            if (   Walk.GCPhys != GCPhysPrev
                 || cbPrev != 0)
             {
-                PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk.u.Core.GCPhys);
+                PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk.GCPhys);
                 if (   pPage
                     && (   !PGM_PAGE_IS_ZERO(pPage)
@@ -895,8 +895,8 @@
                     && !PGM_PAGE_IS_BALLOONED(pPage))
                 {
-                    GCPhysPrev = Walk.u.Core.GCPhys;
+                    GCPhysPrev = Walk.GCPhys;
                     void const *pvPage;
                     PGMPAGEMAPLOCK Lock;
-                    rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, Walk.u.Core.GCPhys, &pvPage, &Lock);
+                    rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, Walk.GCPhys, &pvPage, &Lock);
                     if (RT_SUCCESS(rc))
                     {
@@ -933,6 +933,6 @@
         else
         {
-            Assert(Walk.enmType != PGMPTWALKGSTTYPE_INVALID);
-            Assert(!Walk.u.Core.fSucceeded);
+            Assert(WalkGst.enmType != PGMPTWALKGSTTYPE_INVALID);
+            Assert(!Walk.fSucceeded);
             cbPrev = 0; /* ignore error. */
 
@@ -942,5 +942,5 @@
              */
             uint64_t cPagesCanSkip;
-            switch (Walk.u.Core.uLevel)
+            switch (Walk.uLevel)
             {
                 case 1:
@@ -949,5 +949,5 @@
                     break;
                 case 2:
-                    if (Walk.enmType == PGMPTWALKGSTTYPE_32BIT)
+                    if (WalkGst.enmType == PGMPTWALKGSTTYPE_32BIT)
                     {
                         cPagesCanSkip = X86_PG_ENTRIES     - ((GCPtr >> X86_PT_SHIFT)     & X86_PT_MASK);
@@ -977,5 +977,5 @@
                     break;
                 default:
-                    AssertMsgFailed(("%d\n", Walk.u.Core.uLevel));
+                    AssertMsgFailed(("%d\n", Walk.uLevel));
                     cPagesCanSkip = 0;
                     break;
Index: /trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp	(revision 92425)
+++ /trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp	(revision 92426)
@@ -288,19 +288,18 @@
 {
     /* Debug only API for the page fusion testcase. */
-    RTGCPHYS GCPhys;
-    uint64_t fFlags;
+    PGMPTWALK Walk;
 
     PGM_LOCK_VOID(pVM);
 
-    int rc = PGMGstGetPage(VMMGetCpu(pVM), GCPtrPage, &fFlags, &GCPhys);
+    int rc = PGMGstGetPage(VMMGetCpu(pVM), GCPtrPage, &Walk);
     switch (rc)
     {
         case VINF_SUCCESS:
         {
-            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
+            PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk.GCPhys);
             if (pPage)
             {
                 *pfShared    = PGM_PAGE_IS_SHARED(pPage);
-                *pfPageFlags = fFlags;
+                *pfPageFlags = Walk.fEffective;
             }
             else
Index: /trunk/src/VBox/VMM/include/PGMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/PGMInternal.h	(revision 92425)
+++ /trunk/src/VBox/VMM/include/PGMInternal.h	(revision 92426)
@@ -37,5 +37,4 @@
 #include <VBox/vmm/gmm.h>
 #include <VBox/vmm/hm.h>
-#include <VBox/vmm/hm_vmx.h>
 #include <iprt/asm.h>
 #include <iprt/assert.h>
@@ -2331,219 +2330,4 @@
 
 
-/** @name PGMPTATTRS
- *
- * PGM page-table attributes.
- *
- * This is VirtualBox's combined page table attributes. It combines regular page
- * table and Intel EPT attributes. It's 64-bit in size so there's ample room for
- * bits added in the future to EPT or regular page tables (for e.g. Protection Key).
- *
- * The following bits map 1:1 (shifted by PGM_PTATTRS_EPT_SHIFT) to the Intel EPT
- * attributes as these are unique to EPT and fit within 64-bits despite the shift:
- *   - EPT_R         : Read access.
- *   - EPT_W         : Write access.
- *   - EPT_X_SUPER   : Execute or execute for supervisor-mode linear addr access.
- *   - EPT_MEMTYPE   : EPT memory type.
- *   - EPT_IGNORE_PAT: Ignore PAT memory type.
- *   - EPT_X_USER    : Execute access for user-mode linear addresses.
- *
- * For regular page tables, the R bit is always 1 (same as P bit).
- * For Intel EPT, the EPT_R and EPT_W bits are copied to R and W bits respectively.
- *
- * The following EPT attributes are mapped to the following positions because they
- * exist in the regular page tables at these positions OR are exclusive to EPT and
- * have been mapped to arbitrarily chosen positions:
- *   - EPT_A               : Accessed                (EPT bit  8 maps to bit  5).
- *   - EPT_D               : Dirty                   (EPT bit  9 maps to bit  6).
- *   - EPT_SUPER_SHW_STACK : Supervisor Shadow Stack (EPT bit 60 maps to bit 24).
- *   - EPT_SUPPRESS_VE_XCPT: Suppress \#VE exception (EPT bit 63 maps to bit 25).
- *
- * Bits 12, 11:9 and 43 are deliberately kept unused (correspond to bit PS and bits
- * 11:9 in the regular page-table structures and to bit 11 in the EPT structures
- * respectively) as bit 12 is the page-size bit and bits 11:9 are reserved for
- * use by software and we may want to use/preserve them in the future.
- *
- * @{ */
-typedef uint64_t PGMPTATTRS;
-/** Pointer to a PGMPTATTRS type. */
-typedef PGMPTATTRS *PPGMPTATTRS;
-
-/** Read bit (always 1 for regular PT, copy of EPT_R for EPT). */
-#define PGM_PTATTRS_R_SHIFT                         0
-#define PGM_PTATTRS_R_MASK                          RT_BIT_64(PGM_PTATTRS_R_SHIFT)
-/** Write access bit (aka read/write bit for regular PT). */
-#define PGM_PTATTRS_W_SHIFT                         1
-#define PGM_PTATTRS_W_MASK                          RT_BIT_64(PGM_PTATTRS_W_SHIFT)
-/** User-mode access bit. */
-#define PGM_PTATTRS_US_SHIFT                        2
-#define PGM_PTATTRS_US_MASK                         RT_BIT_64(PGM_PTATTRS_US_SHIFT)
-/** Write through cache bit. */
-#define PGM_PTATTRS_PWT_SHIFT                       3
-#define PGM_PTATTRS_PWT_MASK                        RT_BIT_64(PGM_PTATTRS_PWT_SHIFT)
-/** Cache disabled bit. */
-#define PGM_PTATTRS_PCD_SHIFT                       4
-#define PGM_PTATTRS_PCD_MASK                        RT_BIT_64(PGM_PTATTRS_PCD_SHIFT)
-/** Accessed bit. */
-#define PGM_PTATTRS_A_SHIFT                         5
-#define PGM_PTATTRS_A_MASK                          RT_BIT_64(PGM_PTATTRS_A_SHIFT)
-/** Dirty bit. */
-#define PGM_PTATTRS_D_SHIFT                         6
-#define PGM_PTATTRS_D_MASK                          RT_BIT_64(PGM_PTATTRS_D_SHIFT)
-/** The PAT bit. */
-#define PGM_PTATTRS_PAT_SHIFT                       7
-#define PGM_PTATTRS_PAT_MASK                        RT_BIT_64(PGM_PTATTRS_PAT_SHIFT)
-/** The global bit. */
-#define PGM_PTATTRS_G_SHIFT                         8
-#define PGM_PTATTRS_G_MASK                          RT_BIT_64(PGM_PTATTRS_G_SHIFT)
-/** Reserved (bits 12:9) unused. */
-#define PGM_PTATTRS_RSVD_12_9_SHIFT                 9
-#define PGM_PTATTRS_RSVD_12_9_MASK                  UINT64_C(0x0000000000001e00)
-/** Read access bit - EPT only. */
-#define PGM_PTATTRS_EPT_R_SHIFT                     13
-#define PGM_PTATTRS_EPT_R_MASK                      RT_BIT_64(PGM_PTATTRS_EPT_R_SHIFT)
-/** Write access bit - EPT only. */
-#define PGM_PTATTRS_EPT_W_SHIFT                     14
-#define PGM_PTATTRS_EPT_W_MASK                      RT_BIT_64(PGM_PTATTRS_EPT_W_SHIFT)
-/** Execute or execute access for supervisor-mode linear addresses - EPT only. */
-#define PGM_PTATTRS_EPT_X_SUPER_SHIFT               15
-#define PGM_PTATTRS_EPT_X_SUPER_MASK                RT_BIT_64(PGM_PTATTRS_EPT_X_SUPER_SHIFT)
-/** EPT memory type - EPT only. */
-#define PGM_PTATTRS_EPT_MEMTYPE_SHIFT               16
-#define PGM_PTATTRS_EPT_MEMTYPE_MASK                UINT64_C(0x0000000000070000)
-/** Ignore PAT memory type - EPT only. */
-#define PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT            19
-#define PGM_PTATTRS_EPT_IGNORE_PAT_MASK             RT_BIT_64(PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT)
-/** Reserved (bits 22:20) unused. */
-#define PGM_PTATTRS_RSVD_22_20_SHIFT                20
-#define PGM_PTATTRS_RSVD_22_20_MASK                 UINT64_C(0x0000000000700000)
-/** Execute access for user-mode linear addresses - EPT only. */
-#define PGM_PTATTRS_EPT_X_USER_SHIFT                23
-#define PGM_PTATTRS_EPT_X_USER_MASK                 RT_BIT_64(PGM_PTATTRS_EPT_X_USER_SHIFT)
-/** Reserved (bit 23) - unused. */
-#define PGM_PTATTRS_RSVD_23_SHIFT                   24
-#define PGM_PTATTRS_RSVD_23_MASK                    UINT64_C(0x0000000001000000)
-/** Supervisor shadow stack - EPT only. */
-#define PGM_PTATTRS_EPT_SUPER_SHW_STACK_SHIFT       25
-#define PGM_PTATTRS_EPT_SUPER_SHW_STACK_MASK        RT_BIT_64(PGM_PTATTRS_EPT_SUPER_SHW_STACK_SHIFT)
-/** Suppress \#VE exception - EPT only. */
-#define PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT_SHIFT      26
-#define PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT_MASK       RT_BIT_64(PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT_SHIFT)
-/** Reserved (bits 62:27) - unused. */
-#define PGM_PTATTRS_RSVD_62_27_SHIFT                27
-#define PGM_PTATTRS_RSVD_62_27_MASK                 UINT64_C(0x7ffffffff8000000)
-/** No-execute bit. */
-#define PGM_PTATTRS_NX_SHIFT                        63
-#define PGM_PTATTRS_NX_MASK                         RT_BIT_64(PGM_PTATTRS_NX_SHIFT)
-
-RT_BF_ASSERT_COMPILE_CHECKS(PGM_PTATTRS_, UINT64_C(0), UINT64_MAX,
-                            (R, W, US, PWT, PCD, A, D, PAT, G, RSVD_12_9, EPT_R, EPT_W, EPT_X_SUPER, EPT_MEMTYPE, EPT_IGNORE_PAT,
-                             RSVD_22_20, EPT_X_USER, RSVD_23, EPT_SUPER_SHW_STACK, EPT_SUPPRESS_VE_XCPT, RSVD_62_27, NX));
-
-/** The bit position where the EPT specific attributes begin. */
-#define PGM_PTATTRS_EPT_SHIFT                       PGM_PTATTRS_EPT_R_SHIFT
-/** The mask of EPT bits (bits 26:ATTR_SHIFT). In the future we might choose to
- *  use higher unused bits for something else, in that case adjust this mask. */
-#define PGM_PTATTRS_EPT_MASK                        UINT64_C(0x0000000007ffe000)
-
-/** The mask of all PGM page attribute bits for regular page-tables. */
-#define PGM_PTATTRS_PT_VALID_MASK                   (  PGM_PTATTRS_R_MASK \
-                                                     | PGM_PTATTRS_W_MASK \
-                                                     | PGM_PTATTRS_US_MASK \
-                                                     | PGM_PTATTRS_PWT_MASK \
-                                                     | PGM_PTATTRS_PCD_MASK \
-                                                     | PGM_PTATTRS_A_MASK \
-                                                     | PGM_PTATTRS_D_MASK \
-                                                     | PGM_PTATTRS_PAT_MASK \
-                                                     | PGM_PTATTRS_G_MASK \
-                                                     | PGM_PTATTRS_NX_MASK)
-
-/** The mask of all PGM page attribute bits for EPT. */
-#define PGM_PTATTRS_EPT_VALID_MASK                  (  PGM_PTATTRS_R_MASK \
-                                                     | PGM_PTATTRS_W_MASK \
-                                                     | PGM_PTATTRS_A_MASK \
-                                                     | PGM_PTATTRS_D_MASK \
-                                                     | PGM_PTATTRS_EPT_R_MASK \
-                                                     | PGM_PTATTRS_EPT_W_MASK \
-                                                     | PGM_PTATTRS_EPT_X_SUPER \
-                                                     | PGM_PTATTRS_EPT_MEMTYPE \
-                                                     | PGM_PTATTRS_EPT_IGNORE_PAT \
-                                                     | PGM_PTATTRS_EPT_X_USER \
-                                                     | PGM_PTATTRS_EPT_SUPER_SHW_STACK \
-                                                     | PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT)
-
-/* The mask of all PGM page attribute bits (combined). */
-#define PGM_PTATTRS_VALID_MASK                      (PGM_PTATTRS_PT_VALID_MASK | PGM_PTATTRS_PT_VALID_MASK)
-
-/* Verify bits match the regular PT bits. */
-AssertCompile(PGM_PTATTRS_W_SHIFT   == X86_PTE_BIT_RW);
-AssertCompile(PGM_PTATTRS_US_SHIFT  == X86_PTE_BIT_US);
-AssertCompile(PGM_PTATTRS_PWT_SHIFT == X86_PTE_BIT_PWT);
-AssertCompile(PGM_PTATTRS_PCD_SHIFT == X86_PTE_BIT_PCD);
-AssertCompile(PGM_PTATTRS_A_SHIFT   == X86_PTE_BIT_A);
-AssertCompile(PGM_PTATTRS_D_SHIFT   == X86_PTE_BIT_D);
-AssertCompile(PGM_PTATTRS_PAT_SHIFT == X86_PTE_BIT_PAT);
-AssertCompile(PGM_PTATTRS_G_SHIFT   == X86_PTE_BIT_G);
-AssertCompile(PGM_PTATTRS_W_MASK    == X86_PTE_RW);
-AssertCompile(PGM_PTATTRS_US_MASK   == X86_PTE_US);
-AssertCompile(PGM_PTATTRS_PWT_MASK  == X86_PTE_PWT);
-AssertCompile(PGM_PTATTRS_PCD_MASK  == X86_PTE_PCD);
-AssertCompile(PGM_PTATTRS_A_MASK    == X86_PTE_A);
-AssertCompile(PGM_PTATTRS_D_MASK    == X86_PTE_D);
-AssertCompile(PGM_PTATTRS_PAT_MASK  == X86_PTE_PAT);
-AssertCompile(PGM_PTATTRS_G_MASK    == X86_PTE_G);
-
-/* Verify those EPT bits that must map 1:1 (after shifting). */
-AssertCompile(PGM_PTATTRS_EPT_R_SHIFT          - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_READ);
-AssertCompile(PGM_PTATTRS_EPT_W_SHIFT          - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_WRITE);
-AssertCompile(PGM_PTATTRS_EPT_X_SUPER_SHIFT    - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_EXECUTE);
-AssertCompile(PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_IGNORE_PAT);
-AssertCompile(PGM_PTATTRS_EPT_X_USER_SHIFT     - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_USER_EXECUTE);
-/** @} */
-
-
-/**
- * Page fault guest state for the AMD64 paging mode.
- */
-typedef struct PGMPTWALKCORE
-{
-    /** The guest virtual address that is being resolved by the walk
-     *  (input). */
-    RTGCPTR         GCPtr;
-
-    /** The nested-guest physical address that is being resolved if this is a
-     *  second-level walk (input).
-     *  @remarks only valid if fIsSlat is set. */
-    RTGCPHYS        GCPhysNested;
-
-    /** The guest physical address that is the result of the walk.
-     * @remarks only valid if fSucceeded is set. */
-    RTGCPHYS        GCPhys;
-
-    /** Set if the walk succeeded, i.d. GCPhys is valid. */
-    bool            fSucceeded;
-    /** Whether this is a second-level translation. */
-    bool            fIsSlat;
-    /** Whether the linear address (GCPtr) is valid and thus the cause for the
-     *  second-level translation. */
-    bool            fIsLinearAddrValid;
-    /** The level problem arrised at.
-     * PTE is level 1, PDE is level 2, PDPE is level 3, PML4 is level 4, CR3 is
-     * level 8.  This is 0 on success. */
-    uint8_t         uLevel;
-    /** Set if the page isn't present. */
-    bool            fNotPresent;
-    /** Encountered a bad physical address. */
-    bool            fBadPhysAddr;
-    /** Set if there was reserved bit violations. */
-    bool            fRsvdError;
-    /** Set if it involves a big page (2/4 MB). */
-    bool            fBigPage;
-    /** Set if it involves a gigantic page (1 GB). */
-    bool            fGigantPage;
-    bool            afPadding[7];
-    /** The effective attributes, PGM_PTATTRS_XXX. */
-    PGMPTATTRS      fEffective;
-} PGMPTWALKCORE;
-
 /**
  * Guest page table walk for the AMD64 mode.
@@ -2551,7 +2335,4 @@
 typedef struct PGMPTWALKGSTAMD64
 {
-    /** The common core. */
-    PGMPTWALKCORE   Core;
-
     PX86PML4        pPml4;
     PX86PML4E       pPml4e;
@@ -2580,7 +2361,4 @@
 typedef struct PGMPTWALKGSTEPT
 {
-    /** The common core. */
-    PGMPTWALKCORE   Core;
-
     PEPTPML4        pPml4;
     PEPTPML4E       pPml4e;
@@ -2609,7 +2387,4 @@
 typedef struct PGMPTWALKGSTPAE
 {
-    /** The common core. */
-    PGMPTWALKCORE   Core;
-
     PX86PDPT        pPdpt;
     PX86PDPE        pPdpe;
@@ -2634,7 +2409,4 @@
 typedef struct PGMPTWALKGST32BIT
 {
-    /** The common core. */
-    PGMPTWALKCORE   Core;
-
     PX86PD          pPd;
     PX86PDE         pPde;
@@ -2676,6 +2448,4 @@
     union
     {
-        /** The page walker core - always valid. */
-        PGMPTWALKCORE       Core;
         /** The page walker for AMD64. */
         PGMPTWALKGSTAMD64   Amd64;
@@ -2866,5 +2636,5 @@
     /** The guest mode type. */
     uint32_t                        uType;
-    DECLCALLBACKMEMBER(int, pfnGetPage,(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
+    DECLCALLBACKMEMBER(int, pfnGetPage,(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk));
     DECLCALLBACKMEMBER(int, pfnModifyPage,(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
     DECLCALLBACKMEMBER(int, pfnEnter,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3));
@@ -3915,6 +3685,6 @@
 int             pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppPml4);
 #endif
-int             pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk);
-int             pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk);
+int             pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
+int             pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
 
 # if defined(VBOX_STRICT) && HC_ARCH_BITS == 64 && defined(IN_RING3)
