Index: /trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h	(revision 92479)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h	(revision 92480)
@@ -17,4 +17,32 @@
 
 #if PGM_GST_TYPE == PGM_TYPE_EPT
+DECLINLINE(bool) PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(PCVMCPUCC pVCpu, uint64_t uEntry) /* Validates the R/W/X permission combination of an EPT entry. */
+{
+    if (!(uEntry & VMX_BF_EPT_PT_READ_MASK)) /* Read bit clear: only a restricted set of combinations remains valid. */
+    {
+        if (uEntry & VMX_BF_EPT_PT_WRITE_MASK) /* Writable-but-not-readable is never a valid combination. */
+            return false;
+
+        Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt); /* NOTE(review): assumes mode-based execute control isn't exposed to the guest — confirm at call sites. */
+        if (   !RT_BF_GET(pVCpu->pgm.s.uEptVpidCapMsr, VMX_BF_EPT_VPID_CAP_EXEC_ONLY)
+            && (uEntry & VMX_BF_EPT_PT_EXECUTE_MASK)) /* Execute-only is valid only when IA32_VMX_EPT_VPID_CAP advertises it. */
+            return false;
+    }
+    return true; /* Also returns true for an all-clear (not-present) entry; presence is checked elsewhere. */
+}
+
+
+DECLINLINE(bool) PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(uint64_t uEntry, uint8_t uLevel) /* Validates the EPT memory-type field of a leaf entry. */
+{
+    Assert(uLevel >= 1 && uLevel <= 3); NOREF(uLevel); /* Fixed: was '>= 3 && <= 1', which can never hold, so the assertion always fired. Leaf levels are 1 (PTE), 2 (2M PDE), 3 (1G PDPTE) — see callers. */
+    uint64_t const fEptMemTypeMask = uEntry & VMX_BF_EPT_PT_MEMTYPE_MASK; /* Compared masked-in-place; presumably EPT_E_MEMTYPE_* are defined pre-shifted — verify. */
+    if (   fEptMemTypeMask == EPT_E_MEMTYPE_INVALID_2
+        || fEptMemTypeMask == EPT_E_MEMTYPE_INVALID_3
+        || fEptMemTypeMask == EPT_E_MEMTYPE_INVALID_7) /* Memory types 2, 3 and 7 are reserved/invalid. */
+        return false;
+    return true;
+}
+
+
 DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(PCVMCPUCC pVCpu, PPGMPTWALK pWalk, uint64_t uEntry, uint8_t uLevel)
 {
@@ -43,6 +71,7 @@
 {
     NOREF(pVCpu);
-    pWalk->fRsvdError = true;
-    pWalk->uLevel     = uLevel;
+    pWalk->fRsvdError  = true;
+    pWalk->uLevel      = uLevel;
+    pWalk->enmSlatFail = PGMSLATFAIL_EPT_MISCONFIG;
     return VERR_PAGE_TABLE_NOT_PRESENT;
 }
@@ -52,5 +81,4 @@
                                             PPGMPTWALK pWalk, PGSTPTWALK pGstWalk)
 {
-    /** @todo implement figuring out fEptMisconfig. */
     /*
      * Init walk structures.
@@ -87,10 +115,15 @@
      * Do the walk.
      */
+    int const rc2 = pgmGstGetEptPML4PtrEx(pVCpu, &pGstWalk->pPml4);
+    if (RT_SUCCESS(rc2))
+    { /* likely */ }
+    else
+        return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc2);
+
     uint64_t fEffective;
     {
-        int rc = pgmGstGetEptPML4PtrEx(pVCpu, &pGstWalk->pPml4);
-        if (RT_SUCCESS(rc)) { /* probable */ }
-        else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
-
+        /*
+         * PML4E.
+         */
         PEPTPML4E pPml4e;
         pGstWalk->pPml4e = pPml4e = &pGstWalk->pPml4->a[(GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK];
@@ -116,9 +149,12 @@
         pWalk->fEffective = fEffective;
 
-        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pGstWalk->pPdpt);
+        int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pGstWalk->pPdpt);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
     }
     {
+        /*
+         * PDPTE.
+         */
         PEPTPDPTE pPdpte;
         pGstWalk->pPdpte = pPdpte = &pGstWalk->pPdpt->a[(GCPhysNested >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
@@ -143,5 +179,6 @@
             pWalk->fEffective = fEffective;
         }
-        else if (GST_IS_BIG_PDPE_VALID(pVCpu, Pdpte))
+        else if (   GST_IS_BIG_PDPE_VALID(pVCpu, Pdpte)
+                 && PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(Pdpte.u, 3))
         {
             uint64_t const fEptAttrs     = Pdpte.u & EPT_PDPTE1G_ATTR_MASK;
@@ -168,6 +205,13 @@
         }
         else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 3);
-    }
-    {
+
+        int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pdpte.u & EPT_PDPTE_PG_MASK, &pGstWalk->pPd);
+        if (RT_SUCCESS(rc)) { /* probable */ }
+        else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
+    }
+    {
+        /*
+         * PDE.
+         */
         PGSTPDE pPde;
         pGstWalk->pPde  = pPde  = &pGstWalk->pPd->a[(GCPhysNested >> GST_PD_SHIFT) & GST_PD_MASK];
@@ -178,9 +222,22 @@
         else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pde.u, 2);
 
-        if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
+        /* The order of the following 2 "if" statements matters. */
+        if (GST_IS_PDE_VALID(pVCpu, Pde))
         {
-            if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ }
-            else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 2);
-
+            uint64_t const fEptAttrs     = Pde.u & EPT_PDE_ATTR_MASK;
+            uint8_t const fRead          = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
+            uint8_t const fWrite         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
+            uint8_t const fAccessed      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
+            uint64_t const fEffectiveEpt = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK;
+            fEffective &= RT_BF_MAKE(PGM_PTATTRS_R, fRead)
+                        | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
+                        | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
+                        | (fEffectiveEpt & fCumulativeEpt);
+            pWalk->fEffective = fEffective;
+
+        }
+        else if (   GST_IS_BIG_PDE_VALID(pVCpu, Pde)
+                 && PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(Pde.u, 2))
+        {
             uint64_t const fEptAttrs     = Pde.u & EPT_PDE2M_ATTR_MASK;
             uint8_t const fRead          = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
@@ -205,18 +262,5 @@
             return VINF_SUCCESS;
         }
-
-        if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
-            return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 2);
-
-        uint64_t const fEptAttrs     = Pde.u & EPT_PDE_ATTR_MASK;
-        uint8_t const fRead          = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
-        uint8_t const fWrite         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
-        uint8_t const fAccessed      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
-        uint64_t const fEffectiveEpt = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & PGM_PTATTRS_EPT_MASK;
-        fEffective &= RT_BF_MAKE(PGM_PTATTRS_R, fRead)
-                    | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
-                    | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
-                    | (fEffectiveEpt & fCumulativeEpt);
-        pWalk->fEffective = fEffective;
+        else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 2);
 
         int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pGstWalk->pPt);
@@ -225,4 +269,7 @@
     }
     {
+        /*
+         * PTE.
+         */
         PGSTPTE pPte;
         pGstWalk->pPte  = pPte  = &pGstWalk->pPt->a[(GCPhysNested >> GST_PT_SHIFT) & GST_PT_MASK];
@@ -233,6 +280,9 @@
         else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pte.u, 1);
 
-        if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ }
-        else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 1);
+        if (   GST_IS_PTE_VALID(pVCpu, Pte)
+            && PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(Pte.u, 1))
+        { /* likely */ }
+        else
+            return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 1);
 
         uint64_t const fEptAttrs     = Pte.u & EPT_PTE_ATTR_MASK;
Index: /trunk/src/VBox/VMM/VMMR3/PGM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGM.cpp	(revision 92479)
+++ /trunk/src/VBox/VMM/VMMR3/PGM.cpp	(revision 92480)
@@ -1621,4 +1621,5 @@
 
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+        pVCpu->pgm.s.uEptVpidCapMsr           = fEptVpidCap;
         pVCpu->pgm.s.fGstEptMbzPteMask        = fMbzPageFrameMask | EPT_PTE_MBZ_MASK;
         pVCpu->pgm.s.fGstEptMbzPdeMask        = fMbzPageFrameMask | EPT_PDE_MBZ_MASK;
@@ -1632,11 +1633,5 @@
                && !pVM->cpum.ro.GuestFeatures.fVmxSppEpt
                && !pVM->cpum.ro.GuestFeatures.fVmxEptXcptVe);
-        pVCpu->pgm.s.fGstEptPresentMask         = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE;
-        pVCpu->pgm.s.fGstEptShadowedPml4eMask   = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_ACCESSED;
-        pVCpu->pgm.s.fGstEptShadowedPdpeMask    = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_ACCESSED;
-        pVCpu->pgm.s.fGstEptShadowedBigPdpeMask = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_ACCESSED | EPT_E_DIRTY;
-        pVCpu->pgm.s.fGstEptShadowedPdeMask     = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_ACCESSED;
-        pVCpu->pgm.s.fGstEptShadowedBigPdeMask  = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_ACCESSED | EPT_E_DIRTY;
-        pVCpu->pgm.s.fGstEptShadowedPteMask     = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_ACCESSED | EPT_E_DIRTY;
+        pVCpu->pgm.s.fGstEptPresentMask       = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE;
 #endif
     }
Index: /trunk/src/VBox/VMM/include/PGMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/PGMInternal.h	(revision 92479)
+++ /trunk/src/VBox/VMM/include/PGMInternal.h	(revision 92480)
@@ -3436,4 +3436,7 @@
     /** The guest's EPT pointer (copy of virtual VMCS). */
     uint64_t                        uEptPtr;
+    /** Copy of the VM's IA32_VMX_EPT_VPID_CAP MSR (EPT/VPID capabilities) for faster
+     *  access. Doesn't change through the lifetime of the VM. */
+    uint64_t                        uEptVpidCapMsr;
     /** Mask containing the MBZ PTE bits. */
     uint64_t                        fGstEptMbzPteMask;
@@ -3450,16 +3453,4 @@
     /** Mask to determine whether an entry is present. */
     uint64_t                        fGstEptPresentMask;
-    /** Mask containing the PML4E bits that we shadow. */
-    uint64_t                        fGstEptShadowedPml4eMask;
-    /** Mask containing the PDPE bits that we shadow. */
-    uint64_t                        fGstEptShadowedPdpeMask;
-    /** Mask containing the big page PDPE bits that we shadow. */
-    uint64_t                        fGstEptShadowedBigPdpeMask;
-    /** Mask containing the PDE bits that we shadow. */
-    uint64_t                        fGstEptShadowedPdeMask;
-    /** Mask containing the big page PDE bits that we shadow. */
-    uint64_t                        fGstEptShadowedBigPdeMask;
-    /** Mask containing the PTE bits that we shadow. */
-    uint64_t                        fGstEptShadowedPteMask;
     /** @} */
 
