Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 71754)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 71755)
@@ -1370,5 +1370,6 @@
 #ifndef IN_RC
 /**
- * Checks if the guest VMCB has the specified ctrl/instruction intercept active.
+ * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
+ * active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.
@@ -1389,5 +1390,5 @@
 
 /**
- * Checks if the guest VMCB has the specified CR read intercept active.
+ * Checks if the nested-guest VMCB has the specified CR read intercept active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.
@@ -1408,5 +1409,5 @@
 
 /**
- * Checks if the guest VMCB has the specified CR write intercept active.
+ * Checks if the nested-guest VMCB has the specified CR write intercept active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.
@@ -1427,5 +1428,5 @@
 
 /**
- * Checks if the guest VMCB has the specified DR read intercept active.
+ * Checks if the nested-guest VMCB has the specified DR read intercept active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.
@@ -1446,5 +1447,5 @@
 
 /**
- * Checks if the guest VMCB has the specified DR write intercept active.
+ * Checks if the nested-guest VMCB has the specified DR write intercept active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.
@@ -1465,5 +1466,5 @@
 
 /**
- * Checks if the guest VMCB has the specified exception intercept active.
+ * Checks if the nested-guest VMCB has the specified exception intercept active.
  *
  * @returns @c true if in intercept is active, @c false otherwise.
@@ -1484,5 +1485,5 @@
 
 /**
- * Checks if the guest VMCB has virtual-interrupt masking enabled.
+ * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
  *
  * @returns @c true if virtual-interrupts are masked, @c false otherwise.
@@ -1499,4 +1500,22 @@
         return pVmcb->ctrl.IntCtrl.n.u1VIntrMasking;
     return HMIsGuestSvmVirtIntrMasking(pVCpu, pCtx);
+}
+
+/**
+ * Checks if the nested-guest VMCB has nested-paging enabled.
+ *
+ * @returns @c true if nested-paging is enabled, @c false otherwise.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   pCtx        Pointer to the context.
+ *
+ * @remarks Should only be called when SVM feature is exposed to the guest.
+ */
+DECLINLINE(bool) CPUMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx)
+{
+    PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    Assert(pVmcb);
+    if (!pCtx->hwvirt.svm.fHMCachedVmcb)
+        return pVmcb->ctrl.NestedPaging.n.u1NestedPaging;
+    return HMIsGuestSvmNestedPagingEnabled(pVCpu, pCtx);
 }
 
@@ -1702,5 +1721,5 @@
                                                     uint64_t *puValidEfer);
 VMMDECL(void)           CPUMSetGuestMsrEferNoCheck(PVMCPU pVCpu, uint64_t uOldEfer, uint64_t uValidEfer);
-
+VMMDECL(bool)           CPUMIsPatMsrValid(uint64_t uValue);
 
 /** @name Typical scalable bus frequency values.
Index: /trunk/include/VBox/vmm/hm_svm.h
===================================================================
--- /trunk/include/VBox/vmm/hm_svm.h	(revision 71754)
+++ /trunk/include/VBox/vmm/hm_svm.h	(revision 71755)
@@ -903,6 +903,6 @@
     /** Offset 0x648-0x667 - Reserved. */
     uint8_t     u8Reserved9[0x668 - 0x648];
-    /** Offset 0x668 - G_PAT. */
-    uint64_t    u64GPAT;
+    /** Offset 0x668 - PAT (Page Attribute Table) MSR. */
+    uint64_t    u64PAT;
     /** Offset 0x670 - DBGCTL. */
     uint64_t    u64DBGCTL;
@@ -958,5 +958,5 @@
 AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64CR2,          0x640 - 0x400);
 AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved9,     0x648 - 0x400);
-AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64GPAT,         0x668 - 0x400);
+AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64PAT,          0x668 - 0x400);
 AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64DBGCTL,       0x670 - 0x400);
 AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64BR_FROM,      0x678 - 0x400);
@@ -1051,4 +1051,6 @@
     /** Cache of DBGCTL. */
     uint64_t            u64DBGCTL;
+    /** Cache of the PAT MSR. */
+    uint64_t            u64PAT;
     /** @} */
 
@@ -1138,4 +1140,5 @@
 VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector);
 VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu, PCCPUMCTX pCtx);
+VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx);
 VMM_INT_DECL(bool) HMCanSvmNstGstTakePhysIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
 VMM_INT_DECL(bool) HMCanSvmNstGstTakeVirtIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
Index: /trunk/include/iprt/x86.h
===================================================================
--- /trunk/include/iprt/x86.h	(revision 71754)
+++ /trunk/include/iprt/x86.h	(revision 71755)
@@ -1236,4 +1236,7 @@
 /** Page Attribute Table. */
 #define MSR_IA32_CR_PAT                     0x277
+/** Default PAT MSR value on processor powerup / reset (see Intel spec. 11.12.4
+ *  "Programming the PAT", AMD spec. 7.8.2 "PAT Indexing") */
+#define MSR_IA32_CR_PAT_INIT_VAL            UINT64_C(0x0007040600070406)
 
 /** Performance counter MSRs. (Intel only) */
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 71754)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 71755)
@@ -551,19 +551,10 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
-
-    for (uint32_t cShift = 0; cShift < 63; cShift += 8)
+    if (CPUMIsPatMsrValid(uValue))
     {
-        /* Check all eight bits because the top 5 bits of each byte are reserved. */
-        uint8_t uType = (uint8_t)(uValue >> cShift);
-        if ((uType >= 8) || (uType == 2) || (uType == 3))
-        {
-            Log(("CPUM: Invalid PAT type at %u:%u in IA32_PAT: %#llx (%#llx)\n",
-                 cShift + 7, cShift, uValue, uType));
-            return VERR_CPUM_RAISE_GP_0;
-        }
+        pVCpu->cpum.s.Guest.msrPAT = uValue;
+        return VINF_SUCCESS;
     }
-
-    pVCpu->cpum.s.Guest.msrPAT = uValue;
-    return VINF_SUCCESS;
+    return VERR_CPUM_RAISE_GP_0;
 }
 
@@ -6209,4 +6200,26 @@
 
 /**
+ * Checks if a guest PAT MSR write is valid.
+ *
+ * @returns @c true if the PAT bit combination is valid, @c false otherwise.
+ * @param   uValue      The PAT MSR value.
+ */
+VMMDECL(bool) CPUMIsPatMsrValid(uint64_t uValue)
+{
+    for (uint32_t cShift = 0; cShift < 63; cShift += 8)
+    {
+        /* Check all eight bits because the top 5 bits of each byte are reserved. */
+        uint8_t uType = (uint8_t)(uValue >> cShift);
+        if ((uType >= 8) || (uType == 2) || (uType == 3))
+        {
+            Log(("CPUM: Invalid PAT type at %u:%u in IA32_PAT: %#llx (%#llx)\n", cShift + 7, cShift, uValue, uType));
+            return false;
+        }
+    }
+    return true;
+}
+
+
+/**
  * Validates an EFER MSR write.
  *
Index: /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 71754)
+++ /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 71755)
@@ -132,6 +132,6 @@
  * in IEM).
  *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCtx            Pointer to the guest-CPU context.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pCtx    Pointer to the guest-CPU context.
  *
  * @sa      hmR0SvmVmRunCacheVmcb.
@@ -169,4 +169,14 @@
         pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking      = pNstGstVmcbCache->fVIntrMasking;
         pVmcbNstGstCtrl->TLBCtrl                       = pNstGstVmcbCache->TLBCtrl;
+
+        /*
+         * If the nested-hypervisor isn't using nested-paging (and thus shadow paging
+         * is used by HM), we restore the original PAT MSR from the nested-guest VMCB.
+         * Otherwise, the nested-guest-CPU PAT MSR would've already been saved here by
+         * hardware-assisted SVM or by IEM.
+         */
+        if (!pNstGstVmcbCache->u1NestedPaging)
+            pVmcbNstGstState->u64PAT = pNstGstVmcbCache->u64PAT;
+
         pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging = pNstGstVmcbCache->u1NestedPaging;
         pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt           = pNstGstVmcbCache->u1LbrVirt;
@@ -177,9 +187,10 @@
      * Currently, VMRUN, #VMEXIT transitions involves trips to ring-3 that would flag a full
      * CPU state change. However, if we exit to ring-3 in response to receiving a physical
-     * interrupt, we skip signaling any CPU state change as normally no change
-     * is done to the execution state (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
-     * However, with nested-guests, the state can change for e.g., we might perform a
-     * SVM_EXIT_INTR #VMEXIT for the nested-guest in ring-3. Hence we signal a full CPU
-     * state change here.
+     * interrupt, we skip signaling any CPU state change as normally no change is done to the
+     * execution state (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
+     *
+     * With nested-guests, the state can change on a trip to ring-3; e.g., we might perform a
+     * SVM_EXIT_INTR #VMEXIT for the nested-guest in ring-3. Hence we signal a full CPU state
+     * change here.
      */
     HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
@@ -439,5 +450,6 @@
 
 /**
- * Checks if the guest VMCB has the specified ctrl/instruction intercept active.
+ * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
+ * active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.
@@ -456,5 +468,5 @@
 
 /**
- * Checks if the guest VMCB has the specified CR read intercept active.
+ * Checks if the nested-guest VMCB has the specified CR read intercept active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.
@@ -473,6 +485,5 @@
 
 /**
- * Checks if the guest VMCB has the specified CR write intercept
- * active.
+ * Checks if the nested-guest VMCB has the specified CR write intercept active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.
@@ -491,6 +502,5 @@
 
 /**
- * Checks if the guest VMCB has the specified DR read intercept
- * active.
+ * Checks if the nested-guest VMCB has the specified DR read intercept active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.
@@ -509,5 +519,5 @@
 
 /**
- * Checks if the guest VMCB has the specified DR write intercept active.
+ * Checks if the nested-guest VMCB has the specified DR write intercept active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.
@@ -526,5 +536,5 @@
 
 /**
- * Checks if the guest VMCB has the specified exception intercept active.
+ * Checks if the nested-guest VMCB has the specified exception intercept active.
  *
  * @returns true if in intercept is active, false otherwise.
@@ -543,5 +553,5 @@
 
 /**
- * Checks if the guest VMCB has virtual-interrupts masking enabled.
+ * Checks if the nested-guest VMCB has virtual-interrupts masking enabled.
  *
  * @returns true if virtual-interrupts are masked, @c false otherwise.
@@ -554,4 +564,19 @@
     PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     return pVmcbNstGstCache->fVIntrMasking;
+}
+
+
+/**
+ * Checks if the nested-guest VMCB has nested-paging enabled.
+ *
+ * @returns @c true if nested-paging is enabled, @c false otherwise.
+ * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
+ * @param   pCtx    Pointer to the context.
+ */
+VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx)
+{
+    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    return RT_BOOL(pVmcbNstGstCache->u1NestedPaging);
 }
 
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h	(revision 71754)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h	(revision 71755)
@@ -152,4 +152,6 @@
         pVmcbNstGstState->u8CPL         = pCtx->ss.Attr.n.u2Dpl;   /* See comment in CPUMGetGuestCPL(). */
         Assert(CPUMGetGuestCPL(pVCpu) == pCtx->ss.Attr.n.u2Dpl);
+        if (CPUMIsGuestSvmNestedPagingEnabled(pVCpu, pCtx))
+            pVmcbNstGstState->u64PAT = pCtx->msrPAT;
 
         PSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
@@ -180,6 +182,4 @@
         else
             pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;
-
-        /** @todo NRIP. */
 
         /* Save exit information. */
@@ -459,5 +459,16 @@
         }
 
-        /** @todo gPAT MSR validation? */
+        /*
+         * PAT (Page Attribute Table) MSR.
+         *
+         * The CPU only validates and loads it when nested-paging is enabled.
+         * See AMD spec. "15.25.4 Nested Paging and VMRUN/#VMEXIT".
+         */
+        if (   pVmcbCtrl->NestedPaging.n.u1NestedPaging
+            && !CPUMIsPatMsrValid(pVmcbNstGst->u64PAT))
+        {
+            Log(("iemSvmVmrun: PAT invalid. u64PAT=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64PAT));
+            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        }
 
         /*
@@ -614,4 +625,6 @@
         pCtx->rip        = pVmcbNstGst->u64RIP;
         CPUMSetGuestMsrEferNoCheck(pVCpu, pCtx->msrEFER, uValidEfer);
+        if (pVmcbCtrl->NestedPaging.n.u1NestedPaging)
+            pCtx->msrPAT = pVmcbNstGst->u64PAT;
 
         /* Mask DR6, DR7 bits mandatory set/clear bits. */
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 71754)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 71755)
@@ -1001,8 +1001,10 @@
         /*
          * Setup the PAT MSR (applicable for Nested Paging only).
-         * The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB,
-         * so choose type 6 for all PAT slots.
+         *
+         * While guests can modify and see the modified values through the shadow values,
+         * we shall not honor any guest modifications of this MSR to ensure caching is always
+         * enabled, similar to how we always run with CR0.CD and NW bits cleared.
          */
-        pVmcb->guest.u64GPAT = UINT64_C(0x0006060606060606);
+        pVmcb->guest.u64PAT = MSR_IA32_CR_PAT_INIT_VAL;
 
         /* Setup Nested Paging. This doesn't change throughout the execution time of the VM. */
@@ -1754,4 +1756,6 @@
     pVmcb->guest.u64SFMASK       = pCtx->msrSFMASK;
     pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
+
+    /* We don't honor guest modifications to its PAT MSR (similar to ignoring CR0.CD, NW bits). */
 }
 
@@ -2417,4 +2421,5 @@
         pVmcbNstGstCache->u64CR4            = pVmcbNstGstState->u64CR4;
         pVmcbNstGstCache->u64EFER           = pVmcbNstGstState->u64EFER;
+        pVmcbNstGstCache->u64PAT            = pVmcbNstGstState->u64PAT;
         pVmcbNstGstCache->u64DBGCTL         = pVmcbNstGstState->u64DBGCTL;
         pVmcbNstGstCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
@@ -2470,4 +2475,7 @@
         pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt = pVmcb->ctrl.LbrVirt.n.u1LbrVirt;
         pVmcbNstGst->guest.u64DBGCTL = pVmcb->guest.u64DBGCTL;
+
+        /* Override nested-guest PAT MSR, see @bugref{7243#c109}. */
+        pVmcbNstGst->guest.u64PAT = MSR_IA32_CR_PAT_INIT_VAL;
     }
     else
@@ -3919,5 +3927,5 @@
         Log4(("guest.u64SFMASK                   %#RX64\n",   pVmcb->guest.u64SFMASK));
         Log4(("guest.u64KernelGSBase             %#RX64\n",   pVmcb->guest.u64KernelGSBase));
-        Log4(("guest.u64GPAT                     %#RX64\n",   pVmcb->guest.u64GPAT));
+        Log4(("guest.u64PAT                      %#RX64\n",   pVmcb->guest.u64PAT));
         Log4(("guest.u64DBGCTL                   %#RX64\n",   pVmcb->guest.u64DBGCTL));
         Log4(("guest.u64BR_FROM                  %#RX64\n",   pVmcb->guest.u64BR_FROM));
Index: /trunk/src/VBox/VMM/VMMR3/CPUM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 71754)
+++ /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 71755)
@@ -1231,5 +1231,5 @@
      */
     /* Init PAT MSR */
-    pCtx->msrPAT                    = UINT64_C(0x0007040600070406); /** @todo correct? */
+    pCtx->msrPAT                    = MSR_IA32_CR_PAT_INIT_VAL;
 
     /* EFER MBZ; see AMD64 Architecture Programmer's Manual Volume 2: Table 14-1. Initial Processor State.
@@ -2359,5 +2359,5 @@
     pHlp->pfnPrintf(pHlp, "%su64SysEnterESP             = %#RX64\n", pszPrefix, pVmcbStateSave->u64SysEnterESP);
     pHlp->pfnPrintf(pHlp, "%su64CR2                     = %#RX64\n", pszPrefix, pVmcbStateSave->u64CR2);
-    pHlp->pfnPrintf(pHlp, "%su64GPAT                    = %#RX64\n", pszPrefix, pVmcbStateSave->u64GPAT);
+    pHlp->pfnPrintf(pHlp, "%su64PAT                     = %#RX64\n", pszPrefix, pVmcbStateSave->u64PAT);
     pHlp->pfnPrintf(pHlp, "%su64DBGCTL                  = %#RX64\n", pszPrefix, pVmcbStateSave->u64DBGCTL);
     pHlp->pfnPrintf(pHlp, "%su64BR_FROM                 = %#RX64\n", pszPrefix, pVmcbStateSave->u64BR_FROM);
Index: /trunk/src/VBox/VMM/VMMR3/HM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 71754)
+++ /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 71755)
@@ -3534,4 +3534,8 @@
         rc = SSMR3PutU32(pSSM, pPatch->cFaults);
         AssertRCReturn(rc, rc);
+        /** @todo We need to save SVMNESTEDVMCBCACHE (if pCtx fHMCached is true as we
+         *        are in nested-guest execution and the cache contains pristine
+         *        fields that we only restore on #VMEXIT and not on
+         *        every exit-to-ring 3). */
     }
 #endif
@@ -3553,5 +3557,5 @@
     int rc;
 
-    Log(("hmR3Load:\n"));
+    LogFlowFunc(("uVersion=%u\n", uVersion));
     Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
 
