Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 71832)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 71833)
@@ -1521,4 +1521,22 @@
 
 /**
+ * Gets the nested-guest VMCB pause-filter count.
+ *
+ * @returns The pause-filter count.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   pCtx        Pointer to the context.
+ *
+ * @remarks Should only be called when SVM feature is exposed to the guest.
+ */
+DECLINLINE(uint16_t) CPUMGetGuestSvmPauseFilterCount(PVMCPU pVCpu, PCCPUMCTX pCtx)
+{
+    PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    Assert(pVmcb);
+    if (!pCtx->hwvirt.svm.fHMCachedVmcb)
+        return pVmcb->ctrl.u16PauseFilterCount;
+    return HMGetGuestSvmPauseFilterCount(pVCpu, pCtx);
+}
+
+/**
  * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
  *
Index: /trunk/include/VBox/vmm/cpum.mac
===================================================================
--- /trunk/include/VBox/vmm/cpum.mac	(revision 71832)
+++ /trunk/include/VBox/vmm/cpum.mac	(revision 71833)
@@ -266,5 +266,5 @@
     alignb 8
     .hwvirt.svm.HostState              resb        184
-    .hwvirt.svm.u16Padding0            resw          1
+    .hwvirt.svm.uPrevPauseTick         resq          1
     .hwvirt.svm.cPauseFilter           resw          1
     .hwvirt.svm.cPauseFilterThreshold  resw          1
Index: /trunk/include/VBox/vmm/cpumctx.h
===================================================================
--- /trunk/include/VBox/vmm/cpumctx.h	(revision 71832)
+++ /trunk/include/VBox/vmm/cpumctx.h	(revision 71833)
@@ -500,35 +500,37 @@
                 /** 0x300 - Guest's host-state save area. */
                 SVMHOSTSTATE        HostState;
-                /** 0x3b8 - Padding. */
-                uint16_t            u16Padding0;
-                /** 0x3ba - Pause filter count. */
+                /** 0x3b8 - Guest TSC time-stamp of when the previous PAUSE instr. was executed. */
+                uint64_t            uPrevPauseTick;
+                /** 0x3c0 - Pause filter count. */
                 uint16_t            cPauseFilter;
-                /** 0x3bc - Pause filter threshold. */
+                /** 0x3c2 - Pause filter threshold. */
                 uint16_t            cPauseFilterThreshold;
-                /** 0x3be - Whether the injected event is subject to event intercepts. */
+                /** 0x3c4 - Whether the injected event is subject to event intercepts. */
                 bool                fInterceptEvents;
-                /** 0x3bf - Whether parts of the VMCB are cached (and potentially modified) by HM. */
+                /** 0x3c5 - Whether parts of the VMCB are cached (and potentially modified) by HM. */
                 bool                fHMCachedVmcb;
-                /** 0x3c0 - MSR permission bitmap - R0 ptr. */
+                /** 0x3c6 - Padding. */
+                bool                afPadding[2];
+                /** 0x3c8 - MSR permission bitmap - R0 ptr. */
                 R0PTRTYPE(void *)   pvMsrBitmapR0;
 #if HC_ARCH_BITS == 32
                 uint32_t            uvMsrBitmapR0Padding;
 #endif
-                /** 0x3c8 - MSR permission bitmap - R3 ptr. */
+                /** 0x3d0 - MSR permission bitmap - R3 ptr. */
                 R3PTRTYPE(void *)   pvMsrBitmapR3;
 #if HC_ARCH_BITS == 32
                 uint32_t            uvMsrBitmapR3Padding;
 #endif
-                /** 0x3d0 - IO permission bitmap - R0 ptr. */
+                /** 0x3d8 - IO permission bitmap - R0 ptr. */
                 R0PTRTYPE(void *)   pvIoBitmapR0;
 #if HC_ARCH_BITS == 32
                 uint32_t            uIoBitmapR0Padding;
 #endif
-                /** 0x3d8 - IO permission bitmap - R3 ptr. */
+                /** 0x3e0 - IO permission bitmap - R3 ptr. */
                 R3PTRTYPE(void *)   pvIoBitmapR3;
 #if HC_ARCH_BITS == 32
                 uint32_t            uIoBitmapR3Padding;
 #endif
-                /** 0x3e0 - Host physical address of the nested-guest VMCB.  */
+                /** 0x3e8 - Host physical address of the nested-guest VMCB.  */
                 RTHCPHYS            HCPhysVmcb;
             } svm;
@@ -540,10 +542,10 @@
         } CPUM_UNION_NM(s);
 
-        /** 0x3e8 - A subset of force flags that are preserved while running the nested-guest. */
+        /** 0x3f0 - A subset of force flags that are preserved while running the nested-guest. */
         uint32_t                fLocalForcedActions;
-        /** 0x3f0 - Global interrupt flag (always true on nested VMX). */
+        /** 0x3f4 - Global interrupt flag (always true on nested VMX). */
         bool                    fGif;
-        /** 0x3f1 - Padding. */
-        uint8_t                 abPadding1[19];
+        /** 0x3f5 - Padding. */
+        uint8_t                 abPadding1[11];
     } hwvirt;
     /** @} */
@@ -605,9 +607,9 @@
 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pVmcbR3,                0x2f8);
 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.HostState,              0x300);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.cPauseFilter,           0x3ba);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR0,          0x3c0);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR3,           0x3d8);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.HCPhysVmcb,             0x3e0);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.fLocalForcedActions,        0x3e8);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.cPauseFilter,           0x3c0);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR0,          0x3c8);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR3,           0x3e0);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.HCPhysVmcb,             0x3e8);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.fLocalForcedActions,        0x3f0);
 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pVmcbR0,       8);
 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR0, 8);
Index: /trunk/include/VBox/vmm/hm_svm.h
===================================================================
--- /trunk/include/VBox/vmm/hm_svm.h	(revision 71832)
+++ /trunk/include/VBox/vmm/hm_svm.h	(revision 71833)
@@ -1135,14 +1135,15 @@
  * Don't add any more functions here unless there is no other option.
  */
-VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fIntercept);
-VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr);
-VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr);
-VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr);
-VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr);
-VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector);
-VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu, PCCPUMCTX pCtx);
-VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx);
-VMM_INT_DECL(bool) HMCanSvmNstGstTakePhysIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
-VMM_INT_DECL(bool) HMCanSvmNstGstTakeVirtIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
+VMM_INT_DECL(bool)     HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fIntercept);
+VMM_INT_DECL(bool)     HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr);
+VMM_INT_DECL(bool)     HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr);
+VMM_INT_DECL(bool)     HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr);
+VMM_INT_DECL(bool)     HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr);
+VMM_INT_DECL(bool)     HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector);
+VMM_INT_DECL(bool)     HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu, PCCPUMCTX pCtx);
+VMM_INT_DECL(bool)     HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx);
+VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu, PCCPUMCTX pCtx);
+VMM_INT_DECL(bool)     HMCanSvmNstGstTakePhysIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
+VMM_INT_DECL(bool)     HMCanSvmNstGstTakeVirtIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
 
 /** @} */
Index: /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 71832)
+++ /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 71833)
@@ -156,17 +156,19 @@
          * fields that are potentially modified by hardware-assisted SVM.
          */
-        pVmcbNstGstCtrl->u16InterceptRdCRx             = pNstGstVmcbCache->u16InterceptRdCRx;
-        pVmcbNstGstCtrl->u16InterceptWrCRx             = pNstGstVmcbCache->u16InterceptWrCRx;
-        pVmcbNstGstCtrl->u16InterceptRdDRx             = pNstGstVmcbCache->u16InterceptRdDRx;
-        pVmcbNstGstCtrl->u16InterceptWrDRx             = pNstGstVmcbCache->u16InterceptWrDRx;
-        pVmcbNstGstCtrl->u32InterceptXcpt              = pNstGstVmcbCache->u32InterceptXcpt;
-        pVmcbNstGstCtrl->u64InterceptCtrl              = pNstGstVmcbCache->u64InterceptCtrl;
-        pVmcbNstGstState->u64DBGCTL                    = pNstGstVmcbCache->u64DBGCTL;
-        pVmcbNstGstCtrl->u32VmcbCleanBits              = pNstGstVmcbCache->u32VmcbCleanBits;
-        pVmcbNstGstCtrl->u64IOPMPhysAddr               = pNstGstVmcbCache->u64IOPMPhysAddr;
-        pVmcbNstGstCtrl->u64MSRPMPhysAddr              = pNstGstVmcbCache->u64MSRPMPhysAddr;
-        pVmcbNstGstCtrl->u64TSCOffset                  = pNstGstVmcbCache->u64TSCOffset;
-        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking      = pNstGstVmcbCache->fVIntrMasking;
-        pVmcbNstGstCtrl->TLBCtrl                       = pNstGstVmcbCache->TLBCtrl;
+        pVmcbNstGstCtrl->u16InterceptRdCRx        = pNstGstVmcbCache->u16InterceptRdCRx;
+        pVmcbNstGstCtrl->u16InterceptWrCRx        = pNstGstVmcbCache->u16InterceptWrCRx;
+        pVmcbNstGstCtrl->u16InterceptRdDRx        = pNstGstVmcbCache->u16InterceptRdDRx;
+        pVmcbNstGstCtrl->u16InterceptWrDRx        = pNstGstVmcbCache->u16InterceptWrDRx;
+        pVmcbNstGstCtrl->u16PauseFilterCount      = pNstGstVmcbCache->u16PauseFilterCount;
+        pVmcbNstGstCtrl->u16PauseFilterThreshold  = pNstGstVmcbCache->u16PauseFilterThreshold;
+        pVmcbNstGstCtrl->u32InterceptXcpt         = pNstGstVmcbCache->u32InterceptXcpt;
+        pVmcbNstGstCtrl->u64InterceptCtrl         = pNstGstVmcbCache->u64InterceptCtrl;
+        pVmcbNstGstState->u64DBGCTL               = pNstGstVmcbCache->u64DBGCTL;
+        pVmcbNstGstCtrl->u32VmcbCleanBits         = pNstGstVmcbCache->u32VmcbCleanBits;
+        pVmcbNstGstCtrl->u64IOPMPhysAddr          = pNstGstVmcbCache->u64IOPMPhysAddr;
+        pVmcbNstGstCtrl->u64MSRPMPhysAddr         = pNstGstVmcbCache->u64MSRPMPhysAddr;
+        pVmcbNstGstCtrl->u64TSCOffset             = pNstGstVmcbCache->u64TSCOffset;
+        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking = pNstGstVmcbCache->fVIntrMasking;
+        pVmcbNstGstCtrl->TLBCtrl                  = pNstGstVmcbCache->TLBCtrl;
 
         /*
@@ -583,4 +585,19 @@
 
 /**
+ * Returns the nested-guest VMCB pause-filter count.
+ *
+ * @returns The pause-filter count.
+ * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
+ * @param   pCtx    Pointer to the context.
+ */
+VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu, PCCPUMCTX pCtx)
+{
+    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    return pVmcbNstGstCache->u16PauseFilterCount;
+}
+
+
+/**
  * Checks whether the SVM nested-guest is in a state to receive physical (APIC)
  * interrupts.
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 71832)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 71833)
@@ -1041,5 +1041,5 @@
             if (fPauseFilterThreshold)
                 pVmcb->ctrl.u16PauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
-            pVmcb->ctrl.u32InterceptXcpt |= SVM_CTRL_INTERCEPT_PAUSE;
+            pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_PAUSE;
         }
 
@@ -2048,29 +2048,30 @@
  * @param   pCtx            Pointer to the guest-CPU context.
  */
-static void hmR0SvmLoadGuestXcptInterceptsNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
-{
-    RT_NOREF(pCtx);
+static void hmR0SvmLoadGuestInterceptsNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
+{
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
     {
-        PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+        PVM          pVM             = pVCpu->CTX_SUFF(pVM);
+        PCSVMVMCB    pVmcb           = pVCpu->hm.s.svm.pVmcb;
+        PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
 
         /* Merge the guest's CR intercepts into the nested-guest VMCB. */
-        pVmcbNstGst->ctrl.u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
-        pVmcbNstGst->ctrl.u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
+        pVmcbNstGstCtrl->u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
+        pVmcbNstGstCtrl->u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
 
         /* Always intercept CR4 writes for tracking PGM mode changes. */
-        pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(4);
+        pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(4);
 
         /* Without nested paging, intercept CR3 reads and writes as we load shadow page tables. */
-        if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
-        {
-            pVmcbNstGst->ctrl.u16InterceptRdCRx |= RT_BIT(3);
-            pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(3);
+        if (!pVM->hm.s.fNestedPaging)
+        {
+            pVmcbNstGstCtrl->u16InterceptRdCRx |= RT_BIT(3);
+            pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(3);
         }
 
         /** @todo Figure out debugging with nested-guests, till then just intercept
          *        all DR[0-15] accesses. */
-        pVmcbNstGst->ctrl.u16InterceptRdDRx |= 0xffff;
-        pVmcbNstGst->ctrl.u16InterceptWrDRx |= 0xffff;
+        pVmcbNstGstCtrl->u16InterceptRdDRx |= 0xffff;
+        pVmcbNstGstCtrl->u16InterceptWrDRx |= 0xffff;
 
         /*
@@ -2087,8 +2088,8 @@
          */
 #ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
-        pVmcbNstGst->ctrl.u32InterceptXcpt  |= (pVmcb->ctrl.u32InterceptXcpt & ~(  RT_BIT(X86_XCPT_UD)
+        pVmcbNstGstCtrl->u32InterceptXcpt  |= (pVmcb->ctrl.u32InterceptXcpt & ~(  RT_BIT(X86_XCPT_UD)
                                                                                  | RT_BIT(X86_XCPT_BP)));
 #else
-        pVmcbNstGst->ctrl.u32InterceptXcpt  |= pVmcb->ctrl.u32InterceptXcpt;
+        pVmcbNstGstCtrl->u32InterceptXcpt  |= pVmcb->ctrl.u32InterceptXcpt;
 #endif
 
@@ -2103,10 +2104,29 @@
          *   the nested-guest, the physical CPU raises a \#UD exception as expected.
          */
-        pVmcbNstGst->ctrl.u64InterceptCtrl  |= (pVmcb->ctrl.u64InterceptCtrl & ~(  SVM_CTRL_INTERCEPT_VINTR
-                                                                                 | SVM_CTRL_INTERCEPT_VMMCALL))
-                                            |  HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS;
-
-        Assert(   (pVmcbNstGst->ctrl.u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
+        pVmcbNstGstCtrl->u64InterceptCtrl  |= (pVmcb->ctrl.u64InterceptCtrl & ~(  SVM_CTRL_INTERCEPT_VINTR
+                                                                                | SVM_CTRL_INTERCEPT_VMMCALL))
+                                           |  HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS;
+
+        Assert(   (pVmcbNstGstCtrl->u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
                == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
+
+        /*
+         * Ensure the nested-guest pause-filter counters don't exceed the outer guest values esp.
+         * since SVM doesn't have a preemption timer.
+         *
+         * We do this here rather than in hmR0SvmVmRunSetupVmcb() as we may have been executing the
+         * nested-guest in IEM incl. PAUSE instructions which would update the pause-filter counters.
+         */
+        if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
+        {
+            pVmcbNstGstCtrl->u16PauseFilterCount     = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, pVmcb->ctrl.u16PauseFilterCount);
+            pVmcbNstGstCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold,
+                                                              pVmcb->ctrl.u16PauseFilterThreshold);
+        }
+        else
+        {
+            pVmcbNstGstCtrl->u16PauseFilterCount     = pVmcb->ctrl.u16PauseFilterCount;
+            pVmcbNstGstCtrl->u16PauseFilterThreshold = pVmcb->ctrl.u16PauseFilterThreshold;
+        }
 
         /** @todo This doesn't make sense. Re-think and remove. */
@@ -2118,6 +2138,6 @@
         if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
         {
-            pVmcbNstGst->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
-                                               |  SVM_CTRL_INTERCEPT_VMLOAD;
+            pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
+                                              |  SVM_CTRL_INTERCEPT_VMLOAD;
         }
 
@@ -2128,11 +2148,11 @@
         if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVGif)
         {
-            pVmcbNstGst->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
-                                               |  SVM_CTRL_INTERCEPT_STGI;
+            pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
+                                              |  SVM_CTRL_INTERCEPT_STGI;
         }
 #endif
 
         /* Finally, update the VMCB clean bits. */
-        pVmcbNstGst->ctrl.u32VmcbCleanBits  &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+        pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
     }
@@ -2412,25 +2432,27 @@
     if (!fWasCached)
     {
-        pVmcbNstGstCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
-        pVmcbNstGstCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
-        pVmcbNstGstCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
-        pVmcbNstGstCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
-        pVmcbNstGstCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
-        pVmcbNstGstCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
-        pVmcbNstGstCache->u64CR0            = pVmcbNstGstState->u64CR0;
-        pVmcbNstGstCache->u64CR3            = pVmcbNstGstState->u64CR3;
-        pVmcbNstGstCache->u64CR4            = pVmcbNstGstState->u64CR4;
-        pVmcbNstGstCache->u64EFER           = pVmcbNstGstState->u64EFER;
-        pVmcbNstGstCache->u64PAT            = pVmcbNstGstState->u64PAT;
-        pVmcbNstGstCache->u64DBGCTL         = pVmcbNstGstState->u64DBGCTL;
-        pVmcbNstGstCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
-        pVmcbNstGstCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
-        pVmcbNstGstCache->u64TSCOffset      = pVmcbNstGstCtrl->u64TSCOffset;
-        pVmcbNstGstCache->u32VmcbCleanBits  = pVmcbNstGstCtrl->u32VmcbCleanBits;
-        pVmcbNstGstCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
-        pVmcbNstGstCache->TLBCtrl           = pVmcbNstGstCtrl->TLBCtrl;
-        pVmcbNstGstCache->u1NestedPaging    = pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging;
-        pVmcbNstGstCache->u1LbrVirt         = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
-        pCtx->hwvirt.svm.fHMCachedVmcb      = true;
+        pVmcbNstGstCache->u16InterceptRdCRx       = pVmcbNstGstCtrl->u16InterceptRdCRx;
+        pVmcbNstGstCache->u16InterceptWrCRx       = pVmcbNstGstCtrl->u16InterceptWrCRx;
+        pVmcbNstGstCache->u16InterceptRdDRx       = pVmcbNstGstCtrl->u16InterceptRdDRx;
+        pVmcbNstGstCache->u16InterceptWrDRx       = pVmcbNstGstCtrl->u16InterceptWrDRx;
+        pVmcbNstGstCache->u16PauseFilterCount     = pVmcbNstGstCtrl->u16PauseFilterCount;
+        pVmcbNstGstCache->u16PauseFilterThreshold = pVmcbNstGstCtrl->u16PauseFilterThreshold;
+        pVmcbNstGstCache->u32InterceptXcpt        = pVmcbNstGstCtrl->u32InterceptXcpt;
+        pVmcbNstGstCache->u64InterceptCtrl        = pVmcbNstGstCtrl->u64InterceptCtrl;
+        pVmcbNstGstCache->u64CR0                  = pVmcbNstGstState->u64CR0;
+        pVmcbNstGstCache->u64CR3                  = pVmcbNstGstState->u64CR3;
+        pVmcbNstGstCache->u64CR4                  = pVmcbNstGstState->u64CR4;
+        pVmcbNstGstCache->u64EFER                 = pVmcbNstGstState->u64EFER;
+        pVmcbNstGstCache->u64PAT                  = pVmcbNstGstState->u64PAT;
+        pVmcbNstGstCache->u64DBGCTL               = pVmcbNstGstState->u64DBGCTL;
+        pVmcbNstGstCache->u64IOPMPhysAddr         = pVmcbNstGstCtrl->u64IOPMPhysAddr;
+        pVmcbNstGstCache->u64MSRPMPhysAddr        = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
+        pVmcbNstGstCache->u64TSCOffset            = pVmcbNstGstCtrl->u64TSCOffset;
+        pVmcbNstGstCache->u32VmcbCleanBits        = pVmcbNstGstCtrl->u32VmcbCleanBits;
+        pVmcbNstGstCache->fVIntrMasking           = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
+        pVmcbNstGstCache->TLBCtrl                 = pVmcbNstGstCtrl->TLBCtrl;
+        pVmcbNstGstCache->u1NestedPaging          = pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging;
+        pVmcbNstGstCache->u1LbrVirt               = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
+        pCtx->hwvirt.svm.fHMCachedVmcb            = true;
         Log4(("hmR0SvmVmRunCacheVmcb: Cached VMCB fields\n"));
     }
@@ -2465,18 +2487,19 @@
 
         /*
-         * Use the same nested-paging as the "outer" guest. We can't dynamically
-         * switch off nested-paging suddenly while executing a VM (see assertion at the
-         * end of Trap0eHandler() in PGMAllBth.h).
+         * Use the same nested-paging as the outer guest. We can't dynamically switch off
+         * nested-paging suddenly while executing a VM (see assertion at the end of
+         * Trap0eHandler() in PGMAllBth.h).
          */
         pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;
 
-        /* For now copy the LBR info. from outer guest VMCB. */
-        /** @todo fix this later. */
-        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+        /* Override nested-guest PAT MSR, see @bugref{7243#c109}. */
+        PSVMVMCBSTATESAVE pVmcbNstGstState = &pVmcbNstGst->guest;
+        pVmcbNstGstState->u64PAT = MSR_IA32_CR_PAT_INIT_VAL;
+
+#ifdef DEBUG_ramshankar
+        /* For debugging purposes - copy the LBR info. from outer guest VMCB. */
         pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt = pVmcb->ctrl.LbrVirt.n.u1LbrVirt;
-        pVmcbNstGst->guest.u64DBGCTL = pVmcb->guest.u64DBGCTL;
-
-        /* Override nested-guest PAT MSR, see @bugref{7243#c109}. */
-        pVmcbNstGst->guest.u64PAT = MSR_IA32_CR_PAT_INIT_VAL;
+        pVmcbNstGstState->u64DBGCTL = pVmcb->guest.u64DBGCTL;
+#endif
     }
     else
@@ -2528,5 +2551,5 @@
 #endif
 
-    hmR0SvmLoadGuestXcptInterceptsNested(pVCpu, pVmcbNstGst, pCtx);
+    hmR0SvmLoadGuestInterceptsNested(pVCpu, pVmcbNstGst, pCtx);
 
     rc = hmR0SvmSetupVMRunHandler(pVCpu);
Index: /trunk/src/VBox/VMM/VMMR3/CPUM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 71832)
+++ /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 71833)
@@ -1282,5 +1282,6 @@
     {
         memset(pCtx->hwvirt.svm.CTX_SUFF(pVmcb), 0, SVM_VMCB_PAGES << PAGE_SHIFT);
-        pCtx->hwvirt.svm.uMsrHSavePa = 0;
+        pCtx->hwvirt.svm.uMsrHSavePa    = 0;
+        pCtx->hwvirt.svm.uPrevPauseTick = 0;
     }
 }
Index: /trunk/src/VBox/VMM/include/CPUMInternal.mac
===================================================================
--- /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 71832)
+++ /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 71833)
@@ -239,9 +239,10 @@
     alignb 8
     .Guest.hwvirt.svm.HostState              resb         184
-    .Guest.hwvirt.svm.u16Padding0            resw         1
+    .Guest.hwvirt.svm.uPrevPauseTick         resq         1
     .Guest.hwvirt.svm.cPauseFilter           resw         1
     .Guest.hwvirt.svm.cPauseFilterThreshold  resw         1
     .Guest.hwvirt.svm.fInterceptEvents       resb         1
     .Guest.hwvirt.svm.fHMCachedVmcb          resb         1
+    alignb 8
     .Guest.hwvirt.svm.pvMsrBitmapR0          RTR0PTR_RES  1
     alignb 8
@@ -526,9 +527,10 @@
     alignb 8
     .Hyper.hwvirt.svm.HostState              resb         184
-    .Hyper.hwvirt.svm.u16Padding0            resw         1
+    .Hyper.hwvirt.svm.uPrevPauseTick         resq         1
     .Hyper.hwvirt.svm.cPauseFilter           resw         1
     .Hyper.hwvirt.svm.cPauseFilterThreshold  resw         1
     .Hyper.hwvirt.svm.fInterceptEvents       resb         1
     .Hyper.hwvirt.svm.fHMCachedVmcb          resb         1
+    alignb 8
     .Hyper.hwvirt.svm.pvMsrBitmapR0          RTR0PTR_RES  1
     alignb 8
Index: /trunk/src/VBox/VMM/testcase/tstVMStruct.h
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 71832)
+++ /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 71833)
@@ -137,4 +137,5 @@
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pVmcbR3);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.HostState);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.uPrevPauseTick);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.cPauseFilter);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.cPauseFilterThreshold);
