Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 49895)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 49896)
@@ -751,4 +751,7 @@
         pVmcb->guest.u64GPAT = UINT64_C(0x0006060606060606);
 
+        /* Set up nested paging. This doesn't change throughout the execution time of the VM. */
+        pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
+
         /* Without Nested Paging, we need additionally intercepts. */
         if (!pVM->hm.s.fNestedPaging)
@@ -2865,14 +2868,4 @@
         hmR0SvmEvaluatePendingEvent(pVCpu, pCtx);
 
-    /*
-     * Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
-     * We avoid changing the corresponding VMCB Clean bit as we're not changing it to a different value since the previous run.
-     */
-    /** @todo The above assumption could be wrong. It's not documented what
-     *        should be done wrt to the VMCB Clean bit, but we'll find out the
-     *        hard way. */
-    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
-    pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
-
 #ifdef HMSVM_SYNC_FULL_GUEST_STATE
     HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
@@ -2893,5 +2886,8 @@
             pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
         else
+        {
+            PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
             pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
+        }
     }
 
