Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 45408)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 45409)
@@ -169,5 +169,5 @@
     uint32_t        fVmcsFieldsRead;
     /** Whether TSC-offsetting should be setup before VM-entry. */
-    bool            fUpdateTscOffsetting;
+    bool            fUpdateTscOffsettingAndPreemptTimer;
     /** Whether the VM-exit was caused by a page-fault during delivery of a
      *  contributary exception or a page-fault. */
@@ -2353,6 +2353,8 @@
          *        VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR,
          *        VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR,
-         *        VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR,
-         *        VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER. */
+         *        VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR. */
+
+        if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
+            val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER;
 
         if ((val & zap) != val)
@@ -4795,5 +4797,5 @@
 
 /**
- * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
+ * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
  * from the guest-state area in the VMCS.
  *
@@ -6348,8 +6350,8 @@
 
     /* Setup TSC-offsetting or intercept RDTSC(P)s and update the preemption timer. */
-    if (pVmxTransient->fUpdateTscOffsetting)
+    if (pVmxTransient->fUpdateTscOffsettingAndPreemptTimer)
     {
         hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVM, pVCpu, pMixedCtx);
-        pVmxTransient->fUpdateTscOffsetting = false;
+        pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
     }
 
@@ -6483,5 +6485,5 @@
 
     VMXTRANSIENT VmxTransient;
-    VmxTransient.fUpdateTscOffsetting = true;
+    VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
     int          rc     = VERR_INTERNAL_ERROR_5;
     unsigned     cLoops = 0;
@@ -6822,5 +6824,5 @@
         /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
         if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
-            pVmxTransient->fUpdateTscOffsetting = true;
+            pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
     }
     else
@@ -6857,5 +6859,5 @@
         /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
         if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
-            pVmxTransient->fUpdateTscOffsetting = true;
+            pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
     }
     else
@@ -7129,9 +7131,13 @@
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    /* If we're saving the preemption-timer value on every VM-exit and it has reached zero, re-arm it on the next VM-entry. */
+    if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
+        pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
+
     /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
     bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
-    int rc = fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
-    return rc;
+    return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
 }
 
