Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 46529)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 46530)
@@ -142,4 +142,6 @@
     /** The #VMEXIT exit code (the EXITCODE field in the VMCB). */
     uint64_t        u64ExitCode;
+    /** The guest's TPR value used for TPR shadowing. */
+    uint8_t         u8GuestTpr;
 } SVMTRANSIENT, *PSVMTRANSIENT;
 /** @}  */
@@ -449,7 +451,7 @@
  *
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   uMsr       The MSR.
- * @param   fRead       Whether reading is allowed.
- * @param   fWrite      Whether writing is allowed.
+ * @param   uMsr        The MSR for which the access permissions are being set.
+ * @param   enmRead     MSR read permissions.
+ * @param   enmWrite    MSR write permissions.
  */
 static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
@@ -626,12 +628,12 @@
          * Don't intercept guest read/write accesses to these MSRs.
          */
-        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMsrPermission(pVCpu, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_CSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_K6_STAR,           SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
         hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
         hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
@@ -886,9 +888,10 @@
  * @returns VBox status code.
  * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pVmcb       Pointer to the VMCB.
  * @param   pCtx        Pointer the guest-CPU context.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
     /*
@@ -1037,4 +1040,5 @@
 }
 
+
 /**
  * Loads the guest segment registers into the VMCB.
@@ -1042,9 +1046,10 @@
  * @returns VBox status code.
  * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pVmcb       Pointer to the VMCB.
  * @param   pCtx        Pointer to the guest-CPU context.
  *
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(void) hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
     /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
@@ -1100,9 +1105,10 @@
  *
  * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pVmcb       Pointer to the VMCB.
  * @param   pCtx        Pointer to the guest-CPU context.
  *
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(void) hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
     /* Guest Sysenter MSRs. */
@@ -1147,6 +1153,7 @@
  *
  * @remarks No-long-jump zone!!!
- */
-static void hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+ * @remarks Requires EFLAGS to be up-to-date in the VMCB!
+ */
+DECLINLINE(void) hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
@@ -1235,4 +1242,66 @@
     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
 }
+
+
+/**
+ * Loads the guest APIC state (currently just the TPR).
+ *
+ * @returns VBox status code.
+ * @param   pVCpu   Pointer to the VMCPU.
+ * @param   pVmcb   Pointer to the VMCB.
+ * @param   pCtx    Pointer to the guest-CPU context.
+ */
+DECLINLINE(int) hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+{
+    if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_SVM_GUEST_APIC_STATE))
+        return VINF_SUCCESS;
+
+    bool    fPendingIntr;
+    uint8_t u8Tpr;
+    int rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
+    AssertRCReturn(rc, rc);
+
+    /** Assume that we need to trap all TPR accesses and thus need not check on
+     *  every #VMEXIT if we should update the TPR. */
+    Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking);
+    pVCpu->hm.s.svm.fSyncVTpr = false;
+
+    /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */
+    if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
+    {
+        pCtx->msrLSTAR = u8Tpr;
+
+        /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
+        if (fPendingIntr)
+            hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
+        else
+        {
+            hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+            pVCpu->hm.s.svm.fSyncVTpr = true;
+        }
+
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
+    }
+    else
+    {
+        /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
+        pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
+
+        /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
+        if (fPendingIntr)
+            pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
+        else
+        {
+            pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
+            pVCpu->hm.s.svm.fSyncVTpr = true;
+        }
+
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+    }
+
+    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_SVM_GUEST_APIC_STATE;
+    return rc;
+}
+
 
 /**
@@ -1353,9 +1422,9 @@
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
 
-    int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pCtx);
+    int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcb, pCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
-    hmR0SvmLoadGuestSegmentRegs(pVCpu, pCtx);
-    hmR0SvmLoadGuestMsrs(pVCpu, pCtx);
+    hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcb, pCtx);
+    hmR0SvmLoadGuestMsrs(pVCpu, pVmcb, pCtx);
 
     pVmcb->guest.u64RIP    = pCtx->rip;
@@ -1366,5 +1435,8 @@
 
     /* hmR0SvmLoadGuestDebugRegs() must be called -after- updating guest RFLAGS as the RFLAGS may need to be changed. */
-    hmR0SvmLoadGuestDebugRegs(pVCpu, pCtx);
+    hmR0SvmLoadGuestDebugRegs(pVCpu, pVmcb, pCtx);
+
+    rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx);
+    AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
     rc = hmR0SvmSetupVMRunHandler(pVCpu, pCtx);
@@ -2294,6 +2366,4 @@
 #endif
 
-    /* -XXX- todo TPR syncing. */
-
     /*
      * Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
@@ -2310,4 +2380,16 @@
     AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
     STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
+
+    /*
+     * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
+     * so we can update it on the way back if the guest changed the TPR.
+     */
+    if (pVCpu->hm.s.svm.fSyncVTpr)
+    {
+        if (pVM->hm.s.fTPRPatchingActive)
+            pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
+        else
+            pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
+    }
 
     /* Flush the appropriate tagged-TLB entries. */
@@ -2401,9 +2483,7 @@
 
     TMNotifyEndOfExecution(pVCpu);                              /* Notify TM that the guest is no longer running. */
+    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
+
     Assert(!(ASMGetFlags() & X86_EFL_IF));
-    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
-
-    /* -XXX- TPR patching? */
-
     ASMSetFlags(pSvmTransient->uEFlags);                        /* Enable interrupts. */
 
@@ -2414,7 +2494,22 @@
     hmR0SvmSaveGuestState(pVCpu, pMixedCtx);                    /* Save the guest state from the VMCB to the guest-CPU context. */
 
-    /* --XXX- TPR syncing todo */
+    if (pVCpu->hm.s.svm.fSyncVTpr)
+    {
+        /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
+        if (   pVM->hm.s.fTPRPatchingActive
+            && (pCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
+        {
+            int rc = PDMApicSetTPR(pVCpu, pCtx->msrLSTAR & 0xff);
+            AssertRC(rc);
+        }
+        else if (pSvmTransient->u8GuestTpr != pVmcb->ctrl.IntCtrl.n.u8VTPR)
+        {
+            int rc = PDMApicSetTPR(pVCpu, (pVmcb->ctrl.IntCtrl.n.u8VTPR << 4));
+            AssertRC(rc);
+        }
+    }
 
     /* -XXX- premature interruption during event injection */
+
 }
 
Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 46529)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 46530)
@@ -2659,5 +2659,5 @@
                 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
                 {
-                    /* If there are interrupts pending, intercept CR8 writes, otherwise don't intercept CR8 reads or writes. */
+                    /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
                     if (fPendingIntr)
                         hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);
@@ -6695,7 +6695,5 @@
 
     /* Clear any unused and reserved bits. */
-    pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_GUEST_CR2
-                                      | HM_CHANGED_VMX_RESERVED1
-                                      | HM_CHANGED_VMX_RESERVED2);
+    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
 
     AssertMsg(!pVCpu->hm.s.fContextUseFlags,
Index: /trunk/src/VBox/VMM/include/HMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/HMInternal.h	(revision 46529)
+++ /trunk/src/VBox/VMM/include/HMInternal.h	(revision 46530)
@@ -128,14 +128,10 @@
 # define HM_CHANGED_VMX_ENTRY_CTLS               RT_BIT(19)
 # define HM_CHANGED_VMX_EXIT_CTLS                RT_BIT(20)
-# define HM_CHANGED_VMX_RESERVED1                RT_BIT(21)
-# define HM_CHANGED_VMX_RESERVED2                RT_BIT(22)
 /* AMD-V specific state. */
-# define HM_CHANGED_SVM_INTERCEPT_VECTORS        RT_BIT(16)
-# define HM_CHANGED_SVM_IOPM_MSRPM_BITMAPS       RT_BIT(17)
-# define HM_CHANGED_SVM_GUEST_ASID               RT_BIT(18)
-# define HM_CHANGED_SVM_GUEST_TPR                RT_BIT(19)
-# define HM_CHANGED_SVM_GUEST_NP                 RT_BIT(20)
-# define HM_CHANGED_SVM_LBR                      RT_BIT(21)
-# define HM_CHANGED_SVM_AVIC                     RT_BIT(22)
+# define HM_CHANGED_SVM_GUEST_APIC_STATE         RT_BIT(16)
+# define HM_CHANGED_SVM_RESERVED1                RT_BIT(17)
+# define HM_CHANGED_SVM_RESERVED2                RT_BIT(18)
+# define HM_CHANGED_SVM_RESERVED3                RT_BIT(19)
+# define HM_CHANGED_SVM_RESERVED4                RT_BIT(20)
 
 # define HM_CHANGED_HOST_CONTEXT                 RT_BIT(23)
@@ -159,7 +155,5 @@
                                                  | HM_CHANGED_VMX_GUEST_APIC_STATE     \
                                                  | HM_CHANGED_VMX_ENTRY_CTLS           \
-                                                 | HM_CHANGED_VMX_EXIT_CTLS            \
-                                                 | HM_CHANGED_VMX_RESERVED1            \
-                                                 | HM_CHANGED_VMX_RESERVED2)
+                                                 | HM_CHANGED_VMX_EXIT_CTLS)
 #endif
 
@@ -725,4 +719,8 @@
         /** Virtual address of the MSR bitmap. */
         R0PTRTYPE(void *)           pvMsrBitmap;
+
+        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
+         *  we should check if the VTPR changed on every VM-exit. */
+        bool                        fSyncVTpr;
     } svm;
 
