Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 74632)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 74633)
@@ -5203,7 +5203,8 @@
                 /*
                  * If the Mov-from-CR8 doesn't cause a VM-exit, bits 7:4 of the VTPR is copied
-                 * to bits 0:3 of the destination operand and bits 63:4 are cleared.
+                 * to bits 0:3 of the destination operand. Bits 63:4 of the destination operand
+                 * are cleared.
                  *
-                 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
+                 * See Intel Spec. 29.3 "Virtualizing CR8-based TPR Accesses".
                  */
                 if (IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
@@ -5725,4 +5726,25 @@
             }
 
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+            if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
+                && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
+            {
+                /*
+                 * If the Mov-to-CR8 doesn't cause a VM-exit, bits 0:3 of the source operand
+                 * are copied to bits 7:4 of the VTPR. Bits 0:3 and bits 31:8 of the VTPR are
+                 * cleared. Following this the processor performs TPR virtualization.
+                 *
+                 * See Intel Spec. 29.3 "Virtualizing CR8-based TPR Accesses"
+                 */
+                uint32_t const uVTpr = (uNewCrX & 0xf) << 4;
+                iemVmxVirtApicWriteRaw32(pVCpu, uVTpr, XAPIC_OFF_TPR);
+                rcStrict = iemVmxVmexitTprVirtualization(pVCpu, cbInstr);
+                if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
+                    return rcStrict;
+                rcStrict = VINF_SUCCESS;
+                break;
+            }
+#endif
+
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
             if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
@@ -5795,14 +5817,8 @@
         {
             case 0:
-            case 4:
-                rcStrict = iemVmxVmexitInstrMovToCr0Cr4(pVCpu, iCrReg, &uNewCrX, iGReg, cbInstr);
-                break;
-            case 3:
-                rcStrict = iemVmxVmexitInstrMovToCr3(pVCpu, uNewCrX, iGReg, cbInstr);
-                break;
-            default:
-                break;
-        }
-
+            case 4: rcStrict = iemVmxVmexitInstrMovToCr0Cr4(pVCpu, iCrReg, &uNewCrX, iGReg, cbInstr);   break;
+            case 3: rcStrict = iemVmxVmexitInstrMovToCr3(pVCpu, uNewCrX, iGReg, cbInstr);               break;
+            case 8: rcStrict = iemVmxVmexitInstrMovToCr8(pVCpu, iGReg, cbInstr);                        break;
+        }
         if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
                 return rcStrict;
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h	(revision 74632)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h	(revision 74633)
@@ -910,5 +910,4 @@
 {
     Assert(idxCr3Target < VMX_V_CR3_TARGET_COUNT);
-
     uint8_t  const  uWidth         = VMX_VMCS_ENC_WIDTH_NATURAL;
     uint8_t  const  uType          = VMX_VMCS_ENC_TYPE_CONTROL;
@@ -935,9 +934,24 @@
 {
     Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
-
     uint8_t  const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
     Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
-    uint32_t const uValue      = *(const uint32_t *)(pbVirtApic + offReg);
-    return uValue;
+    uint32_t const uReg = *(const uint32_t *)(pbVirtApic + offReg);
+    return uReg;
+}
+
+
+/**
+ * Writes a 32-bit register to the virtual-APIC page at the given offset.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   uReg        The register value to write.
+ * @param   offReg      The offset of the register being written.
+ */
+DECLINLINE(void) iemVmxVirtApicWriteRaw32(PVMCPU pVCpu, uint32_t uReg, uint8_t offReg)
+{
+    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
+    uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
+    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
+    *(uint32_t *)(pbVirtApic + offReg) = uReg;
 }
 
@@ -1386,4 +1400,7 @@
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   cbInstr     The VM-exit instruction length in bytes.
+ *
+ * @remarks Callers may pass 0 to clear this field. Hence, this function does not
+ *          check the validity of the instruction length.
  */
 DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPU pVCpu, uint32_t cbInstr)
@@ -2083,5 +2100,20 @@
      * Save guest RIP, RSP and RFLAGS.
      * See Intel spec. 27.3.3 "Saving RIP, RSP and RFLAGS".
-     */
+     *
+     * For trap-like VM-exits we must advance the RIP by the length of the instruction.
+     * Callers must pass the instruction length in the VM-exit instruction length
+     * field though it is undefined for such VM-exits. After updating RIP here, we clear
+     * the VM-exit instruction length field.
+     *
+     * See Intel spec. 27.1 "Architectural State Before A VM Exit"
+     */
+    if (HMVmxIsTrapLikeVmexit(uExitReason))
+    {
+        uint8_t const cbInstr = pVmcs->u32RoExitInstrLen;
+        AssertMsg(cbInstr >= 1 && cbInstr <= 15, ("uReason=%u cbInstr=%u\n", uExitReason, cbInstr));
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        iemVmxVmcsSetExitInstrLen(pVCpu, 0 /* cbInstr */);
+    }
+
     /* We don't support enclave mode yet. */
     pVmcs->u64GuestRip.u    = pVCpu->cpum.GstCtx.rip;
@@ -3159,4 +3191,6 @@
  *
  * @returns VBox strict status code.
+ * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the Mov instruction did not cause a
+ *         VM-exit.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   iGReg       The general register to which the CR8 value is being stored.
@@ -3185,4 +3219,80 @@
                          | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG,   iGReg);
         return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
+    }
+
+    return VINF_VMX_INTERCEPT_NOT_ACTIVE;
+}
+
+
+/**
+ * VMX VM-exit handler for VM-exits due to 'Mov CR8,GReg' (CR8 write).
+ *
+ * @returns VBox strict status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   iGReg       The general register from which the CR8 value is being
+ *                      loaded.
+ * @param   cbInstr     The instruction length in bytes.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+
+    /*
+     * If the CR8-load exiting control is set, we must cause a VM-exit.
+     * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
+     */
+    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT)
+    {
+        Log2(("mov_Cr_Rd: (CR8) Guest intercept -> VM-exit\n"));
+
+        VMXVEXITINFO ExitInfo;
+        RT_ZERO(ExitInfo);
+        ExitInfo.uReason = VMX_EXIT_MOV_CRX;
+        ExitInfo.cbInstr = cbInstr;
+
+        ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
+                         | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS,   VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
+                         | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG,   iGReg);
+        return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
+    }
+
+    return VINF_VMX_INTERCEPT_NOT_ACTIVE;
+}
+
+
+/**
+ * VMX VM-exit handler for TPR virtualization.
+ *
+ * @returns VBox strict status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   cbInstr     The instruction length in bytes.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmexitTprVirtualization(PVMCPU pVCpu, uint8_t cbInstr)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+
+    Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
+    Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY));    /* We don't support virtual-interrupt delivery yet. */
+
+    uint32_t const uTprThreshold = pVmcs->u32TprThreshold;
+    uint32_t const uVTpr         = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
+
+    /*
+     * If the VTPR falls below the TPR threshold, we must cause a VM-exit.
+     * See Intel spec. 29.1.2 "TPR Virtualization".
+     */
+    if (((uVTpr >> 4) & 0xf) < uTprThreshold)
+    {
+        Log2(("tpr_virt: uVTpr=%u uTprThreshold=%u -> VM-exit\n", uVTpr, uTprThreshold));
+
+        /*
+         * This is a trap-like VM-exit. We pass the instruction length along in the VM-exit
+         * instruction length field and let the VM-exit handler update the RIP when appropriate.
+         * It will then clear the VM-exit instruction length field before completing the VM-exit.
+         */
+        iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
+        return iemVmxVmexit(pVCpu, VMX_EXIT_TPR_BELOW_THRESHOLD);
     }
 
