Index: /trunk/include/VBox/vmm/hm.h
===================================================================
--- /trunk/include/VBox/vmm/hm.h	(revision 65988)
+++ /trunk/include/VBox/vmm/hm.h	(revision 65989)
@@ -147,4 +147,6 @@
 VMM_INT_DECL(void)              HMHypercallsEnable(PVMCPU pVCpu);
 VMM_INT_DECL(void)              HMHypercallsDisable(PVMCPU pVCpu);
+
+VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfRipUpdated);
 
 VMM_INT_DECL(void)              HMNstGstSvmVmExit(PVMCPU pVCpu, uint64_t uExitCode);
Index: /trunk/src/VBox/VMM/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/Makefile.kmk	(revision 65988)
+++ /trunk/src/VBox/VMM/Makefile.kmk	(revision 65989)
@@ -229,4 +229,5 @@
 	VMMAll/DBGFAll.cpp \
 	VMMAll/HMAll.cpp \
+	VMMAll/HMSVMAll.cpp \
 	VMMAll/IEMAll.cpp \
 	VMMAll/IEMAllAImpl.asm \
@@ -579,4 +580,6 @@
  	VMMAll/GIMAllHv.cpp \
  	VMMAll/GIMAllKvm.cpp \
+	VMMAll/HMAll.cpp \
+	VMMAll/HMSVMAll.cpp \
  	VMMAll/MMAll.cpp \
  	VMMAll/MMAllHyper.cpp \
@@ -724,4 +727,5 @@
  	VMMAll/GIMAllKvm.cpp \
  	VMMAll/HMAll.cpp \
+	VMMAll/HMSVMAll.cpp \
  	VMMAll/IEMAll.cpp \
  	VMMAll/IEMAllAImpl.asm \
Index: /trunk/src/VBox/VMM/VMMAll/GIMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/GIMAll.cpp	(revision 65988)
+++ /trunk/src/VBox/VMM/VMMAll/GIMAll.cpp	(revision 65989)
@@ -92,4 +92,6 @@
  * @retval  VINF_SUCCESS if the hypercall succeeded (even if its operation
  *          failed).
+ * @retval  VINF_GIM_HYPERCALL_CONTINUING continue hypercall without updating
+ *          RIP.
  * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
  * @retval  VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
@@ -104,4 +106,5 @@
  * @param   pCtx        Pointer to the guest-CPU context.
  *
+ * @remarks The caller of this function needs to advance RIP as required.
  * @thread  EMT.
  */
Index: /trunk/src/VBox/VMM/VMMAll/HMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMAll.cpp	(revision 65988)
+++ /trunk/src/VBox/VMM/VMMAll/HMAll.cpp	(revision 65989)
@@ -96,19 +96,4 @@
 
 
-/**
- * Flushes the guest TLB.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- */
-VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
-{
-    LogFlow(("HMFlushTLB\n"));
-
-    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
-    return VINF_SUCCESS;
-}
-
 #ifdef IN_RING0
 
@@ -170,4 +155,18 @@
 #endif /* IN_RING0 */
 #ifndef IN_RC
+/**
+ * Flushes the guest TLB.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
+{
+    LogFlow(("HMFlushTLB\n"));
+
+    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
+    return VINF_SUCCESS;
+}
 
 /**
@@ -271,91 +270,4 @@
 
     return VINF_SUCCESS;
-}
-
-#endif /* !IN_RC */
-
-/**
- * Checks if nested paging is enabled.
- *
- * @returns true if nested paging is active, false otherwise.
- * @param   pVM         The cross context VM structure.
- *
- * @remarks Works before hmR3InitFinalizeR0.
- */
-VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
-{
-    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
-}
-
-
-/**
- * Checks if both nested paging and unhampered guest execution are enabled.
- *
- * The almost complete guest execution in hardware is only applicable to VT-x.
- *
- * @returns true if we have both enabled, otherwise false.
- * @param   pVM         The cross context VM structure.
- *
- * @remarks Works before hmR3InitFinalizeR0.
- */
-VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM)
-{
-    return HMIsEnabled(pVM)
-        && pVM->hm.s.fNestedPaging
-        && (   pVM->hm.s.vmx.fUnrestrictedGuest
-            || pVM->hm.s.svm.fSupported);
-}
-
-
-/**
- * Checks if this VM is long-mode capable.
- *
- * @returns true if long mode is allowed, false otherwise.
- * @param   pVM         The cross context VM structure.
- */
-VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM)
-{
-    return HMIsEnabled(pVM) && pVM->hm.s.fAllow64BitGuests;
-}
-
-
-/**
- * Checks if MSR bitmaps are available. It is assumed that when it's available
- * it will be used as well.
- *
- * @returns true if MSR bitmaps are available, false otherwise.
- * @param   pVM         The cross context VM structure.
- */
-VMM_INT_DECL(bool) HMAreMsrBitmapsAvailable(PVM pVM)
-{
-    if (HMIsEnabled(pVM))
-    {
-        if (pVM->hm.s.svm.fSupported)
-            return true;
-
-        if (   pVM->hm.s.vmx.fSupported
-            && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
-        {
-            return true;
-        }
-    }
-    return false;
-}
-
-
-/**
- * Return the shadow paging mode for nested paging/ept
- *
- * @returns shadow paging mode
- * @param   pVM         The cross context VM structure.
- */
-VMM_INT_DECL(PGMMODE) HMGetShwPagingMode(PVM pVM)
-{
-    Assert(HMIsNestedPagingActive(pVM));
-    if (pVM->hm.s.svm.fSupported)
-        return PGMMODE_NESTED;
-
-    Assert(pVM->hm.s.vmx.fSupported);
-    return PGMMODE_EPT;
 }
 
@@ -410,4 +322,92 @@
     return VINF_SUCCESS;
 }
+
+
+/**
+ * Checks if nested paging is enabled.
+ *
+ * @returns true if nested paging is active, false otherwise.
+ * @param   pVM         The cross context VM structure.
+ *
+ * @remarks Works before hmR3InitFinalizeR0.
+ */
+VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
+{
+    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
+}
+
+
+/**
+ * Checks if both nested paging and unhampered guest execution are enabled.
+ *
+ * The almost complete guest execution in hardware is only applicable to VT-x.
+ *
+ * @returns true if we have both enabled, otherwise false.
+ * @param   pVM         The cross context VM structure.
+ *
+ * @remarks Works before hmR3InitFinalizeR0.
+ */
+VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM)
+{
+    return HMIsEnabled(pVM)
+        && pVM->hm.s.fNestedPaging
+        && (   pVM->hm.s.vmx.fUnrestrictedGuest
+            || pVM->hm.s.svm.fSupported);
+}
+
+
+/**
+ * Checks if this VM is long-mode capable.
+ *
+ * @returns true if long mode is allowed, false otherwise.
+ * @param   pVM         The cross context VM structure.
+ */
+VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM)
+{
+    return HMIsEnabled(pVM) && pVM->hm.s.fAllow64BitGuests;
+}
+
+
+/**
+ * Checks if MSR bitmaps are available. It is assumed that when it's available
+ * it will be used as well.
+ *
+ * @returns true if MSR bitmaps are available, false otherwise.
+ * @param   pVM         The cross context VM structure.
+ */
+VMM_INT_DECL(bool) HMAreMsrBitmapsAvailable(PVM pVM)
+{
+    if (HMIsEnabled(pVM))
+    {
+        if (pVM->hm.s.svm.fSupported)
+            return true;
+
+        if (   pVM->hm.s.vmx.fSupported
+            && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
+        {
+            return true;
+        }
+    }
+    return false;
+}
+
+
+/**
+ * Return the shadow paging mode for nested paging/ept
+ *
+ * @returns shadow paging mode
+ * @param   pVM         The cross context VM structure.
+ */
+VMM_INT_DECL(PGMMODE) HMGetShwPagingMode(PVM pVM)
+{
+    Assert(HMIsNestedPagingActive(pVM));
+    if (pVM->hm.s.svm.fSupported)
+        return PGMMODE_NESTED;
+
+    Assert(pVM->hm.s.vmx.fSupported);
+    return PGMMODE_EPT;
+}
+#endif /* !IN_RC */
+
 
 /**
Index: /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 65989)
+++ /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 65989)
@@ -0,0 +1,174 @@
+/* $Id$ */
+/** @file
+ * HM SVM (AMD-V) - All contexts.
+ */
+
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+*   Header Files                                                                                                                 *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_HM
+#include "HMInternal.h"
+#include <VBox/vmm/apic.h>
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/vm.h>
+
+
+#ifndef IN_RC
+/**
+ * Emulates a simple MOV TPR (CR8) instruction, used for TPR patching on 32-bit
+ * guests. This simply looks up the patch record at EIP and does the required.
+ *
+ * This VMMCALL is used as a fallback mechanism when mov to/from cr8 isn't exactly
+ * like how we want it to be (e.g. not followed by shr 4 as is usually done for
+ * TPR). See hmR3ReplaceTprInstr() for the details.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS if the access was handled successfully.
+ * @retval VERR_NOT_FOUND if no patch record for this RIP could be found.
+ * @retval VERR_SVM_UNEXPECTED_PATCH_TYPE if the found patch type is invalid.
+ *
+ * @param   pVCpu               The cross context virtual CPU structure.
+ * @param   pCtx                Pointer to the guest-CPU context.
+ * @param   pfUpdateRipAndRF    Whether the guest RIP/EIP has been updated as
+ *                              part of the TPR patch operation.
+ */
+static int hmSvmEmulateMovTpr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfUpdateRipAndRF)
+{
+    Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));
+
+    /*
+     * We do this in a loop as we increment the RIP after a successful emulation
+     * and the new RIP may be a patched instruction which needs emulation as well.
+     */
+    bool fUpdateRipAndRF = false;
+    bool fPatchFound     = false;
+    PVM  pVM = pVCpu->CTX_SUFF(pVM);
+    for (;;)
+    {
+        bool    fPending;
+        uint8_t u8Tpr;
+
+        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
+        if (!pPatch)
+            break;
+
+        fPatchFound = true;
+        switch (pPatch->enmType)
+        {
+            case HMTPRINSTR_READ:
+            {
+                int rc = APICGetTpr(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
+                AssertRC(rc);
+
+                rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
+                AssertRC(rc);
+                pCtx->rip += pPatch->cbOp;
+                pCtx->eflags.Bits.u1RF = 0;
+                fUpdateRipAndRF = true;
+                break;
+            }
+
+            case HMTPRINSTR_WRITE_REG:
+            case HMTPRINSTR_WRITE_IMM:
+            {
+                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
+                {
+                    uint32_t u32Val;
+                    int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
+                    AssertRC(rc);
+                    u8Tpr = u32Val;
+                }
+                else
+                    u8Tpr = (uint8_t)pPatch->uSrcOperand;
+
+                int rc2 = APICSetTpr(pVCpu, u8Tpr);
+                AssertRC(rc2);
+                HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
+
+                pCtx->rip += pPatch->cbOp;
+                pCtx->eflags.Bits.u1RF = 0;
+                fUpdateRipAndRF = true;
+                break;
+            }
+
+            default:
+            {
+                AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
+                pVCpu->hm.s.u32HMError = pPatch->enmType;
+                *pfUpdateRipAndRF = fUpdateRipAndRF;
+                return VERR_SVM_UNEXPECTED_PATCH_TYPE;
+            }
+        }
+    }
+
+    *pfUpdateRipAndRF = fUpdateRipAndRF;
+    if (fPatchFound)
+        return VINF_SUCCESS;
+    return VERR_NOT_FOUND;
+}
+#endif /* !IN_RC */
+
+
+/**
+ * Performs the operations necessary that are part of the vmmcall instruction
+ * execution for AMD-V.
+ *
+ * @returns Strict VBox status code (i.e. informational status codes too).
+ *
+ * @retval  VINF_SUCCESS on successful handling, no \#UD needs to be thrown,
+ *          update RIP and eflags.RF depending on @a pfUpdatedRipAndRF and
+ *          continue guest execution.
+ * @retval  VINF_GIM_HYPERCALL_CONTINUING continue hypercall without updating
+ *          RIP.
+ * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
+ *
+ * @param   pVCpu               The cross context virtual CPU structure.
+ * @param   pCtx                Pointer to the guest-CPU context.
+ * @param   pfUpdatedRipAndRF   Whether the guest RIP/EIP has been updated as
+ *                              part of handling the VMMCALL operation.
+ */
+VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfUpdatedRipAndRF)
+{
+#ifndef IN_RC
+    /*
+     * TPR patched instruction emulation for 32-bit guests.
+     */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    if (pVM->hm.s.fTprPatchingAllowed)
+    {
+        int rc = hmSvmEmulateMovTpr(pVCpu, pCtx, pfUpdatedRipAndRF);
+        if (RT_SUCCESS(rc))
+            return VINF_SUCCESS;
+
+        if (rc != VERR_NOT_FOUND)
+        {
+            Log(("hmSvmExitVmmCall: hmSvmEmulateMovTpr returns %Rrc\n", rc));
+            return rc;
+        }
+    }
+#endif
+
+    /*
+     * Paravirtualized hypercalls.
+     */
+    *pfUpdatedRipAndRF = false;
+    if (pVCpu->hm.s.fHypercallsEnabled)
+        return GIMHypercall(pVCpu, pCtx);
+
+    return VERR_NOT_AVAILABLE;
+}
+
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 65988)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 65989)
@@ -5877,4 +5877,28 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT
 /**
+ * Implements 'VMMCALL'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_vmmcall)
+{
+    /*
+     * We do not check for presence of SVM/AMD-V here as the KVM GIM provider
+     * might patch in an invalid vmmcall instruction with an Intel vmcall
+     * instruction.
+     */
+    bool fUpdatedRipAndRF;
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+    VBOXSTRICTRC rcStrict = HMSvmVmmcall(pVCpu, pCtx, &fUpdatedRipAndRF);
+    if (RT_SUCCESS(rcStrict))
+    {
+        if (!fUpdatedRipAndRF)
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return rcStrict;
+    }
+
+    return iemRaiseUndefinedOpcode(pVCpu);
+}
+
+
+/**
  * Implements 'VMLOAD'.
  */
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h	(revision 65988)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h	(revision 65989)
@@ -442,47 +442,55 @@
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmrun);
 
+#ifdef VBOX_WITH_NESTED_HWVIRT
+/** Opcode 0x0f 0x01 0xd9. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_vmmcall)
+{
+    IEMOP_MNEMONIC(vmmcall, "vmmcall");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmmcall);
+}
+
+
+/** Opcode 0x0f 0x01 0xda. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_vmload)
+{
+    IEMOP_MNEMONIC(vmload, "vmload");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmload);
+}
+
+
+/** Opcode 0x0f 0x01 0xdb. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_vmsave)
+{
+    IEMOP_MNEMONIC(vmsave, "vmsave");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmsave);
+}
+
+
+/** Opcode 0x0f 0x01 0xdc. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
+{
+    IEMOP_MNEMONIC(stgi, "stgi");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
+}
+
+
+/** Opcode 0x0f 0x01 0xdd. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
+{
+    IEMOP_MNEMONIC(clgi, "clgi");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
+}
+
+
+/** Opcode 0x0f 0x01 0xdf. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_invlpga)
+{
+    IEMOP_MNEMONIC(invlpga, "invlpga");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
+}
+#else
 /** Opcode 0x0f 0x01 0xd9. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmmcall);
 
-#ifdef VBOX_WITH_NESTED_HWVIRT
-/** Opcode 0x0f 0x01 0xda. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_vmload)
-{
-    IEMOP_MNEMONIC(vmload, "vmload");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmload);
-}
-
-
-/** Opcode 0x0f 0x01 0xdb. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_vmsave)
-{
-    IEMOP_MNEMONIC(vmsave, "vmsave");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmsave);
-}
-
-
-/** Opcode 0x0f 0x01 0xdc. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
-{
-    IEMOP_MNEMONIC(stgi, "stgi");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
-}
-
-
-/** Opcode 0x0f 0x01 0xdd. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
-{
-    IEMOP_MNEMONIC(clgi, "clgi");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
-}
-
-
-/** Opcode 0x0f 0x01 0xdf. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_invlpga)
-{
-    IEMOP_MNEMONIC(invlpga, "invlpga");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
-}
-#else
 /** Opcode 0x0f 0x01 0xda. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmload);
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 65988)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 65989)
@@ -3966,87 +3966,4 @@
     Event.n.u32ErrorCode     = 0;
     hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-}
-
-
-/**
- * Emulates a simple MOV TPR (CR8) instruction, used for TPR patching on 32-bit
- * guests. This simply looks up the patch record at EIP and does the required.
- *
- * This VMMCALL is used a fallback mechanism when mov to/from cr8 isn't exactly
- * like how we want it to be (e.g. not followed by shr 4 as is usually done for
- * TPR). See hmR3ReplaceTprInstr() for the details.
- *
- * @returns VBox status code.
- * @retval VINF_SUCCESS if the access was handled successfully.
- * @retval VERR_NOT_FOUND if no patch record for this RIP could be found.
- * @retval VERR_SVM_UNEXPECTED_PATCH_TYPE if the found patch type is invalid.
- *
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the guest-CPU context.
- */
-static int hmR0SvmEmulateMovTpr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));
-
-    /*
-     * We do this in a loop as we increment the RIP after a successful emulation
-     * and the new RIP may be a patched instruction which needs emulation as well.
-     */
-    bool fPatchFound = false;
-    for (;;)
-    {
-        bool    fPending;
-        uint8_t u8Tpr;
-
-        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
-        if (!pPatch)
-            break;
-
-        fPatchFound = true;
-        switch (pPatch->enmType)
-        {
-            case HMTPRINSTR_READ:
-            {
-                int rc = APICGetTpr(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
-                AssertRC(rc);
-
-                rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
-                AssertRC(rc);
-                pCtx->rip += pPatch->cbOp;
-                break;
-            }
-
-            case HMTPRINSTR_WRITE_REG:
-            case HMTPRINSTR_WRITE_IMM:
-            {
-                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
-                {
-                    uint32_t u32Val;
-                    int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
-                    AssertRC(rc);
-                    u8Tpr = u32Val;
-                }
-                else
-                    u8Tpr = (uint8_t)pPatch->uSrcOperand;
-
-                int rc2 = APICSetTpr(pVCpu, u8Tpr);
-                AssertRC(rc2);
-                HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
-
-                pCtx->rip += pPatch->cbOp;
-                break;
-            }
-
-            default:
-                AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
-                pVCpu->hm.s.u32HMError = pPatch->enmType;
-                return VERR_SVM_UNEXPECTED_PATCH_TYPE;
-        }
-    }
-
-    if (fPatchFound)
-        return VINF_SUCCESS;
-    return VERR_NOT_FOUND;
 }
 
@@ -5260,43 +5177,18 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
 
-    /* First check if this is a patched VMMCALL for mov TPR */
-    int rc = hmR0SvmEmulateMovTpr(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
-    if (rc == VINF_SUCCESS)
-    {
-        HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
-        return VINF_SUCCESS;
-    }
-
-    if (rc == VERR_NOT_FOUND)
-    {
-        if (pVCpu->hm.s.fHypercallsEnabled)
-        {
-            VBOXSTRICTRC rcStrict = GIMHypercall(pVCpu, pCtx);
-            if (RT_SUCCESS(VBOXSTRICTRC_VAL(rcStrict)))
-            {
-                if (rcStrict == VINF_SUCCESS)
-                    hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 3 /* cbInstr */);
-                else
-                    Assert(   rcStrict == VINF_GIM_HYPERCALL_CONTINUING
-                           || rcStrict == VINF_GIM_R3_HYPERCALL);
-
-                /* If the hypercall changes anything other than guest's general-purpose registers,
-                   we would need to reload the guest changed bits here before VM-entry. */
-            }
-            rc = VBOXSTRICTRC_VAL(rcStrict);
-            HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
-        }
-        else
-            Log4(("hmR0SvmExitVmmCall: Hypercalls not enabled\n"));
-    }
-
-    /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
-    if (RT_FAILURE(rc))
-    {
-        hmR0SvmSetPendingXcptUD(pVCpu);
-        rc = VINF_SUCCESS;
-    }
-
-    return rc;
+    bool fRipUpdated;
+    VBOXSTRICTRC rcStrict = HMSvmVmmcall(pVCpu, pCtx, &fRipUpdated);
+    if (RT_SUCCESS(rcStrict))
+    {
+        if (!fRipUpdated)
+            hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 3 /* cbInstr */);
+
+        /* If the hypercall or TPR patching changes anything other than guest's general-purpose registers,
+           we would need to reload the guest changed bits here before VM-entry. */
+        return VBOXSTRICTRC_VAL(rcStrict);
+    }
+
+    hmR0SvmSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
 }
 
Index: /trunk/src/VBox/VMM/include/EMHandleRCTmpl.h
===================================================================
--- /trunk/src/VBox/VMM/include/EMHandleRCTmpl.h	(revision 65988)
+++ /trunk/src/VBox/VMM/include/EMHandleRCTmpl.h	(revision 65989)
@@ -237,4 +237,10 @@
         case VINF_GIM_R3_HYPERCALL:
         {
+            /* Currently hypercall instruction (vmcall/vmmcall) emulation is compiled
+               only when Nested Hw. virt feature is enabled in IEM (for easier IEM backports). */
+#ifdef VBOX_WITH_NESTED_HWVIRT
+            rc = emR3ExecuteInstruction(pVM, pVCpu, "Hypercall");
+            break;
+#else
             /** @todo IEM/REM need to handle VMCALL/VMMCALL, see
              *        @bugref{7270#c168}. */
@@ -259,4 +265,5 @@
             }
             break;
+#endif
         }
 
