Index: /trunk/include/VBox/vmm/iem.h
===================================================================
--- /trunk/include/VBox/vmm/iem.h	(revision 78524)
+++ /trunk/include/VBox/vmm/iem.h	(revision 78525)
@@ -348,4 +348,5 @@
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo);
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr);
+VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedInvvpid(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo);
 #endif
 /** @}  */
Index: /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 78524)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 78525)
@@ -16193,4 +16193,32 @@
 
 /**
+ * Interface for HM and EM to emulate the INVVPID instruction.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
+ * @param   pExitInfo       Pointer to the VM-exit information struct.
+ * @thread  EMT(pVCpu)
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
+    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
+    Assert(pExitInfo);
+
+    iemInitExec(pVCpu, false /*fBypassHandlers*/);
+
+    uint8_t const  iEffSeg          = pExitInfo->InstrInfo.Inv.iSegReg;
+    uint8_t const  cbInstr          = pExitInfo->cbInstr;
+    RTGCPTR const  GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
+    uint64_t const uInvvpidType     = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
+                                    ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
+                                    : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
+    VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, uInvvpidType, pExitInfo);
+    Assert(!pVCpu->iem.s.cActiveMappings);
+    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
+}
+
+
+/**
  * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
  *
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h	(revision 78524)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h	(revision 78525)
@@ -178,5 +178,4 @@
  *  VMX_EXIT_EPT_MISCONFIG
  *  VMX_EXIT_INVEPT
- *  VMX_EXIT_INVVPID
  *  VMX_EXIT_RDRAND
  *  VMX_EXIT_VMFUNC
@@ -2990,4 +2989,5 @@
         case VMX_EXIT_INVEPT:
         case VMX_EXIT_INVPCID:
+        case VMX_EXIT_INVVPID:
         case VMX_EXIT_LDTR_TR_ACCESS:
         case VMX_EXIT_GDTR_IDTR_ACCESS:
@@ -8512,4 +8512,171 @@
 
 /**
+ * INVVPID instruction execution worker.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu               The cross context virtual CPU structure.
+ * @param   cbInstr             The instruction length in bytes.
+ * @param   iEffSeg             The segment of the invvpid descriptor.
+ * @param   GCPtrInvvpidDesc    The address of invvpid descriptor.
+ * @param   uInvvpidType        The invalidation type.
+ * @param   pExitInfo           Pointer to the VM-exit information struct. Optional,
+ *                              can be NULL.
+ *
+ * @remarks Common VMX instruction checks are already expected to by the caller,
+ *          i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxInvvpid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
+                                      uint64_t uInvvpidType, PCVMXVEXITINFO pExitInfo)
+{
+    /* Check if INVVPID instruction is supported, otherwise raise #UD. */
+    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVpid)
+        return iemRaiseUndefinedOpcode(pVCpu);
+
+    /* Nested-guest intercept. */
+    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
+    {
+        if (pExitInfo)
+            return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
+        return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_INVVPID, VMXINSTRID_NONE, cbInstr);
+    }
+
+    /* CPL. */
+    if (pVCpu->iem.s.uCpl != 0)
+    {
+        Log(("invvpid: CPL != 0 -> #GP(0)\n"));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    /*
+     * Validate INVVPID invalidation type.
+     *
+     * Each of the types have a supported bit in IA32_VMX_EPT_VPID_CAP MSR.
+     * In theory, it's possible for a CPU to not support flushing individual addresses
+     * but all the other types or any other combination.
+     */
+    uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
+    uint8_t const fTypeIndivAddr              = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
+    uint8_t const fTypeSingleCtx              = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX);
+    uint8_t const fTypeAllCtx                 = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_ALL_CTX);
+    uint8_t const fTypeSingleCtxRetainGlobals = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX_RETAIN_GLOBALS);
+    if (   (fTypeIndivAddr              && uInvvpidType == VMXTLBFLUSHVPID_INDIV_ADDR)
+        || (fTypeSingleCtx              && uInvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
+        || (fTypeAllCtx                 && uInvvpidType == VMXTLBFLUSHVPID_ALL_CONTEXTS)
+        || (fTypeSingleCtxRetainGlobals && uInvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS))
+    { /* likely */ }
+    else
+    {
+        Log(("invvpid: invalid/unrecognized invvpid type %#x -> VMFail\n", uInvvpidType));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_TypeInvalid;
+        iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Fetch the invvpid descriptor from guest memory.
+     */
+    RTUINT128U uDesc;
+    VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvvpidDesc);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        /*
+         * Validate the descriptor.
+         */
+        if (uDesc.s.Lo > 0xfff)
+        {
+            Log(("invvpid: reserved bits set in invvpid descriptor %#RX64 -> #GP(0)\n", uDesc.s.Lo));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_DescRsvd;
+            iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+            return VINF_SUCCESS;
+        }
+
+        IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
+        RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
+        uint8_t       const uVpid        = uDesc.s.Lo & UINT64_C(0xfff);
+        uint64_t      const uCr3         = pVCpu->cpum.GstCtx.cr3;
+        switch (uInvvpidType)
+        {
+            case VMXTLBFLUSHVPID_INDIV_ADDR:
+            {
+                if (uVpid != 0)
+                {
+                    if (IEM_IS_CANONICAL(GCPtrInvAddr))
+                    {
+                        /* Invalidate mappings for the linear address tagged with VPID. */
+                        /** @todo PGM support for VPID? Currently just flush everything. */
+                        PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
+                        iemVmxVmSucceed(pVCpu);
+                    }
+                    else
+                    {
+                        Log(("invvpid: invalidation address %#RGP is not canonical -> VMFail\n", GCPtrInvAddr));
+                        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type0InvalidAddr;
+                        iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+                    }
+                }
+                else
+                {
+                    Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, uInvvpidType));
+                    pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type0InvalidVpid;
+                    iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+                }
+                break;
+            }
+
+            case VMXTLBFLUSHVPID_SINGLE_CONTEXT:
+            {
+                if (uVpid != 0)
+                {
+                    /* Invalidate all mappings with VPID. */
+                    /** @todo PGM support for VPID? Currently just flush everything. */
+                    PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
+                    iemVmxVmSucceed(pVCpu);
+                }
+                else
+                {
+                    Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, uInvvpidType));
+                    pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type1InvalidVpid;
+                    iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+                }
+                break;
+            }
+
+            case VMXTLBFLUSHVPID_ALL_CONTEXTS:
+            {
+                /* Invalidate all mappings with non-zero VPIDs. */
+                /** @todo PGM support for VPID? Currently just flush everything. */
+                PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
+                iemVmxVmSucceed(pVCpu);
+                break;
+            }
+
+            case VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS:
+            {
+                if (uVpid != 0)
+                {
+                    /* Invalidate all mappings with VPID except global translations. */
+                    /** @todo PGM support for VPID? Currently just flush everything. */
+                    PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
+                    iemVmxVmSucceed(pVCpu);
+                }
+                else
+                {
+                    Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, uInvvpidType));
+                    pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type3InvalidVpid;
+                    iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+                }
+                break;
+            }
+            IEM_NOT_REACHED_DEFAULT_CASE_RET();
+        }
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    }
+    return rcStrict;
+}
+
+
+/**
  * VMXON instruction execution worker.
  *
@@ -8898,4 +9065,13 @@
 
 /**
+ * Implements 'INVVPID'.
+ */
+IEM_CIMPL_DEF_3(iemCImpl_invvpid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvvpidDesc, uint64_t, uInvvpidType)
+{
+    return iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, uInvvpidType, NULL /* pExitInfo */);
+}
+
+
+/**
  * Implements VMX's implementation of PAUSE.
  */
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f38.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f38.cpp.h	(revision 78524)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f38.cpp.h	(revision 78525)
@@ -304,6 +304,49 @@
 /** Opcode 0x66 0x0f 0x38 0x80. */
 FNIEMOP_STUB(iemOp_invept_Gy_Mdq);
+
 /** Opcode 0x66 0x0f 0x38 0x81. */
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+FNIEMOP_DEF(iemOp_invvpid_Gy_Mdq)
+{
+    IEMOP_MNEMONIC(invvpid, "invvpid Gy,Mdq");
+    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+    IEMOP_HLP_IN_VMX_OPERATION("invvpid", kVmxVDiag_Invvpid);
+    IEMOP_HLP_VMX_INSTR("invvpid", kVmxVDiag_Invvpid);
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
+    {
+        /* Register, memory. */
+        if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
+        {
+            IEM_MC_BEGIN(3, 0);
+            IEM_MC_ARG(uint8_t,  iEffSeg,          0);
+            IEM_MC_ARG(RTGCPTR,  GCPtrInvvpidDesc, 1);
+            IEM_MC_ARG(uint64_t, uInvvpidType,     2);
+            IEM_MC_FETCH_GREG_U64(uInvvpidType, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrInvvpidDesc, bRm, 0);
+            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+            IEM_MC_CALL_CIMPL_3(iemCImpl_invvpid, iEffSeg, GCPtrInvvpidDesc, uInvvpidType);
+            IEM_MC_END();
+        }
+        else
+        {
+            IEM_MC_BEGIN(3, 0);
+            IEM_MC_ARG(uint8_t,  iEffSeg,          0);
+            IEM_MC_ARG(RTGCPTR,  GCPtrInvvpidDesc, 1);
+            IEM_MC_ARG(uint32_t, uInvvpidType,     2);
+            IEM_MC_FETCH_GREG_U32(uInvvpidType, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrInvvpidDesc, bRm, 0);
+            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+            IEM_MC_CALL_CIMPL_3(iemCImpl_invvpid, iEffSeg, GCPtrInvvpidDesc, uInvvpidType);
+            IEM_MC_END();
+        }
+    }
+    Log(("iemOp_invvpid_Gy_Mdq: invalid encoding -> #UD\n"));
+    return IEMOP_RAISE_INVALID_OPCODE();
+}
+#else
 FNIEMOP_STUB(iemOp_invvpid_Gy_Mdq);
+#endif
+
 /** Opcode 0x66 0x0f 0x38 0x82. */
 FNIEMOP_DEF(iemOp_invpcid_Gy_Mdq)
