Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 73605)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 73606)
@@ -1302,4 +1302,6 @@
 VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32MtrrCap(PVMCPU pVCpu);
 VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32FeatureControl(PVMCPU pVCpu);
+VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxBasic(PVMCPU pVCpu);
+VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32SmmMonitorCtl(PVMCPU pVCpu);
 VMMDECL(VBOXSTRICTRC)   CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue);
 VMMDECL(VBOXSTRICTRC)   CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue);
@@ -1788,26 +1790,36 @@
 
 /**
- * Checks if we are executing inside a VMX nested hardware-virtualized guest.
- *
- * @returns @c true if in VMX nested-guest mode, @c false otherwise.
- * @param   pCtx        Pointer to the context.
- */
-DECLINLINE(bool) CPUMIsGuestInVmxNestedHwVirtMode(PCCPUMCTX pCtx)
-{
-    /** @todo Intel. */
+ * Checks if the guest is in VMX non-root operation.
+ *
+ * @returns @c true if in VMX non-root operation, @c false otherwise.
+ * @param   pCtx    Current CPU context.
+ */
+DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx)
+{
+#ifndef IN_RC
+    Assert(!pCtx->hwvirt.vmx.fInVmxNonRootMode || pCtx->hwvirt.vmx.fInVmxRootMode);
+    return pCtx->hwvirt.vmx.fInVmxNonRootMode;
+#else
     NOREF(pCtx);
     return false;
-}
-
-/**
- * Checks if we are executing inside a nested hardware-virtualized guest.
- *
- * @returns @c true if in SVM/VMX nested-guest mode, @c false otherwise.
- * @param   pCtx        Pointer to the context.
- */
-DECLINLINE(bool) CPUMIsGuestInNestedHwVirtMode(PCCPUMCTX pCtx)
-{
-    return CPUMIsGuestInSvmNestedHwVirtMode(pCtx) || CPUMIsGuestInVmxNestedHwVirtMode(pCtx);
-}
+#endif
+}
+
+/**
+ * Checks if the guest is in VMX root operation.
+ *
+ * @returns @c true if in VMX root operation, @c false otherwise.
+ * @param   pCtx    Current CPU context.
+ */
+DECLINLINE(bool) CPUMIsGuestInVmxRootMode(PCCPUMCTX pCtx)
+{
+#ifndef IN_RC
+    return pCtx->hwvirt.vmx.fInVmxRootMode;
+#else
+    NOREF(pCtx);
+    return false;
+#endif
+}
+
 #endif /* IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS */
 
Index: /trunk/include/VBox/vmm/cpumctx.h
===================================================================
--- /trunk/include/VBox/vmm/cpumctx.h	(revision 73605)
+++ /trunk/include/VBox/vmm/cpumctx.h	(revision 73606)
@@ -31,4 +31,5 @@
 # include <VBox/types.h>
 # include <VBox/vmm/hm_svm.h>
+# include <VBox/vmm/hm_vmx.h>
 #else
 # pragma D depends_on library x86.d
@@ -488,62 +489,79 @@
             {
                 /** 0x2e0 - MSR holding physical address of the Guest's Host-state. */
-                uint64_t            uMsrHSavePa;
+                uint64_t                uMsrHSavePa;
                 /** 0x2e8 - Guest physical address of the nested-guest VMCB. */
-                RTGCPHYS            GCPhysVmcb;
+                RTGCPHYS                GCPhysVmcb;
                 /** 0x2f0 - Cache of the nested-guest VMCB - R0 ptr. */
-                R0PTRTYPE(PSVMVMCB) pVmcbR0;
+                R0PTRTYPE(PSVMVMCB)     pVmcbR0;
 #if HC_ARCH_BITS == 32
-                uint32_t            uVmcbR0Padding;
+                uint32_t                uVmcbR0Padding;
 #endif
                 /** 0x2f8 - Cache of the nested-guest VMCB - R3 ptr. */
-                R3PTRTYPE(PSVMVMCB) pVmcbR3;
+                R3PTRTYPE(PSVMVMCB)     pVmcbR3;
 #if HC_ARCH_BITS == 32
-                uint32_t            uVmcbR3Padding;
+                uint32_t                uVmcbR3Padding;
 #endif
                 /** 0x300 - Guest's host-state save area. */
-                SVMHOSTSTATE        HostState;
+                SVMHOSTSTATE            HostState;
                 /** 0x3b8 - Guest TSC time-stamp of when the previous PAUSE instr. was executed. */
-                uint64_t            uPrevPauseTick;
+                uint64_t                uPrevPauseTick;
                 /** 0x3c0 - Pause filter count. */
-                uint16_t            cPauseFilter;
+                uint16_t                cPauseFilter;
                 /** 0x3c2 - Pause filter threshold. */
-                uint16_t            cPauseFilterThreshold;
+                uint16_t                cPauseFilterThreshold;
                 /** 0x3c4 - Whether the injected event is subject to event intercepts. */
-                bool                fInterceptEvents;
+                bool                    fInterceptEvents;
                 /** 0x3c5 - Padding. */
-                bool                afPadding[3];
+                bool                    afPadding[3];
                 /** 0x3c8 - MSR permission bitmap - R0 ptr. */
-                R0PTRTYPE(void *)   pvMsrBitmapR0;
+                R0PTRTYPE(void *)       pvMsrBitmapR0;
 #if HC_ARCH_BITS == 32
-                uint32_t            uvMsrBitmapR0Padding;
+                uint32_t                uvMsrBitmapR0Padding;
 #endif
                 /** 0x3d0 - MSR permission bitmap - R3 ptr. */
-                R3PTRTYPE(void *)   pvMsrBitmapR3;
+                R3PTRTYPE(void *)       pvMsrBitmapR3;
 #if HC_ARCH_BITS == 32
-                uint32_t            uvMsrBitmapR3Padding;
+                uint32_t                uvMsrBitmapR3Padding;
 #endif
                 /** 0x3d8 - IO permission bitmap - R0 ptr. */
-                R0PTRTYPE(void *)   pvIoBitmapR0;
+                R0PTRTYPE(void *)       pvIoBitmapR0;
 #if HC_ARCH_BITS == 32
-                uint32_t            uIoBitmapR0Padding;
+                uint32_t                uIoBitmapR0Padding;
 #endif
                 /** 0x3e0 - IO permission bitmap - R3 ptr. */
-                R3PTRTYPE(void *)   pvIoBitmapR3;
+                R3PTRTYPE(void *)       pvIoBitmapR3;
 #if HC_ARCH_BITS == 32
-                uint32_t            uIoBitmapR3Padding;
+                uint32_t                uIoBitmapR3Padding;
 #endif
                 /** 0x3e8 - Host physical address of the nested-guest VMCB.  */
-                RTHCPHYS            HCPhysVmcb;
+                RTHCPHYS                HCPhysVmcb;
             } svm;
 
             struct
             {
-                /** 0x2e0 - Whether the guest is in VMX root mode. */
-                uint32_t            fInVmxRootMode : 1;
-                uint32_t            afPadding  : 31;
                 /** 0x2e4 - Guest physical address of the VMXON region. */
-                RTGCPHYS            GCPhysVmxon;
-                /** 0x2ec - Padding. */
-                uint8_t             abPadding[0x3f0 - 0x2ec];
+                RTGCPHYS                GCPhysVmxon;
+                /** 0x2e8 - Guest physical address of the current VMCS pointer. */
+                RTGCPHYS                GCPhysVmcs;
+                /** 0x2f0 - Last emulated VMX instruction diagnostic. */
+                VMXVINSTRDIAG           enmInstrDiag;
+                /** 0x2f4 - Whether the guest is in VMX root mode. */
+                bool                    fInVmxRootMode;
+                /** 0x2f5 - Whether the guest is in VMX non-root mode. */
+                bool                    fInVmxNonRootMode;
+                /** 0x2f6 - Padding.  */
+                bool                    afPadding[2];
+                /** 0x2f8 - Cache of the nested-guest current VMCS - R0 ptr. */
+                R0PTRTYPE(PVMXVVMCS)    pVmcsR0;
+#if HC_ARCH_BITS == 32
+                uint32_t                uVmcsR0Padding;
+#endif
+                /** 0x300 - Cache of the nested-guest current VMCS - R3 ptr. */
+                R3PTRTYPE(PVMXVVMCS)    pVmcsR3;
+#if HC_ARCH_BITS == 32
+                uint32_t                uVmcsR3Padding;
+#endif
+                /** 0x308 - Padding. */
+                uint8_t                 abPadding[0x3f0 - 0x308];
             } vmx;
         } CPUM_UNION_NM(s);
Index: /trunk/include/VBox/vmm/hm.h
===================================================================
--- /trunk/include/VBox/vmm/hm.h	(revision 73605)
+++ /trunk/include/VBox/vmm/hm.h	(revision 73606)
@@ -130,5 +130,4 @@
 VMM_INT_DECL(bool)              HMHasPendingIrq(PVM pVM);
 VMM_INT_DECL(PX86PDPE)          HMGetPaePdpes(PVMCPU pVCpu);
-VMM_INT_DECL(int)               HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping);
 VMM_INT_DECL(bool)              HMSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable);
 VMM_INT_DECL(bool)              HMIsSvmActive(PVM pVM);
@@ -136,6 +135,14 @@
 VMM_INT_DECL(bool)              HMIsVmxSupported(PVM pVM);
 VMM_INT_DECL(void)              HMHCPagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode);
+/** @} */
+
+/** @name All-context VMX helpers.
+ * These are VMX functions (based on VMX specs.) that may be used by IEM/REM and
+ * not VirtualBox functions that are used for hardware-assisted VMX. Those are
+ * declared below under the !IN_RC section.
+ * @{ */
 VMM_INT_DECL(int)               HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs);
 VMM_INT_DECL(int)               HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue);
+VMM_INT_DECL(const char *)      HMVmxGetInstrDiagDesc(VMXVINSTRDIAG enmInstrDiag);
 /** @} */
 
@@ -151,5 +158,4 @@
                                                          uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
                                                          PSVMIOIOEXITINFO pIoExitInfo);
-VMM_INT_DECL(int)               HMHCSvmMaybeMovTprHypercall(PVMCPU pVCpu);
 /** @} */
 
@@ -168,16 +174,20 @@
 VMM_INT_DECL(void)              HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx);
 # endif
+VMM_INT_DECL(int)               HMSvmIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping);
+VMM_INT_DECL(int)               HMHCSvmMaybeMovTprHypercall(PVMCPU pVCpu);
 #else /* Nops in RC: */
-# define HMFlushTLB(pVCpu)                              do { } while (0)
-# define HMFlushTLBOnAllVCpus(pVM)                      do { } while (0)
-# define HMInvalidatePageOnAllVCpus(pVM, GCVirt)        do { } while (0)
-# define HMInvalidatePhysPage(pVM,  GCVirt)             do { } while (0)
-# define HMAreNestedPagingAndFullGuestExecEnabled(pVM)  false
-# define HMIsLongModeAllowed(pVM)                       false
-# define HMIsNestedPagingActive(pVM)                    false
-# define HMIsMsrBitmapsActive(pVM)                      false
-# define HMSvmIsVGifActive(pVM)                         false
-# define HMSvmNstGstApplyTscOffset(pVCpu, uTicks)       (uTicks)
-# define HMSvmNstGstVmExitNotify(pVCpu, pCtx)           do { } while (0)
+# define HMFlushTLB(pVCpu)                                            do { } while (0)
+# define HMFlushTLBOnAllVCpus(pVM)                                    do { } while (0)
+# define HMInvalidatePageOnAllVCpus(pVM, GCVirt)                      do { } while (0)
+# define HMInvalidatePhysPage(pVM,  GCVirt)                           do { } while (0)
+# define HMAreNestedPagingAndFullGuestExecEnabled(pVM)                false
+# define HMIsLongModeAllowed(pVM)                                     false
+# define HMIsNestedPagingActive(pVM)                                  false
+# define HMIsMsrBitmapsActive(pVM)                                    false
+# define HMSvmIsVGifActive(pVM)                                       false
+# define HMSvmNstGstApplyTscOffset(pVCpu, uTicks)                     (uTicks)
+# define HMSvmNstGstVmExitNotify(pVCpu, pCtx)                         do { } while (0)
+# define HMSvmIsSubjectToErratum170(puFamily, puModel, puStepping)    false
+# define HMHCSvmMaybeMovTprHypercall(pVCpu)                           do { } while (0)
 #endif
 
Index: /trunk/include/VBox/vmm/hm_vmx.h
===================================================================
--- /trunk/include/VBox/vmm/hm_vmx.h	(revision 73605)
+++ /trunk/include/VBox/vmm/hm_vmx.h	(revision 73606)
@@ -812,4 +812,75 @@
 
 /**
+ * VMX VMCS revision identifier.
+ */
+typedef union
+{
+    struct
+    {
+        /** Revision identifier. */
+        uint32_t    u31RevisionId : 31;
+        /** Whether this is a shadow VMCS. */
+        uint32_t    fIsShadowVmcs : 1;
+    } n;
+    /** The unsigned integer view. */
+    uint32_t        u;
+} VMXVMCSREVID;
+AssertCompileSize(VMXVMCSREVID, 4);
+/** Pointer to the VMXVMCSREVID union. */
+typedef VMXVMCSREVID *PVMXVMCSREVID;
+/** Pointer to a const VMXVMCSREVID union. */
+typedef const VMXVMCSREVID *PCVMXVMCSREVID;
+
+/**
+ * VMX VM-exit instruction information.
+ */
+typedef union
+{
+    /** Plain unsigned int representation. */
+    uint32_t    u;
+    /** INS and OUTS information. */
+    struct
+    {
+        uint32_t    u7Reserved0 : 7;
+        /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
+        uint32_t    u3AddrSize  : 3;
+        uint32_t    u5Reserved1 : 5;
+        /** The segment register (X86_SREG_XXX). */
+        uint32_t    iSegReg     : 3;
+        uint32_t    uReserved2  : 14;
+    } StrIo;
+    /** INVEPT, INVVPID, INVPCID, VMCLEAR, VMPTRLD, VMPTRST, VMXON, VMXOFF, XSAVES,
+     *  XRSTORS information. */
+    struct
+    {
+        /** Scaling; 0=no scaling, 1=scale-by-2, 2=scale-by-4, 3=scale-by-8. */
+        uint32_t    u2Scaling       : 2;
+        uint32_t    u5Reserved0     : 5;
+        /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
+        uint32_t    u3AddrSize      : 3;
+        /** Memory/Register - Always cleared to 0 to indicate memory operand. */
+        uint32_t    fIsRegOperand   : 1;
+        uint32_t    u4Reserved0     : 4;
+        /** The segment register (X86_SREG_XXX). */
+        uint32_t    iSegReg         : 3;
+        /** The index register (X86_GREG_XXX). */
+        uint32_t    iIdxReg         : 4;
+        /** Set if index register is invalid. */
+        uint32_t    fIdxRegInvalid  : 1;
+        /** The base register (X86_GREG_XXX). */
+        uint32_t    iBaseReg        : 4;
+        /** Set if base register is invalid. */
+        uint32_t    fBaseRegInvalid : 1;
+        /** Register 2 (X86_GREG_XXX). */
+        uint32_t    iReg2           : 4;
+    } InvVmxXsaves;
+} VMXEXITINSTRINFO;
+AssertCompileSize(VMXEXITINSTRINFO, 4);
+/** Pointer to a VMX VM-exit instruction info. struct. */
+typedef VMXEXITINSTRINFO *PVMXEXITINSTRINFO;
+/** Pointer to a const VMX VM-exit instruction info. struct. */
+typedef const VMXEXITINSTRINFO *PCVMXEXITINSTRINFO;
+
+/**
  * VMX MSR autoload/store element.
  * In accordance to the VT-x spec.
@@ -1046,57 +1117,62 @@
 
 /** @name VM Instruction Errors.
- * @{
- */
-/** VMCALL executed in VMX root operation. */
-#define VMX_ERROR_VMCALL                                        1
-/** VMCLEAR with invalid physical address. */
-#define VMX_ERROR_VMCLEAR_INVALID_PHYS_ADDR                     2
-/** VMCLEAR with VMXON pointer. */
-#define VMX_ERROR_VMCLEAR_INVALID_VMXON_PTR                     3
-/** VMLAUNCH with non-clear VMCS. */
-#define VMX_ERROR_VMLAUCH_NON_CLEAR_VMCS                        4
-/** VMRESUME with non-launched VMCS. */
-#define VMX_ERROR_VMRESUME_NON_LAUNCHED_VMCS                    5
-/** VMRESUME with a corrupted VMCS (indicates corruption of the current VMCS). */
-#define VMX_ERROR_VMRESUME_CORRUPTED_VMCS                       6
-/** VM-entry with invalid control field(s). */
-#define VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS                7
-/** VM-entry with invalid host-state field(s). */
-#define VMX_ERROR_VMENTRY_INVALID_HOST_STATE                    8
-/** VMPTRLD with invalid physical address. */
-#define VMX_ERROR_VMPTRLD_INVALID_PHYS_ADDR                     9
-/** VMPTRLD with VMXON pointer. */
-#define VMX_ERROR_VMPTRLD_VMXON_PTR                             10
-/** VMPTRLD with incorrect VMCS revision identifier. */
-#define VMX_ERROR_VMPTRLD_WRONG_VMCS_REVISION                   11
-/** VMREAD/VMWRITE from/to unsupported VMCS component. */
-#define VMX_ERROR_VMREAD_INVALID_COMPONENT                      12
-#define VMX_ERROR_VMWRITE_INVALID_COMPONENT                     VMX_ERROR_VMREAD_INVALID_COMPONENT
-/** VMWRITE to read-only VMCS component. */
-#define VMX_ERROR_VMWRITE_READONLY_COMPONENT                    13
-/** VMXON executed in VMX root operation. */
-#define VMX_ERROR_VMXON_IN_VMX_ROOT_OP                          15
-/** VM-entry with invalid executive-VMCS pointer. */
-#define VMX_ERROR_VMENTRY_INVALID_VMCS_EXEC_PTR                 16
-/** VM-entry with non-launched executive VMCS. */
-#define VMX_ERROR_VMENTRY_NON_LAUNCHED_EXEC_VMCS                17
-/** VM-entry with executive-VMCS pointer not VMXON pointer. */
-#define VMX_ERROR_VMENTRY_EXEC_VMCS_PTR                         18
-/** VMCALL with non-clear VMCS. */
-#define VMX_ERROR_VMCALL_NON_CLEAR_VMCS                         19
-/** VMCALL with invalid VM-exit control fields. */
-#define VMX_ERROR_VMCALL_INVALID_VMEXIT_FIELDS                  20
-/** VMCALL with incorrect MSEG revision identifier. */
-#define VMX_ERROR_VMCALL_INVALID_MSEG_REVISION                  22
-/** VMXOFF under dual-monitor treatment of SMIs and SMM. */
-#define VMX_ERROR_VMXOFF_DUAL_MONITOR                           23
-/** VMCALL with invalid SMM-monitor features. */
-#define VMX_ERROR_VMCALL_INVALID_SMM_MONITOR                    24
-/** VM-entry with invalid VM-execution control fields in executive VMCS. */
-#define VMX_ERROR_VMENTRY_INVALID_VM_EXEC_CTRL                  25
-/** VM-entry with events blocked by MOV SS. */
-#define VMX_ERROR_VMENTRY_MOV_SS                                26
-/** Invalid operand to INVEPT/INVVPID. */
-#define VMX_ERROR_INVEPTVPID_INVALID_OPERAND                    28
+ * See Intel spec. "30.4 VM Instruction Error Numbers"
+ * @{
+ */
+typedef enum
+{
+    /** VMCALL executed in VMX root operation. */
+    VMXINSTRERR_VMCALL_VMXROOTMODE             = 1,
+    /** VMCLEAR with invalid physical address. */
+    VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR       = 2,
+    /** VMCLEAR with VMXON pointer. */
+    VMXINSTRERR_VMCLEAR_VMXON_PTR              = 3,
+    /** VMLAUNCH with non-clear VMCS. */
+    VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS        = 4,
+    /** VMRESUME with non-launched VMCS. */
+    VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS     = 5,
+    /** VMRESUME after VMXOFF (VMXOFF and VMXON between VMLAUNCH and VMRESUME). */
+    VMXINSTRERR_VMRESUME_AFTER_VMXOFF          = 6,
+    /** VM-entry with invalid control field(s). */
+    VMXINSTRERR_VMENTRY_INVALID_CTL            = 7,
+    /** VM-entry with invalid host-state field(s). */
+    VMXINSTRERR_VMENTRY_INVALID_HOST_STATE     = 8,
+    /** VMPTRLD with invalid physical address. */
+    VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR       = 9,
+    /** VMPTRLD with VMXON pointer. */
+    VMXINSTRERR_VMPTRLD_VMXON_PTR              = 10,
+    /** VMPTRLD with incorrect VMCS revision identifier. */
+    VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV     = 11,
+    /** VMREAD from unsupported VMCS component. */
+    VMXINSTRERR_VMREAD_INVALID_COMPONENT       = 12,
+    /** VMWRITE to unsupported VMCS component. */
+    VMXINSTRERR_VMWRITE_INVALID_COMPONENT      = 12,
+    /** VMWRITE to read-only VMCS component. */
+    VMXINSTRERR_VMWRITE_RO_COMPONENT           = 13,
+    /** VMXON executed in VMX root operation. */
+    VMXINSTRERR_VMXON_IN_VMXROOTMODE           = 15,
+    /** VM-entry with invalid executive-VMCS pointer. */
+    VMXINSTRERR_VMENTRY_INVALID_VMCS_PTR       = 16,
+    /** VM-entry with non-launched executive VMCS. */
+    VMXINSTRERR_VMENTRY_NON_LAUNCHED_VMCS      = 17,
+    /** VM-entry with executive-VMCS pointer not VMXON pointer. */
+    VMXINSTRERR_VMENTRY_VMCS_PTR               = 18,
+    /** VMCALL with non-clear VMCS. */
+    VMXINSTRERR_VMCALL_NON_CLEAR_VMCS          = 19,
+    /** VMCALL with invalid VM-exit control fields. */
+    VMXINSTRERR_VMCALL_INVALID_EXITCTLS        = 20,
+    /** VMCALL with incorrect MSEG revision identifier. */
+    VMXINSTRERR_VMCALL_INVALID_MSEG_ID         = 22,
+    /** VMXOFF under dual-monitor treatment of SMIs and SMM. */
+    VMXINSTRERR_VMXOFF_DUAL_MON                = 23,
+    /** VMCALL with invalid SMM-monitor features. */
+    VMXINSTRERR_VMCALL_INVALID_SMMCTLS         = 24,
+    /** VM-entry with invalid VM-execution control fields in executive VMCS. */
+    VMXINSTRERR_VMENTRY_INVALID_EXECTLS        = 25,
+    /** VM-entry with events blocked by MOV SS. */
+    VMXINSTRERR_VMENTRY_BLOCK_MOVSS            = 26,
+    /** Invalid operand to INVEPT/INVVPID. */
+    VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND = 28
+} VMXINSTRERR;
 /** @} */
 
@@ -1106,7 +1182,8 @@
  */
 /** VMCS (and related regions) memory type - Uncacheable. */
-#define VMX_BASIC_MEM_TYPE_UC                                    0
+#define VMX_BASIC_MEM_TYPE_UC                                   0
 /** VMCS (and related regions) memory type - Write back. */
-#define VMX_BASIC_MEM_TYPE_WB                                    6
+#define VMX_BASIC_MEM_TYPE_WB                                   6
+
 /** Bit fields for MSR_IA32_VMX_BASIC.  */
 /** VMCS revision identifier used by the processor. */
@@ -1574,4 +1651,5 @@
 /** Default1 class when true capability MSRs are not supported. */
 #define VMX_PIN_CTLS_DEFAULT1                                   UINT32_C(0x00000016)
+
 /** Bit fields for MSR_IA32_VMX_PINBASED_CTLS and Pin-based VM-execution
  *  controls field in the VMCS. */
@@ -1646,4 +1724,5 @@
 /** Default1 class when true-capability MSRs are not supported. */
 #define VMX_PROC_CTLS_DEFAULT1                                  UINT32_C(0x0401e172)
+
 /** Bit fields for MSR_IA32_VMX_PROCBASED_CTLS and Processor-based VM-execution
  *  controls field in the VMCS. */
@@ -1758,4 +1837,5 @@
 /** Use TSC scaling. */
 #define VMX_PROC_CTLS2_TSC_SCALING                              RT_BIT(25)
+
 /** Bit fields for MSR_IA32_VMX_PROCBASED_CTLS2 and Secondary processor-based
  *  VM-execution controls field in the VMCS. */
@@ -1836,4 +1916,5 @@
 /** Default1 class when true-capability MSRs are not supported. */
 #define VMX_ENTRY_CTLS_DEFAULT1                                 UINT32_C(0x000011ff)
+
 /** Bit fields for MSR_IA32_VMX_ENTRY_CTLS and VM-entry controls field in the
  *  VMCS. */
@@ -1890,4 +1971,5 @@
 /** Default1 class when true-capability MSRs are not supported.  */
 #define VMX_EXIT_CTLS_DEFAULT1                                  UINT32_C(0x00036dff)
+
 /** Bit fields for MSR_IA32_VMX_EXIT_CTLS and VM-exit controls field in the
  *  VMCS. */
@@ -1945,4 +2027,51 @@
 
 
+/** @name VM-entry interruption information.
+ * @{ */
+#define VMX_ENTRY_INT_INFO_VECTOR(a)                             ((a) & 0xff)
+#define VMX_ENTRY_INT_INFO_TYPE_SHIFT                            8
+#define VMX_ENTRY_INT_INFO_TYPE(a)                               (((a) >> 8) & 7)
+#define VMX_ENTRY_INT_INFO_ERROR_CODE_VALID                      RT_BIT(11)
+#define VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(a)                (((a) >> 11) & 1)
+#define VMX_ENTRY_INT_INFO_NMI_UNBLOCK_IRET                      12
+#define VMX_ENTRY_INT_INFO_IS_NMI_UNBLOCK_IRET(a)                (((a) >> 12) & 1)
+#define VMX_ENTRY_INT_INFO_VALID                                 RT_BIT(31)
+#define VMX_ENTRY_INT_INFO_IS_VALID(a)                           (((a) >> 31) & 1)
+/** Construct a VM-entry interruption information field from a VM-exit interruption
+ *  info value (same except that bit 12 is reserved). */
+#define VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(a)                 ((a) & ~RT_BIT(12))
+/** Construct a VM-entry interruption information field from an IDT-vectoring
+ *  information field (same except that bit 12 is reserved). */
+#define VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(a)                 ((a) & ~RT_BIT(12))
+
+/** Bit fields for VM-entry interruption information. */
+#define VMX_BF_ENTRY_INT_INFO_VECTOR_SHIFT                       0
+#define VMX_BF_ENTRY_INT_INFO_VECTOR_MASK                        UINT32_C(0x000000ff)
+#define VMX_BF_ENTRY_INT_INFO_TYPE_SHIFT                         8
+#define VMX_BF_ENTRY_INT_INFO_TYPE_MASK                          UINT32_C(0x00000700)
+#define VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID_SHIFT               11
+#define VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID_MASK                UINT32_C(0x00000800)
+#define VMX_BF_ENTRY_INT_INFO_RSVD_12_30_SHIFT                   12
+#define VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK                    UINT32_C(0x7ffff000)
+#define VMX_BF_ENTRY_INT_INFO_VALID_SHIFT                        31
+#define VMX_BF_ENTRY_INT_INFO_VALID_MASK                         UINT32_C(0x80000000)
+RT_BF_ASSERT_COMPILE_CHECKS(VMX_BF_ENTRY_INT_INFO_, UINT32_C(0), UINT32_MAX,
+                            (VECTOR, TYPE, ERR_CODE_VALID, RSVD_12_30, VALID));
+/** @} */
+
+
+/** @name VM-entry interruption information types.
+ * @{
+ */
+#define VMX_ENTRY_INT_INFO_TYPE_EXT_INT                          0
+#define VMX_ENTRY_INT_INFO_TYPE_NMI                              2
+#define VMX_ENTRY_INT_INFO_TYPE_HW_XCPT                          3
+#define VMX_ENTRY_INT_INFO_TYPE_SW_INT                           4
+#define VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT                     5
+#define VMX_ENTRY_INT_INFO_TYPE_SW_XCPT                          6
+#define VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT                      7
+/** @} */
+
+
 /** @name VM-exit interruption information.
  * @{
@@ -1957,7 +2086,20 @@
 #define VMX_EXIT_INT_INFO_VALID                                 RT_BIT(31)
 #define VMX_EXIT_INT_INFO_IS_VALID(a)                           (((a) >> 31) & 1)
-/** Construct an irq event injection value from the exit interruption info value
- *  (same except that bit 12 is reserved). */
-#define VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(a)           ((a) & ~RT_BIT(12))
+
+/** Bit fields for VM-exit interruption information. */
+#define VMX_BF_EXIT_INT_INFO_VECTOR_SHIFT                       0
+#define VMX_BF_EXIT_INT_INFO_VECTOR_MASK                        UINT32_C(0x000000ff)
+#define VMX_BF_EXIT_INT_INFO_TYPE_SHIFT                         8
+#define VMX_BF_EXIT_INT_INFO_TYPE_MASK                          UINT32_C(0x00000700)
+#define VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID_SHIFT               11
+#define VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID_MASK                UINT32_C(0x00000800)
+#define VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET_SHIFT             12
+#define VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET_MASK              UINT32_C(0x00001000)
+#define VMX_BF_EXIT_INT_INFO_RSVD_13_30_SHIFT                   13
+#define VMX_BF_EXIT_INT_INFO_RSVD_13_30_MASK                    UINT32_C(0x7fffe000)
+#define VMX_BF_EXIT_INT_INFO_VALID_SHIFT                        31
+#define VMX_BF_EXIT_INT_INFO_VALID_MASK                         UINT32_C(0x80000000)
+RT_BF_ASSERT_COMPILE_CHECKS(VMX_BF_EXIT_INT_INFO_, UINT32_C(0), UINT32_MAX,
+                            (VECTOR, TYPE, ERR_CODE_VALID, NMI_UNBLOCK_IRET, RSVD_13_30, VALID));
 /** @} */
 
@@ -1972,4 +2114,5 @@
 #define VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT                     5
 #define VMX_EXIT_INT_INFO_TYPE_SW_XCPT                          6
+#define VMX_EXIT_INT_INFO_TYPE_UNUSED                           7
 /** @} */
 
@@ -1979,10 +2122,23 @@
  */
 #define VMX_IDT_VECTORING_INFO_VECTOR(a)                        ((a) & 0xff)
-#define VMX_IDT_VECTORING_INFO_TYPE_SHIFT                       8
 #define VMX_IDT_VECTORING_INFO_TYPE(a)                          (((a) >> 8) & 7)
-#define VMX_IDT_VECTORING_INFO_ERROR_CODE_VALID                 RT_BIT(11)
 #define VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(a)           (((a) >> 11) & 1)
-#define VMX_IDT_VECTORING_INFO_VALID(a)                         ((a) & RT_BIT(31))
-#define VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(a)                ((a) & ~RT_BIT(12))
+#define VMX_IDT_VECTORING_INFO_IS_VALID(a)                      (((a) >> 31) & 1)
+
+/** Bit fields for IDT-vectoring information. */
+#define VMX_BF_IDT_VECTORING_INFO_VECTOR_SHIFT                  0
+#define VMX_BF_IDT_VECTORING_INFO_VECTOR_MASK                   UINT32_C(0x000000ff)
+#define VMX_BF_IDT_VECTORING_INFO_TYPE_SHIFT                    8
+#define VMX_BF_IDT_VECTORING_INFO_TYPE_MASK                     UINT32_C(0x00000700)
+#define VMX_BF_IDT_VECTORING_INFO_ERR_CODE_VALID_SHIFT          11
+#define VMX_BF_IDT_VECTORING_INFO_ERR_CODE_VALID_MASK           UINT32_C(0x00000800)
+#define VMX_BF_IDT_VECTORING_INFO_UNDEF_12_SHIFT                12
+#define VMX_BF_IDT_VECTORING_INFO_UNDEF_12_MASK                 UINT32_C(0x00001000)
+#define VMX_BF_IDT_VECTORING_INFO_RSVD_13_30_SHIFT              13
+#define VMX_BF_IDT_VECTORING_INFO_RSVD_13_30_MASK               UINT32_C(0x7fffe000)
+#define VMX_BF_IDT_VECTORING_INFO_VALID_SHIFT                   31
+#define VMX_BF_IDT_VECTORING_INFO_VALID_MASK                    UINT32_C(0x80000000)
+RT_BF_ASSERT_COMPILE_CHECKS(VMX_BF_IDT_VECTORING_INFO_, UINT32_C(0), UINT32_MAX,
+                            (VECTOR, TYPE, ERR_CODE_VALID, UNDEF_12, RSVD_13_30, VALID));
 /** @} */
 
@@ -1997,4 +2153,5 @@
 #define VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT                5
 #define VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT                     6
+#define VMX_IDT_VECTORING_INFO_TYPE_SW_UNUSED                   7
 /** @} */
 
@@ -2345,4 +2502,6 @@
 /** CR0 bits set here must always be set when in VMX operation. */
 #define VMX_V_CR0_FIXED0                                        (X86_CR0_PE | X86_CR0_NE | X86_CR0_PG)
+/** VMX_V_CR0_FIXED0 when unrestricted-guest execution is supported for the guest. */
+#define VMX_V_CR0_FIXED0_UX                                     (VMX_V_CR0_FIXED0 & ~(X86_CR0_PE | X86_CR0_PG))
 /** CR4 bits set here must always be set when in VMX operation. */
 #define VMX_V_CR4_FIXED0                                        (X86_CR4_VMXE)
@@ -2363,4 +2522,61 @@
  *  etc.) are limited to 32-bits (4G). Always 0 on 64-bit CPUs. */
 #define VMX_V_VMCS_PHYSADDR_4G_LIMIT                            0
+
+/**
+ * Virtual VMX-instruction diagnostics.
+ *
+ * These are not the same as VM instruction errors that are enumerated in the Intel
+ * spec. These are purely internal, fine-grained definitions used for diagnostic
+ * purposes and are not reported to guest software under the VM-instruction error
+ * field in its VMCS.
+ *
+ * @note Members of this enum are used as array indices, so no gaps are allowed.
+ *       Please update g_apszVmxInstrDiagDesc when you add new fields to this
+ *       enum.
+ */
+typedef enum
+{
+    /* Internal processing errors. */
+    kVmxVInstrDiag_Ipe_1 = 0,
+    kVmxVInstrDiag_Ipe_2,
+    kVmxVInstrDiag_Ipe_3,
+    kVmxVInstrDiag_Ipe_4,
+    kVmxVInstrDiag_Ipe_5,
+    kVmxVInstrDiag_Ipe_6,
+    kVmxVInstrDiag_Ipe_7,
+    kVmxVInstrDiag_Ipe_8,
+    kVmxVInstrDiag_Ipe_9,
+    /* VMXON. */
+    kVmxVInstrDiag_Vmxon_A20M,
+    kVmxVInstrDiag_Vmxon_Cpl,
+    kVmxVInstrDiag_Vmxon_Cr0Fixed0,
+    kVmxVInstrDiag_Vmxon_Cr4Fixed0,
+    kVmxVInstrDiag_Vmxon_Intercept,
+    kVmxVInstrDiag_Vmxon_LongModeCS,
+    kVmxVInstrDiag_Vmxon_MsrFeatCtl,
+    kVmxVInstrDiag_Vmxon_PtrAlign,
+    kVmxVInstrDiag_Vmxon_PtrAbnormal,
+    kVmxVInstrDiag_Vmxon_PtrMap,
+    kVmxVInstrDiag_Vmxon_PtrReadPhys,
+    kVmxVInstrDiag_Vmxon_PtrWidth,
+    kVmxVInstrDiag_Vmxon_RealOrV86Mode,
+    kVmxVInstrDiag_Vmxon_ShadowVmcs,
+    kVmxVInstrDiag_Vmxon_Success,
+    kVmxVInstrDiag_Vmxon_Vmxe,
+    kVmxVInstrDiag_Vmxon_VmcsRevId,
+    kVmxVInstrDiag_Vmxon_VmxRoot,
+    kVmxVInstrDiag_Vmxon_VmxRootCpl,
+    /* VMXOFF. */
+    kVmxVInstrDiag_Vmxoff_Cpl,
+    kVmxVInstrDiag_Vmxoff_Intercept,
+    kVmxVInstrDiag_Vmxoff_LongModeCS,
+    kVmxVInstrDiag_Vmxoff_RealOrV86Mode,
+    kVmxVInstrDiag_Vmxoff_Success,
+    kVmxVInstrDiag_Vmxoff_Vmxe,
+    kVmxVInstrDiag_Vmxoff_VmxRoot,
+    /* Last member for determining array index limit. */
+    kVmxVInstrDiag_Last
+} VMXVINSTRDIAG;
+AssertCompileSize(VMXVINSTRDIAG, 4);
 
 /**
@@ -2374,9 +2590,6 @@
 typedef struct
 {
-    /** Revision identifier. */
-    uint32_t            u31RevisionId : 31;
-    /** Whether this is a shadow VMCS. */
-    uint32_t            fIsShadowVmcs : 1;
-
+    /** VMX VMCS revision identifier.   */
+    VMXVMCSREVID        u32VmcsRevId;
     /** VMX-abort indicator. */
     uint32_t            u32VmxAbortId;
Index: /trunk/include/VBox/vmm/iem.h
===================================================================
--- /trunk/include/VBox/vmm/iem.h	(revision 73605)
+++ /trunk/include/VBox/vmm/iem.h	(revision 73606)
@@ -299,5 +299,4 @@
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr);
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedInvlpg(PVMCPU pVCpu,  uint8_t cbInstr, RTGCPTR GCPtrPage);
-VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc);
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr);
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr);
@@ -319,4 +318,10 @@
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
 #endif
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr);
+VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrVmxon, uint32_t uExitInstrInfo,
+                                                RTGCPTR GCPtrDisp);
+#endif
 /** @}  */
 
Index: /trunk/src/VBox/VMM/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/Makefile.kmk	(revision 73605)
+++ /trunk/src/VBox/VMM/Makefile.kmk	(revision 73606)
@@ -175,4 +175,5 @@
 	VMMAll/HMAll.cpp \
 	VMMAll/HMSVMAll.cpp \
+	VMMAll/HMVMXAll.cpp \
 	VMMAll/IEMAll.cpp \
 	VMMAll/IEMAllAImpl.asm \
@@ -508,5 +509,5 @@
  VMMRC_DEFS      = IN_VMM_RC IN_RT_RC IN_DIS DIS_CORE_ONLY VBOX_WITH_RAW_MODE VBOX_WITH_RAW_MODE_NOT_R0 IN_SUP_RC \
  	$(VMM_COMMON_DEFS)
- VMMRC_DEFS := $(filter-out VBOX_WITH_NESTED_HWVIRT_SVM,$(VMMRC_DEFS))
+ VMMRC_DEFS := $(filter-out VBOX_WITH_NESTED_HWVIRT_SVM VBOX_WITH_NESTED_HWVIRT_VMX,$(VMMRC_DEFS))
  ifdef VBOX_WITH_VMM_R0_SWITCH_STACK
   VMMRC_DEFS    += VMM_R0_SWITCH_STACK
@@ -570,6 +571,7 @@
  	VMMAll/GIMAllHv.cpp \
  	VMMAll/GIMAllKvm.cpp \
-	VMMAll/HMAll.cpp \
-	VMMAll/HMSVMAll.cpp \
+	VMMAll/HMAll.cpp \
+	VMMAll/HMSVMAll.cpp \
+	VMMAll/HMVMXAll.cpp \
  	VMMAll/MMAll.cpp \
  	VMMAll/MMAllHyper.cpp \
@@ -718,4 +720,5 @@
  	VMMAll/HMAll.cpp \
  	VMMAll/HMSVMAll.cpp \
+	VMMAll/HMVMXAll.cpp \
  	VMMAll/IEMAll.cpp \
  	VMMAll/IEMAllAImpl.asm \
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 73606)
@@ -300,10 +300,24 @@
 
 
+/**
+ * Get MSR_IA32_SMM_MONITOR_CTL value for IEM and cpumMsrRd_Ia32SmmMonitorCtl.
+ *
+ * @returns The MSR_IA32_SMM_MONITOR_CTL value.
+ * @param   pVCpu           The cross context per CPU structure.
+ */
+VMM_INT_DECL(uint64_t) CPUMGetGuestIa32SmmMonitorCtl(PVMCPU pVCpu)
+{
+    /* We do not support dual-monitor treatment for SMI and SMM. */
+    /** @todo SMM. */
+    RT_NOREF(pVCpu);
+    return 0;
+}
+
+
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32SmmMonitorCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    /** @todo SMM. */
-    *puValue = 0;
+    *puValue = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
     return VINF_SUCCESS;
 }
@@ -1288,21 +1302,35 @@
 
 
-/** @callback_method_impl{FNCPUMRDMSR} */
-static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxBasic(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
-{
-    RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
+/**
+ * Gets IA32_VMX_BASIC for IEM and cpumMsrRd_Ia32VmxBasic.
+ *
+ * @returns IA32_VMX_BASIC value.
+ * @param   pVCpu           The cross context per CPU structure.
+ */
+VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxBasic(PVMCPU pVCpu)
+{
     PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
+    uint64_t uVmxMsr;
     if (pGuestFeatures->fVmx)
     {
-        *puValue = RT_BF_MAKE(VMX_BF_BASIC_VMCS_ID,         VMX_V_VMCS_REVISION_ID        )
-                 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_SIZE,       VMX_V_VMCS_SIZE               )
-                 | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH,  VMX_V_VMCS_PHYSADDR_4G_LIMIT  )
-                 | RT_BF_MAKE(VMX_BF_BASIC_DUAL_MON,        0                             )
-                 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_MEM_TYPE,   VMX_BASIC_MEM_TYPE_WB         )
-                 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_INS_OUTS,   pGuestFeatures->fVmxInsOutInfo)
-                 | RT_BF_MAKE(VMX_BF_BASIC_TRUE_CTLS,       0                             );
+        uVmxMsr = RT_BF_MAKE(VMX_BF_BASIC_VMCS_ID,         VMX_V_VMCS_REVISION_ID        )
+                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_SIZE,       VMX_V_VMCS_SIZE               )
+                | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH,  VMX_V_VMCS_PHYSADDR_4G_LIMIT  )
+                | RT_BF_MAKE(VMX_BF_BASIC_DUAL_MON,        0                             )
+                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_MEM_TYPE,   VMX_BASIC_MEM_TYPE_WB         )
+                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_INS_OUTS,   pGuestFeatures->fVmxInsOutInfo)
+                | RT_BF_MAKE(VMX_BF_BASIC_TRUE_CTLS,       0                             );
     }
     else
-        *puValue = 0;
+        uVmxMsr = 0;
+    return uVmxMsr;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxBasic(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
+    *puValue = CPUMGetGuestIa32VmxBasic(pVCpu);
     return VINF_SUCCESS;
 }
@@ -5101,8 +5129,8 @@
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Gim(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
     /* Raise #GP(0) like a physical CPU would since the nested-hypervisor hasn't intercept these MSRs. */
-    PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
-    if (CPUMIsGuestInNestedHwVirtMode(pCtx))
+    if (   CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest)
+        || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
         return VERR_CPUM_RAISE_GP_0;
 #endif
@@ -5114,8 +5142,8 @@
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_Gim(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue)
 {
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
     /* Raise #GP(0) like a physical CPU would since the nested-hypervisor hasn't intercept these MSRs. */
-    PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
-    if (CPUMIsGuestInNestedHwVirtMode(pCtx))
+    if (   CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest)
+        || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
         return VERR_CPUM_RAISE_GP_0;
 #endif
Index: /trunk/src/VBox/VMM/VMMAll/HMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMAll.cpp	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMAll/HMAll.cpp	(revision 73606)
@@ -439,58 +439,4 @@
 
 /**
- * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
- * incorrect code bytes may be fetched after a world-switch".
- *
- * @param   pu32Family      Where to store the CPU family (can be NULL).
- * @param   pu32Model       Where to store the CPU model (can be NULL).
- * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
- * @returns true if the erratum applies, false otherwise.
- */
-VMM_INT_DECL(int) HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
-{
-    /*
-     * Erratum 170 which requires a forced TLB flush for each world switch:
-     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
-     *
-     * All BH-G1/2 and DH-G1/2 models include a fix:
-     * Athlon X2:   0x6b 1/2
-     *              0x68 1/2
-     * Athlon 64:   0x7f 1
-     *              0x6f 2
-     * Sempron:     0x7f 1/2
-     *              0x6f 2
-     *              0x6c 2
-     *              0x7c 2
-     * Turion 64:   0x68 2
-     */
-    uint32_t u32Dummy;
-    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
-    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
-    u32BaseFamily = (u32Version >> 8) & 0xf;
-    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
-    u32Model      = ((u32Version >> 4) & 0xf);
-    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
-    u32Stepping   = u32Version & 0xf;
-
-    bool fErratumApplies = false;
-    if (   u32Family == 0xf
-        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
-        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
-    {
-        fErratumApplies = true;
-    }
-
-    if (pu32Family)
-        *pu32Family   = u32Family;
-    if (pu32Model)
-        *pu32Model    = u32Model;
-    if (pu32Stepping)
-        *pu32Stepping = u32Stepping;
-
-    return fErratumApplies;
-}
-
-
-/**
  * Sets or clears the single instruction flag.
  *
@@ -541,94 +487,4 @@
     else
         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
-}
-
-
-/**
- * VMX nested-guest VM-exit handler.
- *
- * @param   pVCpu              The cross context virtual CPU structure.
- * @param   uBasicExitReason   The basic exit reason.
- */
-VMM_INT_DECL(void) HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason)
-{
-    RT_NOREF2(pVCpu, uBasicExitReason);
-}
-
-
-/**
- * Gets a copy of the VMX host MSRs that were read by HM during ring-0
- * initialization.
- *
- * @return VBox status code.
- * @param   pVM        The cross context VM structure.
- * @param   pVmxMsrs   Where to store the VMXMSRS struct (only valid when
- *                     VINF_SUCCESS is returned).
- *
- * @remarks Caller needs to take care not to call this function too early. Call
- *          after HM initialization is fully complete.
- */
-VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs)
-{
-    AssertPtrReturn(pVM,      VERR_INVALID_PARAMETER);
-    AssertPtrReturn(pVmxMsrs, VERR_INVALID_PARAMETER);
-    if (pVM->hm.s.vmx.fSupported)
-    {
-        *pVmxMsrs = pVM->hm.s.vmx.Msrs;
-        return VINF_SUCCESS;
-    }
-    return VERR_VMX_NOT_SUPPORTED;
-}
-
-
-/**
- * Gets the specified VMX host MSR that was read by HM during ring-0
- * initialization.
- *
- * @return VBox status code.
- * @param   pVM        The cross context VM structure.
- * @param   idMsr      The MSR.
- * @param   puValue    Where to store the MSR value (only updated when VINF_SUCCESS
- *                     is returned).
- *
- * @remarks Caller needs to take care not to call this function too early. Call
- *          after HM initialization is fully complete.
- */
-VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue)
-{
-    AssertPtrReturn(pVM,     VERR_INVALID_PARAMETER);
-    AssertPtrReturn(puValue, VERR_INVALID_PARAMETER);
-
-    if (!pVM->hm.s.vmx.fSupported)
-        return VERR_VMX_NOT_SUPPORTED;
-
-    PCVMXMSRS pVmxMsrs = &pVM->hm.s.vmx.Msrs;
-    switch (idMsr)
-    {
-        case MSR_IA32_FEATURE_CONTROL:         *puValue =  pVmxMsrs->u64FeatCtrl;      break;
-        case MSR_IA32_VMX_BASIC:               *puValue =  pVmxMsrs->u64Basic;         break;
-        case MSR_IA32_VMX_PINBASED_CTLS:       *puValue =  pVmxMsrs->PinCtls.u;        break;
-        case MSR_IA32_VMX_PROCBASED_CTLS:      *puValue =  pVmxMsrs->ProcCtls.u;       break;
-        case MSR_IA32_VMX_PROCBASED_CTLS2:     *puValue =  pVmxMsrs->ProcCtls2.u;      break;
-        case MSR_IA32_VMX_EXIT_CTLS:           *puValue =  pVmxMsrs->ExitCtls.u;       break;
-        case MSR_IA32_VMX_ENTRY_CTLS:          *puValue =  pVmxMsrs->EntryCtls.u;      break;
-        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:  *puValue =  pVmxMsrs->TruePinCtls.u;    break;
-        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: *puValue =  pVmxMsrs->TrueProcCtls.u;   break;
-        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:     *puValue =  pVmxMsrs->TrueEntryCtls.u;  break;
-        case MSR_IA32_VMX_TRUE_EXIT_CTLS:      *puValue =  pVmxMsrs->TrueExitCtls.u;   break;
-        case MSR_IA32_VMX_MISC:                *puValue =  pVmxMsrs->u64Misc;          break;
-        case MSR_IA32_VMX_CR0_FIXED0:          *puValue =  pVmxMsrs->u64Cr0Fixed0;     break;
-        case MSR_IA32_VMX_CR0_FIXED1:          *puValue =  pVmxMsrs->u64Cr0Fixed1;     break;
-        case MSR_IA32_VMX_CR4_FIXED0:          *puValue =  pVmxMsrs->u64Cr4Fixed0;     break;
-        case MSR_IA32_VMX_CR4_FIXED1:          *puValue =  pVmxMsrs->u64Cr4Fixed1;     break;
-        case MSR_IA32_VMX_VMCS_ENUM:           *puValue =  pVmxMsrs->u64VmcsEnum;      break;
-        case MSR_IA32_VMX_VMFUNC:              *puValue =  pVmxMsrs->u64VmFunc;        break;
-        case MSR_IA32_VMX_EPT_VPID_CAP:        *puValue =  pVmxMsrs->u64EptVpidCaps;   break;
-        default:
-        {
-            AssertMsgFailed(("Invalid MSR %#x\n", idMsr));
-            return VERR_NOT_FOUND;
-        }
-    }
-    return VINF_SUCCESS;
 }
 
Index: /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 73606)
@@ -25,8 +25,6 @@
 #include <VBox/vmm/apic.h>
 #include <VBox/vmm/gim.h>
-#include <VBox/vmm/hm.h>
 #include <VBox/vmm/iem.h>
 #include <VBox/vmm/vm.h>
-#include <VBox/vmm/hm_svm.h>
 
 
@@ -243,5 +241,60 @@
 }
 
+
+/**
+ * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
+ * incorrect code bytes may be fetched after a world-switch".
+ *
+ * @param   pu32Family      Where to store the CPU family (can be NULL).
+ * @param   pu32Model       Where to store the CPU model (can be NULL).
+ * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
+ * @returns true if the erratum applies, false otherwise.
+ */
+VMM_INT_DECL(int) HMSvmIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
+{
+    /*
+     * Erratum 170 which requires a forced TLB flush for each world switch:
+     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
+     *
+     * All BH-G1/2 and DH-G1/2 models include a fix:
+     * Athlon X2:   0x6b 1/2
+     *              0x68 1/2
+     * Athlon 64:   0x7f 1
+     *              0x6f 2
+     * Sempron:     0x7f 1/2
+     *              0x6f 2
+     *              0x6c 2
+     *              0x7c 2
+     * Turion 64:   0x68 2
+     */
+    uint32_t u32Dummy;
+    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
+    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
+    u32BaseFamily = (u32Version >> 8) & 0xf;
+    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
+    u32Model      = ((u32Version >> 4) & 0xf);
+    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
+    u32Stepping   = u32Version & 0xf;
+
+    bool fErratumApplies = false;
+    if (   u32Family == 0xf
+        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
+        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
+    {
+        fErratumApplies = true;
+    }
+
+    if (pu32Family)
+        *pu32Family   = u32Family;
+    if (pu32Model)
+        *pu32Model    = u32Model;
+    if (pu32Stepping)
+        *pu32Stepping = u32Stepping;
+
+    return fErratumApplies;
+}
+
 #endif /* !IN_RC */
+
 
 /**
Index: /trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp	(revision 73606)
+++ /trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp	(revision 73606)
@@ -0,0 +1,167 @@
+/* $Id$ */
+/** @file
+ * HM VMX (VT-x) - All contexts.
+ */
+
+/*
+ * Copyright (C) 2018 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+*   Header Files                                                                                                                 *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_HM
+#define VMCPU_INCL_CPUM_GST_CTX
+#include "HMInternal.h"
+#include <VBox/vmm/vm.h>
+
+
+/*********************************************************************************************************************************
+*   Global Variables                                                                                                             *
+*********************************************************************************************************************************/
+#define VMX_INSTR_DIAG_DESC(a_Def, a_Desc)      #a_Def " - " a_Desc
+static const char * const g_apszVmxInstrDiagDesc[kVmxVInstrDiag_Last] =
+{
+    /* Internal processing errors. */
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_1               , "Ipe_1"        ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_2               , "Ipe_2"        ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_3               , "Ipe_3"        ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_4               , "Ipe_4"        ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_5               , "Ipe_5"        ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_6               , "Ipe_6"        ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_7               , "Ipe_7"        ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_8               , "Ipe_8"        ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_9               , "Ipe_9"        ),
+    /* VMXON. */
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_A20M          , "A20M"         ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cpl           , "Cpl"          ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr0Fixed0     , "Cr0Fixed0"    ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr4Fixed0     , "Cr4Fixed0"    ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Intercept     , "Intercept"    ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_LongModeCS    , "LongModeCS"   ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_MsrFeatCtl    , "MsrFeatCtl"   ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAlign      , "PtrAlign"     ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAbnormal   , "PtrAbnormal"  ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrMap        , "PtrMap"       ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrReadPhys   , "PtrReadPhys"  ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrWidth      , "PtrWidth"     ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_RealOrV86Mode , "RealOrV86Mode"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_ShadowVmcs    , "ShadowVmcs"   ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Success       , "Success"      ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Vmxe          , "Vmxe"         ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmcsRevId     , "VmcsRevId"    ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxRoot       , "VmxRoot"      ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxRootCpl    , "VmxRootCpl"   ),
+    /* VMXOFF. */
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Cpl          , "Cpl"          ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Intercept    , "Intercept"    ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_LongModeCS   , "LongModeCS"   ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_RealOrV86Mode, "RealOrV86Mode"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Success      , "Success"      ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Vmxe         , "Vmxe"         ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_VmxRoot      , "VmxRoot"      )
+    /* kVmxVInstrDiag_Last */
+};
+#undef VMX_INSTR_DIAG_DESC
+
+
+/**
+ * Gets a copy of the VMX host MSRs that were read by HM during ring-0
+ * initialization.
+ *
+ * @return VBox status code.
+ * @param   pVM        The cross context VM structure.
+ * @param   pVmxMsrs   Where to store the VMXMSRS struct (only valid when
+ *                     VINF_SUCCESS is returned).
+ *
+ * @remarks Caller needs to take care not to call this function too early. Call
+ *          after HM initialization is fully complete.
+ */
+VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs)
+{
+    AssertPtrReturn(pVM,      VERR_INVALID_PARAMETER);
+    AssertPtrReturn(pVmxMsrs, VERR_INVALID_PARAMETER);
+    if (pVM->hm.s.vmx.fSupported)
+    {
+        *pVmxMsrs = pVM->hm.s.vmx.Msrs;
+        return VINF_SUCCESS;
+    }
+    return VERR_VMX_NOT_SUPPORTED;
+}
+
+
+/**
+ * Gets the specified VMX host MSR that was read by HM during ring-0
+ * initialization.
+ *
+ * @return VBox status code.
+ * @param   pVM        The cross context VM structure.
+ * @param   idMsr      The MSR.
+ * @param   puValue    Where to store the MSR value (only updated when VINF_SUCCESS
+ *                     is returned).
+ *
+ * @remarks Caller needs to take care not to call this function too early. Call
+ *          after HM initialization is fully complete.
+ */
+VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue)
+{
+    AssertPtrReturn(pVM,     VERR_INVALID_PARAMETER);
+    AssertPtrReturn(puValue, VERR_INVALID_PARAMETER);
+
+    if (!pVM->hm.s.vmx.fSupported)
+        return VERR_VMX_NOT_SUPPORTED;
+
+    PCVMXMSRS pVmxMsrs = &pVM->hm.s.vmx.Msrs;
+    switch (idMsr)
+    {
+        case MSR_IA32_FEATURE_CONTROL:         *puValue =  pVmxMsrs->u64FeatCtrl;      break;
+        case MSR_IA32_VMX_BASIC:               *puValue =  pVmxMsrs->u64Basic;         break;
+        case MSR_IA32_VMX_PINBASED_CTLS:       *puValue =  pVmxMsrs->PinCtls.u;        break;
+        case MSR_IA32_VMX_PROCBASED_CTLS:      *puValue =  pVmxMsrs->ProcCtls.u;       break;
+        case MSR_IA32_VMX_PROCBASED_CTLS2:     *puValue =  pVmxMsrs->ProcCtls2.u;      break;
+        case MSR_IA32_VMX_EXIT_CTLS:           *puValue =  pVmxMsrs->ExitCtls.u;       break;
+        case MSR_IA32_VMX_ENTRY_CTLS:          *puValue =  pVmxMsrs->EntryCtls.u;      break;
+        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:  *puValue =  pVmxMsrs->TruePinCtls.u;    break;
+        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: *puValue =  pVmxMsrs->TrueProcCtls.u;   break;
+        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:     *puValue =  pVmxMsrs->TrueEntryCtls.u;  break;
+        case MSR_IA32_VMX_TRUE_EXIT_CTLS:      *puValue =  pVmxMsrs->TrueExitCtls.u;   break;
+        case MSR_IA32_VMX_MISC:                *puValue =  pVmxMsrs->u64Misc;          break;
+        case MSR_IA32_VMX_CR0_FIXED0:          *puValue =  pVmxMsrs->u64Cr0Fixed0;     break;
+        case MSR_IA32_VMX_CR0_FIXED1:          *puValue =  pVmxMsrs->u64Cr0Fixed1;     break;
+        case MSR_IA32_VMX_CR4_FIXED0:          *puValue =  pVmxMsrs->u64Cr4Fixed0;     break;
+        case MSR_IA32_VMX_CR4_FIXED1:          *puValue =  pVmxMsrs->u64Cr4Fixed1;     break;
+        case MSR_IA32_VMX_VMCS_ENUM:           *puValue =  pVmxMsrs->u64VmcsEnum;      break;
+        case MSR_IA32_VMX_VMFUNC:              *puValue =  pVmxMsrs->u64VmFunc;        break;
+        case MSR_IA32_VMX_EPT_VPID_CAP:        *puValue =  pVmxMsrs->u64EptVpidCaps;   break;
+        default:
+        {
+            AssertMsgFailed(("Invalid MSR %#x\n", idMsr));
+            return VERR_NOT_FOUND;
+        }
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets the description of a VMX instruction diagnostic enum member.
+ *
+ * @returns The descriptive string.
+ * @param   enmInstrDiag    The VMX instruction diagnostic.
+ */
+VMM_INT_DECL(const char *) HMVmxGetInstrDiagDesc(VMXVINSTRDIAG enmInstrDiag)
+{
+    if (RT_LIKELY(enmInstrDiag < RT_ELEMENTS(g_apszVmxInstrDiagDesc)))
+        return g_apszVmxInstrDiagDesc[enmInstrDiag];
+    return "Unknown/invalid";
+}
+
Index: /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 73606)
@@ -388,23 +388,25 @@
  * Check the common VMX instruction preconditions.
  */
-#define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
+#define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
     do { \
-    { \
         if (!IEM_IS_VMX_ENABLED(a_pVCpu)) \
         { \
-            Log((RT_STR(a_Instr) ": CR4.VMXE not enabled -> #UD\n")); \
+            Log((a_szInstr ": CR4.VMXE not enabled -> #UD\n")); \
+            (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_Vmxe; \
             return iemRaiseUndefinedOpcode(a_pVCpu); \
         } \
         if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
         { \
-            Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
+            Log((a_szInstr ": Real or v8086 mode -> #UD\n")); \
+            (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_RealOrV86Mode; \
             return iemRaiseUndefinedOpcode(a_pVCpu); \
         } \
         if (IEM_IS_LONG_MODE(a_pVCpu) && !IEM_IS_64BIT_CODE(a_pVCpu)) \
         { \
-            Log((RT_STR(a_Instr) ": Long mode without 64-bit code segment -> #UD\n")); \
+            Log((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
+            (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_LongModeCS; \
             return iemRaiseUndefinedOpcode(a_pVCpu); \
         } \
-} while (0)
+    } while (0)
 
 /**
@@ -413,7 +415,19 @@
 # define IEM_IS_VMX_ENABLED(a_pVCpu)                         (CPUMIsGuestVmxEnabled(IEM_GET_CTX(a_pVCpu)))
 
+/**
+ * Check if the guest has entered VMX root operation.
+ */
+#define IEM_IS_VMX_ROOT_MODE(a_pVCpu)                        (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
+
+/**
+ * Check if the guest has entered VMX non-root operation.
+ */
+#define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu)                    (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
+
 #else
-# define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr)       do { } while (0)
+# define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix)  do { } while (0)
 # define IEM_IS_VMX_ENABLED(a_pVCpu)                         (false)
+# define IEM_IS_VMX_ROOT_MODE(a_pVCpu)                       (false)
+# define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu)                   (false)
 
 #endif
@@ -938,4 +952,9 @@
 #endif
 
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+IEM_STATIC VBOXSTRICTRC     iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmxon, PCVMXEXITINSTRINFO pExitInstrInfo,
+                                        RTGCPTR GCPtrDisp);
+#endif
+
 /**
  * Sets the pass up status.
@@ -1037,4 +1056,5 @@
     pVCpu->iem.s.uRexReg            = 127;
     pVCpu->iem.s.uRexB              = 127;
+    pVCpu->iem.s.offModRm           = 127;
     pVCpu->iem.s.uRexIndex          = 127;
     pVCpu->iem.s.iEffSeg            = 127;
@@ -1196,4 +1216,5 @@
     pVCpu->iem.s.cbOpcode           = 0;
 #endif
+    pVCpu->iem.s.offModRm           = 0;
     pVCpu->iem.s.cActiveMappings    = 0;
     pVCpu->iem.s.iNextMapping       = 0;
@@ -1306,4 +1327,5 @@
     pVCpu->iem.s.offOpcode          = 0;
 #endif
+    pVCpu->iem.s.offModRm           = 0;
     Assert(pVCpu->iem.s.cActiveMappings == 0);
     pVCpu->iem.s.iNextMapping       = 0;
@@ -2434,5 +2456,5 @@
 # ifdef IEM_WITH_CODE_TLB
     uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
-    pVCpu->iem.s.offModRm = offOpcode;
+    pVCpu->iem.s.offModRm  = offBuf;
     uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
     if (RT_LIKELY(   pbBuf != NULL
@@ -2443,5 +2465,5 @@
     }
 # else
-    uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
+    uintptr_t offOpcode   = pVCpu->iem.s.offOpcode;
     pVCpu->iem.s.offModRm = offOpcode;
     if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
@@ -2468,5 +2490,5 @@
     do \
     { \
-        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pu8)); \
+        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
         if (rcStrict2 == VINF_SUCCESS) \
         { /* likely */ } \
@@ -5523,5 +5545,6 @@
             /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
             Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
-            if (!CPUMIsGuestInNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
+            if (   !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
+                && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
                 return VERR_EM_GUEST_CPU_HANG;
         }
@@ -8083,4 +8106,6 @@
             if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
                 return VINF_SUCCESS;
+            /** @todo We should probably raise #SS(0) here if segment is SS; see AMD spec.
+             *        4.12.2 "Data Limit Checks in 64-bit Mode". */
             return iemRaiseGeneralProtectionFault0(pVCpu);
         }
@@ -12547,4 +12572,20 @@
     } while (0)
 
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+/** This instruction raises an \#UD in real and V8086 mode or when not using a
+ *  64-bit code segment when in long mode (applicable to all VMX instructions
+ *  except VMCALL). */
+# define IEMOP_HLP_VMX_INSTR() \
+    do \
+    { \
+        if (   !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
+            && (  !IEM_IS_LONG_MODE(pVCpu) \
+                || IEM_IS_64BIT_CODE(pVCpu))) \
+        { /* likely */ } \
+        else \
+            return IEMOP_RAISE_INVALID_OPCODE(); \
+    } while (0)
+#endif
+
 /** The instruction is not available in 64-bit mode, throw \#UD if we're in
  * 64-bit mode. */
@@ -15096,26 +15137,4 @@
 
 /**
- * Interface for HM and EM to emulate the INVPCID instruction.
- *
- * @param   pVCpu               The cross context virtual CPU structure.
- * @param   cbInstr             The instruction length in bytes.
- * @param   uType               The invalidation type.
- * @param   GCPtrInvpcidDesc    The effective address of the INVPCID descriptor.
- *
- * @remarks In ring-0 not all of the state needs to be synced in.
- */
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)
-{
-    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
-
-    iemInitExec(pVCpu, false /*fBypassHandlers*/);
-    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);
-    Assert(!pVCpu->iem.s.cActiveMappings);
-    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
-}
-
-
-
-/**
  * Interface for HM and EM to emulate the CPUID instruction.
  *
@@ -15498,4 +15517,53 @@
 
 #endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+
+/**
+ * Interface for HM and EM to emulate the VMXOFF instruction.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   cbInstr     The instruction length in bytes.
+ * @thread  EMT(pVCpu)
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+
+    iemInitExec(pVCpu, false /*fBypassHandlers*/);
+    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
+    Assert(!pVCpu->iem.s.cActiveMappings);
+    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
+}
+
+
+/**
+ * Interface for HM and EM to emulate the VMXON instruction.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
+ * @param   cbInstr         The instruction length in bytes.
+ * @param   GCPtrVmxon      The linear address of the VMXON pointer.
+ * @param   uExitInstrInfo  The VM-exit instruction information field.
+ * @param   GCPtrDisp       The displacement field for @a GCPtrVmxon if any.
+ * @thread  EMT(pVCpu)
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmxon, uint32_t uExitInstrInfo,
+                                               RTGCPTR GCPtrDisp)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
+
+    iemInitExec(pVCpu, false /*fBypassHandlers*/);
+    PCVMXEXITINSTRINFO pExitInstrInfo = (PCVMXEXITINSTRINFO)&uExitInstrInfo;
+    VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, GCPtrVmxon, pExitInstrInfo, GCPtrDisp);
+    if (pVCpu->iem.s.cActiveMappings)
+        iemMemRollback(pVCpu);
+    return iemExecStatusCodeFiddling(pVCpu, rcStrict);
+}
+
+#endif
+
 #ifdef IN_RING3
 
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 73606)
@@ -5335,4 +5335,16 @@
             }
 
+            /* Check for bits that must remain set in VMX operation. */
+            if (IEM_IS_VMX_ROOT_MODE(pVCpu))
+            {
+                uint32_t const uCr0Fixed0 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxUnrestrictedGuest ?
+                                            VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
+                if ((uNewCrX & uCr0Fixed0) != uCr0Fixed0)
+                {
+                    Log(("Trying to clear reserved CR0 bits in VMX operation: NewCr0=%#llx MB1=%#llx\n", uNewCrX, uCr0Fixed0));
+                    return iemRaiseGeneralProtectionFault0(pVCpu);
+                }
+            }
+
             /** @todo check reserved PDPTR bits as AMD states. */
 
@@ -5548,4 +5560,15 @@
                 IEM_SVM_UPDATE_NRIP(pVCpu);
                 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
+            }
+
+            /* Check for bits that must remain set in VMX operation. */
+            if (IEM_IS_VMX_ROOT_MODE(pVCpu))
+            {
+                uint32_t const uCr4Fixed0 = VMX_V_CR4_FIXED0;
+                if ((uNewCrX & uCr4Fixed0) != uCr4Fixed0)
+                {
+                    Log(("Trying to clear reserved CR4 bits in VMX operation: NewCr4=%#llx MB1=%#llx\n", uNewCrX, uCr4Fixed0));
+                    return iemRaiseGeneralProtectionFault0(pVCpu);
+                }
             }
 
@@ -5935,9 +5958,10 @@
  * Implements INVPCID.
  *
+ * @param   iEffSeg              The segment of the invpcid descriptor.
+ * @param   GCPtrInvpcidDesc     The address of invpcid descriptor.
  * @param   uInvpcidType         The invalidation type.
- * @param   GCPtrInvpcidDesc     The effective address of invpcid descriptor.
  * @remarks Updates the RIP.
  */
-IEM_CIMPL_DEF_2(iemCImpl_invpcid, uint64_t, uInvpcidType, RTGCPTR, GCPtrInvpcidDesc)
+IEM_CIMPL_DEF_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint8_t, uInvpcidType)
 {
     /*
@@ -5967,5 +5991,5 @@
      */
     RTUINT128U uDesc;
-    VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, pVCpu->iem.s.iEffSeg, GCPtrInvpcidDesc);
+    VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvpcidDesc);
     if (rcStrict == VINF_SUCCESS)
     {
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h	(revision 73606)
@@ -22,5 +22,5 @@
 IEM_CIMPL_DEF_0(iemCImpl_vmcall)
 {
-    /** @todo intercept. */
+    /** @todo NSTVMX: intercept. */
 
     /* Join forces with vmmcall. */
@@ -28,2 +28,330 @@
 }
 
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+
+/**
+ * Implements VMSucceed for VMX instruction success.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+DECLINLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
+{
+    pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
+}
+
+
+/**
+ * Implements VMFailInvalid for VMX instruction failure.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+DECLINLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
+{
+    pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
+    pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
+}
+
+
+/**
+ * Implements VMFailValid for VMX instruction failure.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   enmInsErr   The VM instruction error.
+ */
+DECLINLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
+{
+    if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs))
+    {
+        pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
+        pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
+        /** @todo NSTVMX: VMWrite enmInsErr to VM-instruction error field. */
+        RT_NOREF(enmInsErr);
+    }
+}
+
+
+/**
+ * Implements VMFail for VMX instruction failure.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   enmInsErr   The VM instruction error.
+ */
+DECLINLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
+{
+    if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs))
+    {
+        iemVmxVmFailValid(pVCpu, enmInsErr);
+        /** @todo Set VM-instruction error field in the current virtual-VMCS.  */
+    }
+    else
+        iemVmxVmFailInvalid(pVCpu);
+}
+
+
+/**
+ * VMXON instruction execution worker.
+ *
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   cbInstr         The instruction length.
+ * @param   GCPtrVmxon      The linear address of the VMXON pointer.
+ * @param   ExitInstrInfo   The VM-exit instruction information field.
+ * @param   GCPtrDisp       The displacement field for @a GCPtrVmxon if any.
+ *
+ * @remarks Common VMX instruction checks are already expected to have been done by the caller,
+ *          i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmxon, PCVMXEXITINSTRINFO pExitInstrInfo,
+                                    RTGCPTR GCPtrDisp)
+{
+#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF5(pVCpu, cbInstr, GCPtrVmxon, pExitInstrInfo, GCPtrDisp);
+    return VINF_EM_RAW_EMULATE_INSTR;
+#else
+    if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
+    {
+        /* CPL. */
+        if (pVCpu->iem.s.uCpl > 0)
+        {
+            Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cpl;
+            return iemRaiseGeneralProtectionFault0(pVCpu);
+        }
+
+        /* A20M (A20 Masked) mode. */
+        if (!PGMPhysIsA20Enabled(pVCpu))
+        {
+            Log(("vmxon: A20M mode -> #GP(0)\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_A20M;
+            return iemRaiseGeneralProtectionFault0(pVCpu);
+        }
+
+        /* CR0 fixed bits. */
+        bool const     fUnrestrictedGuest = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxUnrestrictedGuest;
+        uint64_t const uCr0Fixed0         = fUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
+        if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
+        {
+            Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr0Fixed0;
+            return iemRaiseGeneralProtectionFault0(pVCpu);
+        }
+
+        /* CR4 fixed bits. */
+        if ((pVCpu->cpum.GstCtx.cr4 & VMX_V_CR4_FIXED0) != VMX_V_CR4_FIXED0)
+        {
+            Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr4Fixed0;
+            return iemRaiseGeneralProtectionFault0(pVCpu);
+        }
+
+        /* Feature control MSR's LOCK and VMXON bits. */
+        uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
+        if ((uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)) != (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
+        {
+            Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_MsrFeatCtl;
+            return iemRaiseGeneralProtectionFault0(pVCpu);
+        }
+
+        /* Get the VMXON pointer from the location specified by the source memory operand. */
+        RTGCPHYS GCPhysVmxon;
+        VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, pExitInstrInfo->InvVmxXsaves.iSegReg, GCPtrVmxon);
+        if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
+        {
+            Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrMap;
+            return rcStrict;
+        }
+
+        /* VMXON region pointer alignment. */
+        if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
+        {
+            Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAlign;
+            iemVmxVmFailInvalid(pVCpu);
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+            return VINF_SUCCESS;
+        }
+
+        /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
+           restriction imposed by our implementation. */
+        if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
+        {
+            Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAbnormal;
+            iemVmxVmFailInvalid(pVCpu);
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+            return VINF_SUCCESS;
+        }
+
+        /* Read the VMCS revision ID from the VMXON region. */
+        VMXVMCSREVID VmcsRevId;
+        int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
+        if (RT_FAILURE(rc))
+        {
+            Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrReadPhys;
+            return rc;
+        }
+
+        /* Physical-address width. */
+        uint64_t const uMsrBasic = CPUMGetGuestIa32VmxBasic(pVCpu);
+        if (   RT_BF_GET(uMsrBasic, VMX_BF_BASIC_PHYSADDR_WIDTH)
+            && RT_HI_U32(GCPhysVmxon))
+        {
+            Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrWidth;
+            iemVmxVmFailInvalid(pVCpu);
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+            return VINF_SUCCESS;
+        }
+
+        /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
+        if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
+        {
+            /* Revision ID mismatch. */
+            if (!VmcsRevId.n.fIsShadowVmcs)
+            {
+                Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
+                     VmcsRevId.n.u31RevisionId));
+                pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmcsRevId;
+                iemVmxVmFailInvalid(pVCpu);
+                iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+                return VINF_SUCCESS;
+            }
+
+            /* Shadow VMCS disallowed. */
+            Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_ShadowVmcs;
+            iemVmxVmFailInvalid(pVCpu);
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+            return VINF_SUCCESS;
+        }
+
+        /*
+         * Record that we're in VMX operation, block INIT, block and disable A20M.
+         */
+        pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon    = GCPhysVmxon;
+        pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
+        /** @todo NSTVMX: init. current VMCS pointer with ~0. */
+        /** @todo NSTVMX: clear address-range monitoring. */
+        /** @todo NSTVMX: Intel PT. */
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Success;
+        iemVmxVmSucceed(pVCpu);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
+        return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
+# else
+        return VINF_SUCCESS;
+# endif
+    }
+    else if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
+    {
+        RT_NOREF(GCPtrDisp);
+        /** @todo NSTVMX: intercept. */
+    }
+
+    Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
+
+    /* CPL. */
+    if (pVCpu->iem.s.uCpl > 0)
+    {
+        Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxRootCpl;
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    /* VMXON when already in VMX root mode. */
+    iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
+    pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxRoot;
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    return VINF_SUCCESS;
+#endif
+}
+
+
+/**
+ * Implements 'VMXON'.
+ */
+IEM_CIMPL_DEF_1(iemCImpl_vmxon, RTGCPTR, GCPtrVmxon)
+{
+    /** @todo NSTVMX: Parse ModR/M, SIB, disp.  */
+    RTGCPTR GCPtrDisp = 0;
+    VMXEXITINSTRINFO ExitInstrInfo;
+    ExitInstrInfo.u = 0;
+    ExitInstrInfo.InvVmxXsaves.u2Scaling       = 0;
+    ExitInstrInfo.InvVmxXsaves.u3AddrSize      = pVCpu->iem.s.enmEffAddrMode;
+    ExitInstrInfo.InvVmxXsaves.fIsRegOperand   = 0;
+    ExitInstrInfo.InvVmxXsaves.iSegReg         = pVCpu->iem.s.iEffSeg;
+    ExitInstrInfo.InvVmxXsaves.iIdxReg         = 0;
+    ExitInstrInfo.InvVmxXsaves.fIdxRegInvalid  = 0;
+    ExitInstrInfo.InvVmxXsaves.iBaseReg        = 0;
+    ExitInstrInfo.InvVmxXsaves.fBaseRegInvalid = 0;
+    ExitInstrInfo.InvVmxXsaves.iReg2           = 0;
+    return iemVmxVmxon(pVCpu, cbInstr, GCPtrVmxon, &ExitInstrInfo, GCPtrDisp);
+}
+
+
+/**
+ * Implements 'VMXOFF'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
+{
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF2(pVCpu, cbInstr);
+    return VINF_EM_RAW_EMULATE_INSTR;
+# else
+    IEM_VMX_INSTR_COMMON_CHECKS(pVCpu, "vmxoff", kVmxVInstrDiag_Vmxoff);
+    if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
+    {
+        Log(("vmxoff: Not in VMX root mode -> #UD\n"));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_VmxRoot;
+        return iemRaiseUndefinedOpcode(pVCpu);
+    }
+
+    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
+    {
+        /** @todo NSTVMX: intercept. */
+    }
+
+    /* CPL. */
+    if (pVCpu->iem.s.uCpl > 0)
+    {
+        Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Cpl;
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    /* Dual monitor treatment of SMIs and SMM. */
+    uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
+    if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
+    {
+        iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Record that we're no longer in VMX root operation, block INIT, block and disable A20M.
+     */
+    pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
+    Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
+
+    /** @todo NSTVMX: Unblock INIT. */
+    if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
+    { /** @todo NSTVMX: Unblock SMI. */ }
+    /** @todo NSTVMX: Unblock and enable A20M. */
+    /** @todo NSTVMX: Clear address-range monitoring. */
+
+    pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Success;
+    iemVmxVmSucceed(pVCpu);
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+#  if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
+    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
+#  else
+    return VINF_SUCCESS;
+#  endif
+# endif
+}
+
+#endif
+
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f38.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f38.cpp.h	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f38.cpp.h	(revision 73606)
@@ -317,20 +317,24 @@
         if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
         {
-            IEM_MC_BEGIN(2, 0);
-            IEM_MC_ARG(uint64_t, uInvpcidType,     0);
+            IEM_MC_BEGIN(3, 0);
+            IEM_MC_ARG(uint8_t,  iEffSeg,          0);
             IEM_MC_ARG(RTGCPTR,  GCPtrInvpcidDesc, 1);
+            IEM_MC_ARG(uint64_t, uInvpcidType,     2);
             IEM_MC_FETCH_GREG_U64(uInvpcidType, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
             IEM_MC_CALC_RM_EFF_ADDR(GCPtrInvpcidDesc, bRm, 0);
-            IEM_MC_CALL_CIMPL_2(iemCImpl_invpcid, uInvpcidType, GCPtrInvpcidDesc);
+            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+            IEM_MC_CALL_CIMPL_3(iemCImpl_invpcid, iEffSeg, GCPtrInvpcidDesc, uInvpcidType);
             IEM_MC_END();
         }
         else
         {
-            IEM_MC_BEGIN(2, 0);
-            IEM_MC_ARG(uint32_t, uInvpcidType,     0);
+            IEM_MC_BEGIN(3, 0);
+            IEM_MC_ARG(uint8_t,  iEffSeg,          0);
             IEM_MC_ARG(RTGCPTR,  GCPtrInvpcidDesc, 1);
+            IEM_MC_ARG(uint32_t, uInvpcidType,     2);
             IEM_MC_FETCH_GREG_U32(uInvpcidType, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
             IEM_MC_CALC_RM_EFF_ADDR(GCPtrInvpcidDesc, bRm, 0);
-            IEM_MC_CALL_CIMPL_2(iemCImpl_invpcid, uInvpcidType, GCPtrInvpcidDesc);
+            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+            IEM_MC_CALL_CIMPL_3(iemCImpl_invpcid, iEffSeg, GCPtrInvpcidDesc, uInvpcidType);
             IEM_MC_END();
         }
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h	(revision 73606)
@@ -264,4 +264,12 @@
 
 /** Opcode 0x0f 0x01 /0. */
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+FNIEMOP_DEF(iemOp_Grp7_vmxoff)
+{
+    IEMOP_MNEMONIC(vmxoff, "vmxoff");
+    IEMOP_HLP_DONE_DECODING();
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmxoff);
+}
+#else
 FNIEMOP_DEF(iemOp_Grp7_vmxoff)
 {
@@ -269,4 +277,5 @@
     return IEMOP_RAISE_INVALID_OPCODE();
 }
+#endif
 
 
@@ -8418,5 +8427,20 @@
 
 /** Opcode 0xf3 0x0f 0xc7 !11/6. */
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+FNIEMOP_DEF_1(iemOp_Grp9_vmxon_Mq, uint8_t, bRm)
+{
+    IEMOP_MNEMONIC(vmxon, "vmxon");
+    IEMOP_HLP_VMX_INSTR();
+    IEM_MC_BEGIN(1, 0);
+    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 0);
+    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+    IEMOP_HLP_DONE_DECODING();
+    IEM_MC_CALL_CIMPL_1(iemCImpl_vmxon, GCPtrEffSrc);
+    IEM_MC_END();
+    return VINF_SUCCESS;
+}
+#else
 FNIEMOP_UD_STUB_1(iemOp_Grp9_vmxon_Mq, uint8_t, bRm);
+#endif
 
 /** Opcode [0xf3] 0x0f 0xc7 !11/7. */
@@ -8464,5 +8488,5 @@
 FNIEMOP_DEF(iemOp_Grp9)
 {
-    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_RM(&bRm);
     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
         /* register, register */
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 73606)
@@ -30,4 +30,5 @@
 #include <VBox/vmm/iom.h>
 #include <VBox/vmm/tm.h>
+#include <VBox/vmm/em.h>
 #include <VBox/vmm/gim.h>
 #include <VBox/vmm/apic.h>
@@ -711,5 +712,5 @@
     uint32_t u32Model;
     uint32_t u32Stepping;
-    if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
+    if (HMSvmIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
     {
         Log4Func(("AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.h	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.h	(revision 73606)
@@ -21,9 +21,5 @@
 #include <VBox/cdefs.h>
 #include <VBox/types.h>
-#include <VBox/vmm/em.h>
-#include <VBox/vmm/stam.h>
-#include <VBox/dis.h>
 #include <VBox/vmm/hm.h>
-#include <VBox/vmm/pgm.h>
 #include <VBox/vmm/hm_svm.h>
 
Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 73606)
@@ -32,4 +32,5 @@
 #include <VBox/vmm/selm.h>
 #include <VBox/vmm/tm.h>
+#include <VBox/vmm/em.h>
 #include <VBox/vmm/gim.h>
 #include <VBox/vmm/apic.h>
@@ -196,98 +197,60 @@
 {
     /** The host's rflags/eflags. */
-    RTCCUINTREG     fEFlags;
+    RTCCUINTREG         fEFlags;
 #if HC_ARCH_BITS == 32
-    uint32_t        u32Alignment0;
+    uint32_t            u32Alignment0;
 #endif
     /** The guest's TPR value used for TPR shadowing. */
-    uint8_t         u8GuestTpr;
+    uint8_t             u8GuestTpr;
     /** Alignment. */
-    uint8_t         abAlignment0[7];
+    uint8_t             abAlignment0[7];
 
     /** The basic VM-exit reason. */
-    uint16_t        uExitReason;
+    uint16_t            uExitReason;
     /** Alignment. */
-    uint16_t        u16Alignment0;
+    uint16_t            u16Alignment0;
     /** The VM-exit interruption error code. */
-    uint32_t        uExitIntErrorCode;
+    uint32_t            uExitIntErrorCode;
     /** The VM-exit exit code qualification. */
-    uint64_t        uExitQualification;
+    uint64_t            uExitQual;
 
     /** The VM-exit interruption-information field. */
-    uint32_t        uExitIntInfo;
+    uint32_t            uExitIntInfo;
     /** The VM-exit instruction-length field. */
-    uint32_t        cbInstr;
+    uint32_t            cbInstr;
     /** The VM-exit instruction-information field. */
-    union
-    {
-        /** Plain unsigned int representation. */
-        uint32_t    u;
-        /** INS and OUTS information. */
-        struct
-        {
-            uint32_t    u7Reserved0 : 7;
-            /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
-            uint32_t    u3AddrSize  : 3;
-            uint32_t    u5Reserved1 : 5;
-            /** The segment register (X86_SREG_XXX). */
-            uint32_t    iSegReg     : 3;
-            uint32_t    uReserved2  : 14;
-        } StrIo;
-        /** INVEPT, INVVPID, INVPCID information.  */
-        struct
-        {
-            /** Scaling; 0=no scaling, 1=scale-by-2, 2=scale-by-4, 3=scale-by-8. */
-            uint32_t    u2Scaling     : 2;
-            uint32_t    u5Reserved0   : 5;
-            /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
-            uint32_t    u3AddrSize    : 3;
-            uint32_t    u1Reserved0   : 1;
-            uint32_t    u4Reserved0   : 4;
-            /** The segment register (X86_SREG_XXX). */
-            uint32_t    iSegReg       : 3;
-            /** The index register (X86_GREG_XXX). */
-            uint32_t    iIdxReg       : 4;
-            /** Set if index register is invalid. */
-            uint32_t    fIdxRegValid  : 1;
-            /** The base register (X86_GREG_XXX). */
-            uint32_t    iBaseReg      : 4;
-            /** Set if base register is invalid. */
-            uint32_t    fBaseRegValid : 1;
-            /** Register 2 (X86_GREG_XXX). */
-            uint32_t    iReg2         : 4;
-        } Inv;
-    }               ExitInstrInfo;
+    VMXEXITINSTRINFO    ExitInstrInfo;
     /** Whether the VM-entry failed or not. */
-    bool            fVMEntryFailed;
+    bool                fVMEntryFailed;
     /** Alignment. */
-    uint8_t         abAlignment1[3];
+    uint8_t             abAlignment1[3];
 
     /** The VM-entry interruption-information field. */
-    uint32_t        uEntryIntInfo;
+    uint32_t            uEntryIntInfo;
     /** The VM-entry exception error code field. */
-    uint32_t        uEntryXcptErrorCode;
+    uint32_t            uEntryXcptErrorCode;
     /** The VM-entry instruction length field. */
-    uint32_t        cbEntryInstr;
+    uint32_t            cbEntryInstr;
 
     /** IDT-vectoring information field. */
-    uint32_t        uIdtVectoringInfo;
+    uint32_t            uIdtVectoringInfo;
     /** IDT-vectoring error code. */
-    uint32_t        uIdtVectoringErrorCode;
+    uint32_t            uIdtVectoringErrorCode;
 
     /** Mask of currently read VMCS fields; HMVMX_READ_XXX. */
-    uint32_t        fVmcsFieldsRead;
+    uint32_t            fVmcsFieldsRead;
 
     /** Whether the guest debug state was active at the time of VM-exit. */
-    bool            fWasGuestDebugStateActive;
+    bool                fWasGuestDebugStateActive;
     /** Whether the hyper debug state was active at the time of VM-exit. */
-    bool            fWasHyperDebugStateActive;
+    bool                fWasHyperDebugStateActive;
     /** Whether TSC-offsetting should be setup before VM-entry. */
-    bool            fUpdateTscOffsettingAndPreemptTimer;
+    bool                fUpdateTscOffsettingAndPreemptTimer;
     /** Whether the VM-exit was caused by a page-fault during delivery of a
      *  contributory exception or a page-fault. */
-    bool            fVectoringDoublePF;
+    bool                fVectoringDoublePF;
     /** Whether the VM-exit was caused by a page-fault during delivery of an
      *  external interrupt or NMI. */
-    bool            fVectoringPF;
+    bool                fVectoringPF;
 } VMXTRANSIENT;
 AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason,               sizeof(uint64_t));
@@ -404,4 +367,15 @@
 static FNVMXEXITHANDLER     hmR0VmxExitRdpmc;
 static FNVMXEXITHANDLER     hmR0VmxExitVmcall;
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+static FNVMXEXITHANDLER     hmR0VmxExitVmclear;
+static FNVMXEXITHANDLER     hmR0VmxExitVmlaunch;
+static FNVMXEXITHANDLER     hmR0VmxExitVmptrld;
+static FNVMXEXITHANDLER     hmR0VmxExitVmptrst;
+static FNVMXEXITHANDLER     hmR0VmxExitVmread;
+static FNVMXEXITHANDLER     hmR0VmxExitVmresume;
+static FNVMXEXITHANDLER     hmR0VmxExitVmwrite;
+static FNVMXEXITHANDLER     hmR0VmxExitVmxoff;
+static FNVMXEXITHANDLER     hmR0VmxExitVmxon;
+#endif
 static FNVMXEXITHANDLER     hmR0VmxExitRdtsc;
 static FNVMXEXITHANDLERNSRC hmR0VmxExitRsm;
@@ -473,4 +447,15 @@
  /* 17  VMX_EXIT_RSM                     */  hmR0VmxExitRsm,
  /* 18  VMX_EXIT_VMCALL                  */  hmR0VmxExitVmcall,
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ /* 19  VMX_EXIT_VMCLEAR                 */  hmR0VmxExitVmclear,
+ /* 20  VMX_EXIT_VMLAUNCH                */  hmR0VmxExitVmlaunch,
+ /* 21  VMX_EXIT_VMPTRLD                 */  hmR0VmxExitVmptrld,
+ /* 22  VMX_EXIT_VMPTRST                 */  hmR0VmxExitVmptrst,
+ /* 23  VMX_EXIT_VMREAD                  */  hmR0VmxExitVmread,
+ /* 24  VMX_EXIT_VMRESUME                */  hmR0VmxExitVmresume,
+ /* 25  VMX_EXIT_VMWRITE                 */  hmR0VmxExitVmwrite,
+ /* 26  VMX_EXIT_VMXOFF                  */  hmR0VmxExitVmxoff,
+ /* 27  VMX_EXIT_VMXON                   */  hmR0VmxExitVmxon,
+#else
  /* 19  VMX_EXIT_VMCLEAR                 */  hmR0VmxExitSetPendingXcptUD,
  /* 20  VMX_EXIT_VMLAUNCH                */  hmR0VmxExitSetPendingXcptUD,
@@ -482,4 +467,5 @@
  /* 26  VMX_EXIT_VMXOFF                  */  hmR0VmxExitSetPendingXcptUD,
  /* 27  VMX_EXIT_VMXON                   */  hmR0VmxExitSetPendingXcptUD,
+#endif
  /* 28  VMX_EXIT_MOV_CRX                 */  hmR0VmxExitMovCRx,
  /* 29  VMX_EXIT_MOV_DRX                 */  hmR0VmxExitMovDRx,
@@ -719,9 +705,9 @@
  * @param   pVmxTransient   Pointer to the VMX transient structure.
  */
-DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+DECLINLINE(int) hmR0VmxReadExitQualVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
 {
     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
     {
-        int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
+        int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual); NOREF(pVCpu);
         AssertRCReturn(rc, rc);
         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
@@ -4999,5 +4985,5 @@
             int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
             rc    |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
-            rc    |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+            rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
             AssertRC(rc);
 
@@ -5009,5 +4995,5 @@
                 Log4(("uExitReason        %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
                      pVmxTransient->uExitReason));
-                Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
+                Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQual));
                 Log4(("InstrError         %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
                 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
@@ -5788,7 +5774,8 @@
 DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu)
 {
-    uint32_t const u32IntInfo = X86_XCPT_DF | VMX_EXIT_INT_INFO_VALID
-                              | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT)
-                              | VMX_EXIT_INT_INFO_ERROR_CODE_VALID;
+    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DF)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo,  0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
 }
@@ -5802,6 +5789,8 @@
 DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu)
 {
-    uint32_t const u32IntInfo  = X86_XCPT_UD | VMX_EXIT_INT_INFO_VALID
-                               | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
+    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_UD)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
 }
@@ -5815,6 +5804,8 @@
 DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu)
 {
-    uint32_t const u32IntInfo = X86_XCPT_DB | VMX_EXIT_INT_INFO_VALID
-                              | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
+    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DB)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
 }
@@ -5830,8 +5821,308 @@
 DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, uint32_t cbInstr)
 {
-    uint32_t const u32IntInfo  = X86_XCPT_OF | VMX_EXIT_INT_INFO_VALID
-                               | (VMX_EXIT_INT_INFO_TYPE_SW_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
+    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_OF)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_SW_INT)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
 }
+
+
+/**
+ * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
+ *
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   u32ErrCode      The error code for the general-protection exception.
+ */
+DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, uint32_t u32ErrCode)
+{
+    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
+    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
+}
+
+
+/**
+ * Sets a stack (\#SS) exception as pending-for-injection into the VM.
+ *
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   u32ErrCode      The error code for the stack exception.
+ */
+DECLINLINE(void) hmR0VmxSetPendingXcptSS(PVMCPU pVCpu, uint32_t u32ErrCode)
+{
+    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_SS)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
+                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
+    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
+}
+
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+
+/**
+ * Decodes the memory operand of a VM-exit due to instruction execution.
+ *
+ * For instructions with two operands, the second operand is usually found in the
+ * VM-exit qualification field.
+ *
+ * @returns Strict VBox status code (i.e. informational status codes too).
+ * @retval  VINF_SUCCESS if the operand was successfully decoded.
+ * @retval  VINF_HM_PENDING_XCPT if an exception was raised while decoding the
+ *          operand.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   pExitInstrInfo  Pointer to the VM-exit instruction information.
+ * @param   GCPtrDisp       The instruction displacement field, if any. For
+ *                          RIP-relative addressing pass RIP + displacement here.
+ * @param   fIsWrite        Whether the operand is a destination memory operand
+ *                          (i.e. writeable memory location) or not.
+ * @param   pGCPtrMem       Where to store the destination memory operand.
+ */
+static VBOXSTRICTRC hmR0VmxDecodeMemOperand(PVMCPU pVCpu, PCVMXEXITINSTRINFO pExitInstrInfo, RTGCPTR GCPtrDisp, bool fIsWrite,
+                                            PRTGCPTR pGCPtrMem)
+{
+    Assert(pExitInstrInfo);
+    Assert(pGCPtrMem);
+    Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
+
+    static uint64_t const s_auAddrSizeMasks[]   = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
+    static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
+    AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
+
+    uint8_t const   uAddrSize     =  pExitInstrInfo->InvVmxXsaves.u3AddrSize;
+    uint8_t const   iSegReg       =  pExitInstrInfo->InvVmxXsaves.iSegReg;
+    bool const      fIdxRegValid  = !pExitInstrInfo->InvVmxXsaves.fIdxRegInvalid;
+    uint8_t const   iIdxReg       =  pExitInstrInfo->InvVmxXsaves.iIdxReg;
+    uint8_t const   uScale        =  pExitInstrInfo->InvVmxXsaves.u2Scaling;
+    bool const      fBaseRegValid = !pExitInstrInfo->InvVmxXsaves.fBaseRegInvalid;
+    uint8_t const   iBaseReg      =  pExitInstrInfo->InvVmxXsaves.iBaseReg;
+    bool const      fIsMemOperand = !pExitInstrInfo->InvVmxXsaves.fIsRegOperand;
+    bool const      fIsLongMode   =  CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
+
+    /*
+     * Validate instruction information.
+     * This shouldn't happen on real hardware but useful while testing our nested hardware-virtualization code.
+     */
+    AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
+                          ("Invalid address size. ExitInstrInfo=%#RX32\n", pExitInstrInfo->u), VERR_VMX_IPE_1);
+    AssertLogRelMsgReturn(iSegReg  < X86_SREG_COUNT,
+                          ("Invalid segment register. ExitInstrInfo=%#RX32\n", pExitInstrInfo->u), VERR_VMX_IPE_2);
+    AssertLogRelMsgReturn(fIsMemOperand,
+                          ("Expected memory operand. ExitInstrInfo=%#RX32\n", pExitInstrInfo->u), VERR_VMX_IPE_3);
+
+    /*
+     * Compute the complete effective address.
+     *
+     * See AMD instruction spec. 1.4.2 "SIB Byte Format"
+     * See AMD spec. 4.5.2 "Segment Registers".
+     */
+    RTGCPTR GCPtrMem  = GCPtrDisp;
+    if (fBaseRegValid)
+        GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
+    if (fIdxRegValid)
+        GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
+
+    RTGCPTR const GCPtrOff = GCPtrMem;
+    if (   !fIsLongMode
+        || iSegReg >= X86_SREG_FS)
+        GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
+    GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
+
+    /*
+     * Validate effective address.
+     * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
+     */
+    uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
+    Assert(cbAccess > 0);
+    if (fIsLongMode)
+    {
+        if (X86_IS_CANONICAL(GCPtrMem))
+        {
+            *pGCPtrMem = GCPtrMem;
+            return VINF_SUCCESS;
+        }
+
+        Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
+        hmR0VmxSetPendingXcptGP(pVCpu, 0);
+        return VINF_HM_PENDING_XCPT;
+    }
+
+    /*
+     * This is a watered down version of iemMemApplySegment().
+     * Parts that are not applicable for VMX instructions like real-or-v8086 mode
+     * and segment CPL/DPL checks are skipped.
+     */
+    RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
+    RTGCPTR32 const GCPtrLast32  = GCPtrFirst32 + cbAccess - 1;
+    PCCPUMSELREG    pSel         = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
+
+    /* Check if the segment is present and usable. */
+    if (    pSel->Attr.n.u1Present
+        && !pSel->Attr.n.u1Unusable)
+    {
+        Assert(pSel->Attr.n.u1DescType);
+        if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
+        {
+            /* Check permissions for the data segment. */
+            if (   fIsWrite
+                && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
+            {
+                Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
+                hmR0VmxSetPendingXcptGP(pVCpu, iSegReg);
+                return VINF_HM_PENDING_XCPT;
+            }
+
+            /* Check limits if it's a normal data segment. */
+            if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
+            {
+                if (   GCPtrFirst32 > pSel->u32Limit
+                    || GCPtrLast32  > pSel->u32Limit)
+                {
+                    Log4Func(("Data segment limit exceeded. "
+                              "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
+                              GCPtrLast32, pSel->u32Limit));
+                    if (iSegReg == X86_SREG_SS)
+                        hmR0VmxSetPendingXcptSS(pVCpu, 0);
+                    else
+                        hmR0VmxSetPendingXcptGP(pVCpu, 0);
+                    return VINF_HM_PENDING_XCPT;
+                }
+            }
+            else
+            {
+               /* Check limits if it's an expand-down data segment.
+                  Note! The upper boundary is defined by the B bit, not the G bit! */
+               if (   GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
+                   || GCPtrLast32  > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
+               {
+                   Log4Func(("Expand-down data segment limit exceeded. "
+                             "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
+                             GCPtrLast32, pSel->u32Limit));
+                   if (iSegReg == X86_SREG_SS)
+                       hmR0VmxSetPendingXcptSS(pVCpu, 0);
+                   else
+                       hmR0VmxSetPendingXcptGP(pVCpu, 0);
+                   return VINF_HM_PENDING_XCPT;
+               }
+            }
+        }
+        else
+        {
+            /* Check permissions for the code segment. */
+            if (   fIsWrite
+                || !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ))
+            {
+                Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
+                Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
+                hmR0VmxSetPendingXcptGP(pVCpu, 0);
+                return VINF_HM_PENDING_XCPT;
+            }
+
+            /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
+            if (   GCPtrFirst32 > pSel->u32Limit
+                || GCPtrLast32  > pSel->u32Limit)
+            {
+                Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
+                          GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
+                if (iSegReg == X86_SREG_SS)
+                    hmR0VmxSetPendingXcptSS(pVCpu, 0);
+                else
+                    hmR0VmxSetPendingXcptGP(pVCpu, 0);
+                return VINF_HM_PENDING_XCPT;
+            }
+        }
+    }
+    else
+    {
+        Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
+        hmR0VmxSetPendingXcptGP(pVCpu, 0);
+        return VINF_HM_PENDING_XCPT;
+    }
+
+    *pGCPtrMem = GCPtrMem;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
+ * guest attempting to execute a VMX instruction.
+ *
+ * @returns Strict VBox status code (i.e. informational status codes too).
+ * @retval  VINF_SUCCESS if we should continue handling the VM-exit.
+ * @retval  VINF_HM_PENDING_XCPT if an exception was raised.
+ *
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   pVmxTransient   Pointer to the VMX transient structure.
+ *
+ * @todo    NstVmx: Document other error codes when VM-exit is implemented.
+ * @remarks No-long-jump zone!!!
+ */
+static VBOXSTRICTRC hmR0VmxCheckExitDueToVmxInstr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
+                              | CPUMCTX_EXTRN_HWVIRT);
+
+    if (   CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx)
+        || (    CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
+            && !CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
+    {
+        Log4Func(("In real/v86-mode or long-mode outside 64-bit code segment -> #UD\n"));
+        hmR0VmxSetPendingXcptUD(pVCpu);
+        return VINF_HM_PENDING_XCPT;
+    }
+
+    if (pVmxTransient->uExitReason == VMX_EXIT_VMXON)
+    {
+        /*
+         * We check CR4.VMXE because it is required to be always set while in VMX operation
+         * by physical CPUs and our CR4 read shadow is only consulted when executing specific
+         * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
+         * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
+         */
+        if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
+        {
+            Log4Func(("CR4.VMXE is not set -> #UD\n"));
+            hmR0VmxSetPendingXcptUD(pVCpu);
+            return VINF_HM_PENDING_XCPT;
+        }
+    }
+    else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
+    {
+        /*
+         * The guest has not entered VMX operation but attempted to execute a VMX instruction
+         * (other than VMXON), we need to raise a #UD.
+         */
+        Log4Func(("Not in VMX root mode -> #UD\n"));
+        hmR0VmxSetPendingXcptUD(pVCpu);
+        return VINF_HM_PENDING_XCPT;
+    }
+
+    if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
+    {
+        /*
+         * The nested-guest attempted to execute a VMX instruction, cause a VM-exit and let
+         * the guest hypervisor deal with it.
+         */
+        /** @todo NSTVMX: Trigger a VM-exit */
+    }
+
+    /*
+     * VMX instructions require CPL 0 except in VMX non-root mode where the VM-exit intercept
+     * (above) takes precedence over the CPL check.
+     */
+    if (CPUMGetGuestCPL(pVCpu) > 0)
+    {
+        Log4Func(("CPL > 0 -> #GP(0)\n"));
+        hmR0VmxSetPendingXcptGP(pVCpu, 0);
+        return VINF_HM_PENDING_XCPT;
+    }
+
+    return VINF_SUCCESS;
+}
+
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
 
 
@@ -5861,5 +6152,5 @@
 
     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
-    if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
+    if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
     {
         uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
@@ -8190,4 +8481,9 @@
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
 
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_ONLY_IN_IEM
+    Log2(("hmR0VmxPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
+    return VINF_EM_RESCHEDULE_REM;
+#endif
+
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     PGMRZDynMapFlushAutoSet(pVCpu);
@@ -9319,19 +9615,19 @@
         case VMX_EXIT_VMXON:            SET_BOTH(VMX_VMXON); break;
         case VMX_EXIT_MOV_CRX:
-            hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
-            if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
+            hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
+            if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
                 SET_BOTH(CRX_READ);
             else
                 SET_BOTH(CRX_WRITE);
-            uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQualification);
+            uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
             break;
         case VMX_EXIT_MOV_DRX:
-            hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
-            if (   VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification)
+            hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
+            if (   VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
                 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
                 SET_BOTH(DRX_READ);
             else
                 SET_BOTH(DRX_WRITE);
-            uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification);
+            uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
             break;
         case VMX_EXIT_RDMSR:            SET_BOTH(RDMSR); break;
@@ -9408,5 +9704,5 @@
     if (fDtrace1 || fDtrace2)
     {
-        hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+        hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
         hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
         PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
@@ -9593,8 +9889,8 @@
     else
     {
-        hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+        hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
         int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
         AssertRC(rc);
-        VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification);
+        VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
     }
 
@@ -10833,11 +11129,11 @@
 }
 
+
+/** @name VM-exit handlers.
+ * @{
+ */
 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
-
-/** @name VM-exit handlers.
- * @{
- */
 
 /**
@@ -10961,5 +11257,5 @@
                         rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
                         AssertRCReturn(rc, rc);
-                        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
+                        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
                                                pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
                                                0 /* GCPtrFaultAddress */);
@@ -11270,10 +11566,10 @@
     Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
 
-    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
     AssertRCReturn(rc, rc);
 
-    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQualification);
+    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQual);
 
     if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
@@ -11285,6 +11581,6 @@
     }
     else
-        AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) sttus: %Rrc\n",
-                         pVmxTransient->uExitQualification, VBOXSTRICTRC_VAL(rcStrict)));
+        AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
+                         VBOXSTRICTRC_VAL(rcStrict)));
     return rcStrict;
 }
@@ -11888,5 +12184,5 @@
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
 
-    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
@@ -11895,6 +12191,6 @@
     VBOXSTRICTRC rcStrict;
     PVM pVM  = pVCpu->CTX_SUFF(pVM);
-    RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
-    uint32_t const uAccessType           = VMX_EXIT_QUAL_CRX_ACCESS(uExitQualification);
+    RTGCUINTPTR const uExitQual = pVmxTransient->uExitQual;
+    uint32_t const uAccessType  = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
     switch (uAccessType)
     {
@@ -11902,12 +12198,11 @@
         {
             uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
-            rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr,
-                                                 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
-                                                 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification));
+            rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
+                                                 VMX_EXIT_QUAL_CRX_GENREG(uExitQual));
             AssertMsg(   rcStrict == VINF_SUCCESS
                       || rcStrict == VINF_IEM_RAISED_XCPT
                       || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 
-            switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
+            switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
             {
                 case 0:
@@ -11916,5 +12211,5 @@
                                      HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
                     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
-                    Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
+                    Log4Func(("CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
 
                     /*
@@ -11935,5 +12230,5 @@
                     {
                         /** @todo check selectors rather than returning all the time.  */
-                        Log4(("CRx CR0 write: back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
+                        Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
                         rcStrict = VINF_EM_RESCHEDULE_REM;
                     }
@@ -11956,5 +12251,5 @@
                     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
                                      HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
-                    Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
+                    Log4Func(("CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
                     break;
                 }
@@ -11965,6 +12260,6 @@
                     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
                                      HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
-                    Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
-                          pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
+                    Log4Func(("CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
+                              pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
                     break;
                 }
@@ -11979,5 +12274,5 @@
                 }
                 default:
-                    AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)));
+                    AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual)));
                     break;
             }
@@ -11990,16 +12285,15 @@
                    || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
                    || pVCpu->hm.s.fUsingDebugLoop
-                   || VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 3);
+                   || VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 3);
             /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
-            Assert(   VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 8
+            Assert(   VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 8
                    || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
 
-            rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr,
-                                                VMX_EXIT_QUAL_CRX_GENREG(uExitQualification),
-                                                VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification));
+            rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_GENREG(uExitQual),
+                                                VMX_EXIT_QUAL_CRX_REGISTER(uExitQual));
             AssertMsg(   rcStrict == VINF_SUCCESS
                       || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 #ifdef VBOX_WITH_STATISTICS
-            switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
+            switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
             {
                 case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
@@ -12010,7 +12304,7 @@
             }
 #endif
-            Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
+            Log4Func(("CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
                   VBOXSTRICTRC_VAL(rcStrict)));
-            if (VMX_EXIT_QUAL_CRX_GENREG(uExitQualification) == X86_GREG_xSP)
+            if (VMX_EXIT_QUAL_CRX_GENREG(uExitQual) == X86_GREG_xSP)
                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
             else
@@ -12027,5 +12321,5 @@
             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
-            Log4(("CRX CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
+            Log4Func(("CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
             break;
         }
@@ -12034,6 +12328,5 @@
         {
             /* Note! LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here. */
-            rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
-                                          VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQualification));
+            rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual));
             AssertMsg(   rcStrict == VINF_SUCCESS
                       || rcStrict == VINF_IEM_RAISED_XCPT
@@ -12042,5 +12335,5 @@
             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
-            Log4(("CRX LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
+            Log4Func(("LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
             break;
         }
@@ -12075,5 +12368,5 @@
 
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER);
@@ -12082,9 +12375,8 @@
 
     /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
-    uint32_t uIOPort      = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQualification);
-    uint8_t  uIOWidth     = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQualification);
-    bool     fIOWrite     = (   VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQualification)
-                             == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
-    bool     fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQualification);
+    uint32_t uIOPort      = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
+    uint8_t  uIOWidth     = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQual);
+    bool     fIOWrite     = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
+    bool     fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
     bool     fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
     bool     fDbgStepping = pVCpu->hm.s.fSingleInstruction;
@@ -12124,5 +12416,5 @@
              * interpreting the instruction.
              */
-            Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
+            Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
             AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
             bool const fInsOutsInfo = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
@@ -12134,5 +12426,5 @@
                 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
                 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
-                bool const fRep           = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification);
+                bool const fRep           = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
                 if (fIOWrite)
                     rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
@@ -12160,7 +12452,7 @@
              * IN/OUT - I/O instruction.
              */
-            Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
+            Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
             uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
-            Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification));
+            Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
             if (fIOWrite)
             {
@@ -12296,5 +12588,5 @@
         Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
               pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
-              VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification) ? "REP " : "",
+              VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
               fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth));
 
@@ -12319,11 +12611,11 @@
 
     /* Check if this task-switch occurred while delivery an event through the guest IDT. */
-    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     AssertRCReturn(rc, rc);
-    if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
+    if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
     {
         rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
         AssertRCReturn(rc, rc);
-        if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
+        if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
         {
             uint32_t       uErrCode;
@@ -12350,5 +12642,5 @@
                                    0 /* cbInstr */, uErrCode, GCPtrFaultAddress);
 
-            Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
+            Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", uIntType, uVector));
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
             return VINF_EM_RAW_INJECT_TRPM_EVENT;
@@ -12406,9 +12698,9 @@
     /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
     int rc  = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
-    rc     |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+    rc     |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     AssertRCReturn(rc, rc);
 
     /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Phyiscal Addresses" */
-    uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
+    uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
     VBOXSTRICTRC rcStrict2;
     switch (uAccessType)
@@ -12418,13 +12710,13 @@
         {
             AssertMsg(   !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
-                      || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != XAPIC_OFF_TPR,
+                      || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
                       ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
 
             RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase;   /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */
             GCPhys &= PAGE_BASE_GC_MASK;
-            GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
+            GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
             PVM pVM = pVCpu->CTX_SUFF(pVM);
             Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
-                 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
+                 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
 
             PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
@@ -12494,7 +12786,7 @@
 
 #ifdef VBOX_WITH_STATISTICS
-        rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+        rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
         AssertRCReturn(rc, rc);
-        if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
+        if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
         else
@@ -12510,5 +12802,5 @@
      */
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
     AssertRCReturn(rc, rc);
@@ -12516,9 +12808,9 @@
 
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
+    if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
     {
         rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
-                                 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification),
-                                 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification));
+                                 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
+                                 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
         if (RT_SUCCESS(rc))
             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
@@ -12528,6 +12820,6 @@
     {
         rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
-                                VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification),
-                                VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification));
+                                VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
+                                VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
     }
@@ -12596,5 +12888,5 @@
         PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
         rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
-        Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
+        Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
         if (   rcStrict == VINF_SUCCESS
             || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
@@ -12655,17 +12947,17 @@
     RTGCPHYS GCPhys;
     int rc  = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
-    rc     |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+    rc     |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     rc     |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     AssertRCReturn(rc, rc);
 
     /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
-    AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
+    AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQual));
 
     RTGCUINT uErrorCode = 0;
-    if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
+    if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
         uErrorCode |= X86_TRAP_PF_ID;
-    if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_DATA_WRITE)
+    if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_DATA_WRITE)
         uErrorCode |= X86_TRAP_PF_RW;
-    if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
+    if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
         uErrorCode |= X86_TRAP_PF_P;
 
@@ -12677,6 +12969,6 @@
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
 
-    Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
-              uErrorCode, pCtx->cs.Sel, pCtx->rip));
+    Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQual, GCPhys, uErrorCode,
+              pCtx->cs.Sel, pCtx->rip));
 
     VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
@@ -12700,11 +12992,10 @@
 /** @} */
 
+/** @name VM-exit exception handlers.
+ * @{
+ */
 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit exception handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
-
-/** @name VM-exit exception handlers.
- * @{
- */
 
 /**
@@ -12732,6 +13023,6 @@
     }
 
-    hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                           pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+    hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                           pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     return rc;
 }
@@ -12758,6 +13049,6 @@
         AssertRCReturn(rc, rc);
 
-        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                               pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     }
 
@@ -12782,6 +13073,6 @@
     Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
 
-    hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                           pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+    hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                           pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     return VINF_SUCCESS;
 }
@@ -12800,10 +13091,9 @@
      * for processing.
      */
-    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
 
     /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
     uint64_t uDR6 = X86_DR6_INIT_VAL;
-    uDR6         |= (  pVmxTransient->uExitQualification
-                     & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
+    uDR6         |= (pVmxTransient->uExitQual & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
 
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
@@ -12856,6 +13146,6 @@
         rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
         AssertRCReturn(rc, rc);
-        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                               pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
         return VINF_SUCCESS;
     }
@@ -12899,6 +13189,6 @@
         Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pCtx->cs.Sel, pCtx->rip,
                   pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
-        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                               pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
         return rc;
     }
@@ -13166,5 +13456,5 @@
                     && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
                 {
-                    Log4(("hmR0VmxExitXcptGP: mode changed -> VINF_EM_RESCHEDULE\n"));
+                    Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
                     /** @todo Exit fRealOnV86Active here w/o dropping back to ring-3. */
                     rc = VINF_EM_RESCHEDULE;
@@ -13217,6 +13507,6 @@
 #endif
 
-    hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                           pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+    hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                           pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     return VINF_SUCCESS;
 }
@@ -13230,5 +13520,5 @@
     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     rc    |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     rc    |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
@@ -13245,6 +13535,6 @@
         if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
         {
-            hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                                   0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);
+            hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
+                                   pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
         }
         else
@@ -13270,10 +13560,9 @@
     AssertRCReturn(rc, rc);
 
-    Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
-              pCtx->cs.Sel, pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
-
-    TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
-    rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx),
-                          (RTGCPTR)pVmxTransient->uExitQualification);
+    Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQual, pCtx->cs.Sel,
+              pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
+
+    TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
+    rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
 
     Log4Func(("#PF: rc=%Rrc\n", rc));
@@ -13298,6 +13587,6 @@
             TRPMResetTrap(pVCpu);
             pVCpu->hm.s.Event.fPending = false;                 /* In case it's a contributory #PF. */
-            hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                                   0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
+            hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
+                                   uGstErrorCode, pVmxTransient->uExitQual);
         }
         else
@@ -13321,2 +13610,186 @@
 /** @} */
 
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+
+/** @name Nested-guest VM-exit handlers.
+ * @{
+ */
+/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
+/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= Nested-guest VM-exit handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
+/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
+
+/**
+ * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmclear(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmclear. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmlaunch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmlaunch. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmptrld(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmptrld. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmptrst(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmptrst. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmread(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmread. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmresume(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmresume. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmwrite(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmwrite. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
+    AssertRCReturn(rc, rc);
+
+    VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr);
+    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+    {
+        /* VMXOFF on success changes the internal hwvirt state but not anything that's visible to the guest. */
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
+    }
+    else if (rcStrict == VINF_IEM_RAISED_XCPT)
+    {
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+        rcStrict = VINF_SUCCESS;
+    }
+    return rcStrict;
+}
+
+
+/**
+ * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+    rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
+    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
+    rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
+    AssertRCReturn(rc, rc);
+
+    VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToVmxInstr(pVCpu, pVmxTransient);
+    if (rcStrict == VINF_SUCCESS)
+    { /* likely */ }
+    else if (rcStrict == VINF_HM_PENDING_XCPT)
+    {
+        Log4Func(("Privilege checks failed, raising xcpt %#x!\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
+        return VINF_SUCCESS;
+    }
+    else
+    {
+        Log4Func(("hmR0VmxCheckExitDueToVmxInstr failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+        return rcStrict;
+    }
+
+    RTGCPTR            GCPtrVmxon;
+    PCVMXEXITINSTRINFO pExitInstrInfo = &pVmxTransient->ExitInstrInfo;
+    RTGCPTR const      GCPtrDisp      =  pVmxTransient->uExitQual;
+    rcStrict = hmR0VmxDecodeMemOperand(pVCpu, pExitInstrInfo, GCPtrDisp, false /*fIsWrite*/, &GCPtrVmxon);
+    if (rcStrict == VINF_SUCCESS)
+    { /* likely */ }
+    else if (rcStrict == VINF_HM_PENDING_XCPT)
+    {
+        Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
+        return VINF_SUCCESS;
+    }
+    else
+    {
+        Log4Func(("hmR0VmxDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+        return rcStrict;
+    }
+
+    rcStrict = IEMExecDecodedVmxon(pVCpu, pVmxTransient->cbInstr, GCPtrVmxon, pExitInstrInfo->u, GCPtrDisp);
+    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
+    else if (rcStrict == VINF_IEM_RAISED_XCPT)
+    {
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+        rcStrict = VINF_SUCCESS;
+    }
+    return rcStrict;
+}
+
+/** @} */
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
+
Index: /trunk/src/VBox/VMM/VMMR3/CPUM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 73606)
@@ -2775,6 +2775,7 @@
     PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
     static const char *const s_aHwvirtModes[] = { "No/inactive", "SVM", "VMX", "Common" };
-    uint8_t const idxHwvirtState = CPUMIsGuestInSvmNestedHwVirtMode(pCtx) ? CPUMHWVIRTDUMP_SVM
-                                 : CPUMIsGuestInVmxNestedHwVirtMode(pCtx) ? CPUMHWVIRTDUMP_VMX : CPUMHWVIRTDUMP_NONE;
+    bool const fSvm = pVM->cpum.ro.GuestFeatures.fSvm;
+    bool const fVmx = pVM->cpum.ro.GuestFeatures.fVmx;
+    uint8_t const idxHwvirtState = fSvm ? CPUMHWVIRTDUMP_SVM : (fVmx ? CPUMHWVIRTDUMP_VMX : CPUMHWVIRTDUMP_NONE);
     AssertCompile(CPUMHWVIRTDUMP_LAST <= RT_ELEMENTS(s_aHwvirtModes));
     Assert(idxHwvirtState < RT_ELEMENTS(s_aHwvirtModes));
@@ -2788,15 +2789,14 @@
 
     if (fDumpState & CPUMHWVIRTDUMP_COMMON)
-    {
-        pHlp->pfnPrintf(pHlp, "fGif                           = %RTbool\n", pCtx->hwvirt.fGif);
-        pHlp->pfnPrintf(pHlp, "fLocalForcedActions            = %#RX32\n",  pCtx->hwvirt.fLocalForcedActions);
-    }
+        pHlp->pfnPrintf(pHlp, "fLocalForcedActions          = %#RX32\n",  pCtx->hwvirt.fLocalForcedActions);
+
     pHlp->pfnPrintf(pHlp, "%s hwvirt state%s\n", pcszHwvirtMode, (fDumpState & (CPUMHWVIRTDUMP_SVM | CPUMHWVIRTDUMP_VMX)) ?
                                                                  ":" : "");
     if (fDumpState & CPUMHWVIRTDUMP_SVM)
     {
+        pHlp->pfnPrintf(pHlp, "  fGif                       = %RTbool\n", pCtx->hwvirt.fGif);
+
         char szEFlags[80];
         cpumR3InfoFormatFlags(&szEFlags[0], pCtx->hwvirt.svm.HostState.rflags.u);
-
         pHlp->pfnPrintf(pHlp, "  uMsrHSavePa                = %#RX64\n",    pCtx->hwvirt.svm.uMsrHSavePa);
         pHlp->pfnPrintf(pHlp, "  GCPhysVmcb                 = %#RGp\n",     pCtx->hwvirt.svm.GCPhysVmcb);
@@ -2839,10 +2839,14 @@
     }
 
-    /** @todo Intel.  */
-#if 0
     if (fDumpState & CPUMHWVIRTDUMP_VMX)
     {
+        pHlp->pfnPrintf(pHlp, "  fInVmxRootMode             = %RTbool\n",   pCtx->hwvirt.vmx.fInVmxRootMode);
+        pHlp->pfnPrintf(pHlp, "  fInVmxNonRootMode          = %RTbool\n",   pCtx->hwvirt.vmx.fInVmxNonRootMode);
+        pHlp->pfnPrintf(pHlp, "  GCPhysVmxon                = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysVmxon);
+        pHlp->pfnPrintf(pHlp, "  GCPhysVmcs                 = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysVmcs);
+        pHlp->pfnPrintf(pHlp, "  enmInstrDiag               = %u (%s)\n",   pCtx->hwvirt.vmx.enmInstrDiag,
+                        HMVmxGetInstrDiagDesc(pCtx->hwvirt.vmx.enmInstrDiag));
+        /** @todo NSTVMX: Dump remaining/new fields. */
     }
-#endif
 
 #undef CPUMHWVIRTDUMP_NONE
Index: /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 73606)
@@ -3937,25 +3937,34 @@
     AssertLogRelRCReturn(rc, rc);
 
-#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
-    /** @cfgm{/CPUM/NestedHWVirt, bool, false}
-     * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest.
-     * The default is false, and when enabled requires nested paging and AMD-V or
-     * unrestricted guest mode.
-     */
-    rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false);
-    AssertLogRelRCReturn(rc, rc);
-    if (   pConfig->fNestedHWVirt
-        && !fNestedPagingAndFullGuestExec)
-        return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
-                          "Cannot enable nested VT-x/AMD-V without nested-paging and unresricted guest execution!\n");
-
-    /** @todo Think about enabling this later with NEM/KVM. */
-    if (   pConfig->fNestedHWVirt
-        && VM_IS_NEM_ENABLED(pVM))
-    {
-        LogRel(("CPUM: WARNING! Can't turn on nested VT-x/AMD-V when NEM is used!\n"));
-        pConfig->fNestedHWVirt = false;
-    }
+    bool fQueryNestedHwvirt = false;
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+    fQueryNestedHwvirt |= RT_BOOL(pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD);
 #endif
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+    fQueryNestedHwvirt |= RT_BOOL(   pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
+                                  || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA);
+#endif
+    if (fQueryNestedHwvirt)
+    {
+        /** @cfgm{/CPUM/NestedHWVirt, bool, false}
+         * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest.
+         * The default is false, and when enabled requires nested paging and AMD-V or
+         * unrestricted guest mode.
+         */
+        rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false);
+        AssertLogRelRCReturn(rc, rc);
+        if (   pConfig->fNestedHWVirt
+            && !fNestedPagingAndFullGuestExec)
+            return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
+                              "Cannot enable nested VT-x/AMD-V without nested-paging and unrestricted guest execution!\n");
+
+        /** @todo Think about enabling this later with NEM/KVM. */
+        if (   pConfig->fNestedHWVirt
+            && VM_IS_NEM_ENABLED(pVM))
+        {
+            LogRel(("CPUM: WARNING! Can't turn on nested VT-x/AMD-V when NEM is used!\n"));
+            pConfig->fNestedHWVirt = false;
+        }
+    }
 
     /*
Index: /trunk/src/VBox/VMM/VMMR3/EM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/EM.cpp	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMR3/EM.cpp	(revision 73606)
@@ -1815,5 +1815,5 @@
     }
 
-    if (CPUMIsGuestInVmxNestedHwVirtMode(&pVCpu->cpum.GstCtx))
+    if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
     { /** @todo Nested VMX. */ }
 
@@ -2147,5 +2147,5 @@
                 Assert(!HMR3IsEventPending(pVCpu));
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-                if (CPUMIsGuestInNestedHwVirtMode(&pVCpu->cpum.GstCtx))
+                if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
                 {
                     bool fResched, fInject;
Index: /trunk/src/VBox/VMM/VMMR3/HM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 73606)
@@ -42,4 +42,5 @@
 #include <VBox/vmm/stam.h>
 #include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
 #include <VBox/vmm/pdmapi.h>
 #include <VBox/vmm/pgm.h>
@@ -77,6 +78,7 @@
 #define EXIT_REASON(def, val, str) #def " - " #val " - " str
 #define EXIT_REASON_NIL() NULL
-/** Exit reason descriptions for VT-x, used to describe statistics. */
-static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
+/** Exit reason descriptions for VT-x, used to describe statistics and exit
+ *  history. */
+static const char * const g_apszVmxExitReasons[MAX_EXITREASON_STAT] =
 {
     EXIT_REASON(VMX_EXIT_XCPT_OR_NMI            ,   0, "Exception or non-maskable interrupt (NMI)."),
@@ -149,11 +151,11 @@
 #define MAX_EXITREASON_VTX                         64
 
-/** A partial list of Exit reason descriptions for AMD-V, used to describe
- *  statistics.
+/** A partial list of \#EXIT reason descriptions for AMD-V, used to describe
+ *  statistics and exit history.
  *
  *  @note AMD-V have annoyingly large gaps (e.g. \#NPF VMEXIT comes at 1024),
  *        this array doesn't contain the entire set of exit reasons, we
  *        handle them via hmSvmGetSpecialExitReasonDesc(). */
-static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
+static const char * const g_apszSvmExitReasons[MAX_EXITREASON_STAT] =
 {
     EXIT_REASON(SVM_EXIT_READ_CR0     ,    0, "Read CR0."),
@@ -310,5 +312,5 @@
 /**
  * Gets the SVM exit reason if it's one of the reasons not present in the @c
- * g_apszAmdVExitReasons array.
+ * g_apszSvmExitReasons array.
  *
  * @returns The exit reason or NULL if unknown.
@@ -1061,6 +1063,6 @@
 #undef HM_REG_COUNTER
 
-        const char *const *papszDesc = ASMIsIntelCpu() || ASMIsViaCentaurCpu() ? &g_apszVTxExitReasons[0]
-                                                                               : &g_apszAmdVExitReasons[0];
+        const char *const *papszDesc = ASMIsIntelCpu() || ASMIsViaCentaurCpu() ? &g_apszVmxExitReasons[0]
+                                                                               : &g_apszSvmExitReasons[0];
 
         /*
@@ -1938,5 +1940,5 @@
     uint32_t u32Model;
     uint32_t u32Stepping;
-    if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
+    if (HMSvmIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
         LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
     LogRel(("HM: Max resume loops                  = %u\n",     pVM->hm.s.cMaxResumeLoops));
@@ -2948,5 +2950,6 @@
 
 #ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
-    if (CPUMIsGuestInNestedHwVirtMode(pCtx))
+    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
+        || CPUMIsGuestVmxEnabled(pCtx))
     {
         Log(("HMR3CanExecuteGuest: In nested-guest mode - returning false"));
@@ -3151,7 +3154,5 @@
         &&  CPUMIsGuestInRealModeEx(pCtx)
         && !PDMVmmDevHeapIsEnabled(pVM))
-    {
         return true;
-    }
 
     return false;
@@ -3429,11 +3430,11 @@
                 LogRel(("HM: CPU[%u] Exit reason          %#x\n", i, pVCpu->hm.s.vmx.LastError.u32ExitReason));
 
-                if (   pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMLAUCH_NON_CLEAR_VMCS
-                    || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMRESUME_NON_LAUNCHED_VMCS)
+                if (   pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
+                    || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
                 {
                     LogRel(("HM: CPU[%u] Entered Host Cpu     %u\n",  i, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
                     LogRel(("HM: CPU[%u] Current Host Cpu     %u\n",  i, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
                 }
-                else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
+                else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTL)
                 {
                     LogRel(("HM: CPU[%u] PinCtls          %#RX32\n", i, pVCpu->hm.s.vmx.u32PinCtls));
@@ -3756,6 +3757,6 @@
 VMMR3DECL(const char *) HMR3GetVmxExitName(uint32_t uExit)
 {
-    if (uExit < RT_ELEMENTS(g_apszVTxExitReasons))
-        return g_apszVTxExitReasons[uExit];
+    if (uExit < RT_ELEMENTS(g_apszVmxExitReasons))
+        return g_apszVmxExitReasons[uExit];
     return NULL;
 }
@@ -3770,6 +3771,6 @@
 VMMR3DECL(const char *) HMR3GetSvmExitName(uint32_t uExit)
 {
-    if (uExit < RT_ELEMENTS(g_apszAmdVExitReasons))
-        return g_apszAmdVExitReasons[uExit];
+    if (uExit < RT_ELEMENTS(g_apszSvmExitReasons))
+        return g_apszSvmExitReasons[uExit];
     return hmSvmGetSpecialExitReasonDesc(uExit);
 }
Index: /trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp	(revision 73605)
+++ /trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp	(revision 73606)
@@ -4549,4 +4549,13 @@
     if (pVCpu->pgm.s.fA20Enabled != fEnable)
     {
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+        PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+        if (   CPUMIsGuestInVmxRootMode(pCtx)
+            && !fEnable)
+        {
+            Log(("Cannot enter A20M mode while in VMX root mode\n"));
+            return;
+        }
+#endif
         pVCpu->pgm.s.fA20Enabled = fEnable;
         pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20);
Index: /trunk/src/VBox/VMM/include/HMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/HMInternal.h	(revision 73605)
+++ /trunk/src/VBox/VMM/include/HMInternal.h	(revision 73606)
@@ -21,5 +21,4 @@
 #include <VBox/cdefs.h>
 #include <VBox/types.h>
-#include <VBox/vmm/em.h>
 #include <VBox/vmm/stam.h>
 #include <VBox/dis.h>
Index: /trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp	(revision 73605)
+++ /trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp	(revision 73606)
@@ -127,4 +127,7 @@
 #define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV()        do { } while (0)
 #define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES()                                    do { } while (0)
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+# define IEMOP_HLP_VMX_INSTR()                              do { } while (0)
+#endif
 
 
Index: /trunk/src/VBox/VMM/testcase/tstVMStruct.h
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 73605)
+++ /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 73606)
@@ -146,4 +146,11 @@
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvIoBitmapR3);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.HCPhysVmcb);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.GCPhysVmxon);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.GCPhysVmcs);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.enmInstrDiag);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fInVmxRootMode);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fInVmxNonRootMode);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR0);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR3);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.fLocalForcedActions);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.fGif);
Index: /trunk/src/recompiler/VBoxRecompiler.c
===================================================================
--- /trunk/src/recompiler/VBoxRecompiler.c	(revision 73605)
+++ /trunk/src/recompiler/VBoxRecompiler.c	(revision 73606)
@@ -2136,5 +2136,6 @@
 
     Assert(pCtx);
-    if (CPUMIsGuestInNestedHwVirtMode(pCtx))
+    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
+        || CPUMIsGuestInVmxNonRootMode(pCtx))
     {
         AssertMsgFailed(("Bad scheduling - can't exec. nested-guest in REM!\n"));
