Index: /trunk/include/VBox/vmm/hm.h
===================================================================
--- /trunk/include/VBox/vmm/hm.h	(revision 76992)
+++ /trunk/include/VBox/vmm/hm.h	(revision 76993)
@@ -137,14 +137,16 @@
 VMM_INT_DECL(bool)              HMIsSvmActive(PVM pVM);
 VMM_INT_DECL(bool)              HMIsVmxActive(PVM pVM);
-VMM_INT_DECL(bool)              HMIsVmxSupported(PVM pVM);
-VMM_INT_DECL(const char *)      HMVmxGetDiagDesc(VMXVDIAG enmDiag);
-VMM_INT_DECL(const char *)      HMVmxGetAbortDesc(VMXABORT enmAbort);
-VMM_INT_DECL(const char *)      HMVmxGetVmcsStateDesc(uint8_t fVmcsState);
-VMM_INT_DECL(const char *)      HMVmxGetIdtVectoringInfoTypeDesc(uint8_t uType);
-VMM_INT_DECL(const char *)      HMVmxGetExitIntInfoTypeDesc(uint8_t uType);
-VMM_INT_DECL(const char *)      HMVmxGetEntryIntInfoTypeDesc(uint8_t uType);
-VMM_INT_DECL(void)              HMHCPagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode);
-VMM_INT_DECL(void)              HMVmxGetVmxMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pMsrs, PVMXMSRS pVmxMsrs);
-VMM_INT_DECL(void)              HMVmxGetSvmMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pMsrs, PSVMMSRS pSvmMsrs);
+VMM_INT_DECL(const char *)      HMGetVmxDiagDesc(VMXVDIAG enmDiag);
+VMM_INT_DECL(const char *)      HMGetVmxAbortDesc(VMXABORT enmAbort);
+VMM_INT_DECL(const char *)      HMGetVmxVmcsStateDesc(uint8_t fVmcsState);
+VMM_INT_DECL(const char *)      HMGetVmxIdtVectoringInfoTypeDesc(uint8_t uType);
+VMM_INT_DECL(const char *)      HMGetVmxExitIntInfoTypeDesc(uint8_t uType);
+VMM_INT_DECL(const char *)      HMGetVmxEntryIntInfoTypeDesc(uint8_t uType);
+VMM_INT_DECL(const char *)      HMGetVmxExitName(uint32_t uExit);
+VMM_INT_DECL(const char *)      HMGetSvmExitName(uint32_t uExit);
+VMM_INT_DECL(void)              HMDumpHwvirtVmxState(PVMCPU pVCpu);
+VMM_INT_DECL(void)              HMHCChangedPagingMode(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode);
+VMM_INT_DECL(void)              HMGetVmxMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pMsrs, PVMXMSRS pVmxMsrs);
+VMM_INT_DECL(void)              HMGetSvmMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pMsrs, PSVMMSRS pSvmMsrs);
 /** @} */
 
@@ -155,5 +157,5 @@
  * found in CPUM.
  * @{ */
-VMM_INT_DECL(bool)              HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx);
+VMM_INT_DECL(bool)              HMCanExecuteVmxGuest(PVMCPU pVCpu, PCCPUMCTX pCtx);
 /** @} */
 
@@ -171,6 +173,6 @@
 /** @name R0, R3 HM (VMX/SVM agnostic) handlers.
  * @{ */
-VMM_INT_DECL(int)               HMFlushTLB(PVMCPU pVCpu);
-VMM_INT_DECL(int)               HMFlushTLBOnAllVCpus(PVM pVM);
+VMM_INT_DECL(int)               HMFlushTlb(PVMCPU pVCpu);
+VMM_INT_DECL(int)               HMFlushTlbOnAllVCpus(PVM pVM);
 VMM_INT_DECL(int)               HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt);
 VMM_INT_DECL(int)               HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys);
@@ -183,11 +185,11 @@
 /** @name R0, R3 SVM handlers.
  * @{ */
-VMM_INT_DECL(bool)              HMSvmIsVGifActive(PVM pVM);
-VMM_INT_DECL(uint64_t)          HMSvmNstGstApplyTscOffset(PVMCPU pVCpu, uint64_t uTicks);
+VMM_INT_DECL(bool)              HMIsSvmVGifActive(PVM pVM);
+VMM_INT_DECL(uint64_t)          HMApplySvmNstGstTscOffset(PVMCPU pVCpu, uint64_t uTicks);
 # ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-VMM_INT_DECL(void)              HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx);
+VMM_INT_DECL(void)              HMNotifySvmNstGstVmexit(PVMCPU pVCpu, PCPUMCTX pCtx);
 # endif
-VMM_INT_DECL(int)               HMSvmIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping);
-VMM_INT_DECL(int)               HMHCSvmMaybeMovTprHypercall(PVMCPU pVCpu);
+VMM_INT_DECL(int)               HMIsSubjectToSvmErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping);
+VMM_INT_DECL(int)               HMHCMaybeMovTprSvmHypercall(PVMCPU pVCpu);
 /** @} */
 
@@ -196,6 +198,6 @@
 /** @name RC HM (VMX/SVM agnostic) handlers.
  * @{ */
-# define HMFlushTLB(pVCpu)                                            do { } while (0)
-# define HMFlushTLBOnAllVCpus(pVM)                                    do { } while (0)
+# define HMFlushTlb(pVCpu)                                            do { } while (0)
+# define HMFlushTlbOnAllVCpus(pVM)                                    do { } while (0)
 # define HMInvalidatePageOnAllVCpus(pVM, GCVirt)                      do { } while (0)
 # define HMInvalidatePhysPage(pVM,  GCVirt)                           do { } while (0)
@@ -208,9 +210,9 @@
 /** @name RC SVM handlers.
  * @{ */
-# define HMSvmIsVGifActive(pVM)                                       false
-# define HMSvmNstGstApplyTscOffset(pVCpu, uTicks)                     (uTicks)
-# define HMSvmNstGstVmExitNotify(pVCpu, pCtx)                         do { } while (0)
-# define HMSvmIsSubjectToErratum170(puFamily, puModel, puStepping)    false
-# define HMHCSvmMaybeMovTprHypercall(pVCpu)                           do { } while (0)
+# define HMIsSvmVGifActive(pVM)                                       false
+# define HMApplySvmNstGstTscOffset(pVCpu, uTicks)                     (uTicks)
+# define HMNotifySvmNstGstVmexit(pVCpu, pCtx)                         do { } while (0)
+# define HMIsSubjectToSvmErratum170(puFamily, puModel, puStepping)    false
+# define HMHCMaybeMovTprSvmHypercall(pVCpu)                           do { } while (0)
 /** @} */
 
@@ -281,6 +283,4 @@
 VMMR3_INT_DECL(bool)            HMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx);
 VMMR3_INT_DECL(bool)            HMR3IsVmxPreemptionTimerUsed(PVM pVM);
-VMMR3DECL(const char *)         HMR3GetVmxExitName(uint32_t uExit);
-VMMR3DECL(const char *)         HMR3GetSvmExitName(uint32_t uExit);
 /** @} */
 #endif /* IN_RING3 */
Index: /trunk/include/VBox/vmm/hm_svm.h
===================================================================
--- /trunk/include/VBox/vmm/hm_svm.h	(revision 76992)
+++ /trunk/include/VBox/vmm/hm_svm.h	(revision 76993)
@@ -1162,6 +1162,6 @@
  * @{
  */
-VMM_INT_DECL(int)       HMSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit);
-VMM_INT_DECL(bool)      HMSvmIsIOInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
+VMM_INT_DECL(int)       HMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit);
+VMM_INT_DECL(bool)      HMIsSvmIoInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                                  uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
                                                  PSVMIOIOEXITINFO pIoExitInfo);
Index: /trunk/include/VBox/vmm/hm_vmx.h
===================================================================
--- /trunk/include/VBox/vmm/hm_vmx.h	(revision 76992)
+++ /trunk/include/VBox/vmm/hm_vmx.h	(revision 76993)
@@ -1400,6 +1400,5 @@
 /** @name VMX abort reasons.
  * See Intel spec. "27.7 VMX Aborts".
- * Update HMVmxGetAbortDesc() if new reasons are added.
- * @{
+ * Update HMGetVmxAbortDesc() if new reasons are added. @{
  */
 typedef enum
@@ -4094,7 +4093,7 @@
  * @{
  */
-VMM_INT_DECL(int)   HMVmxGetMsrPermission(void const *pvMsrBitmap, uint32_t idMsr, PVMXMSREXITREAD penmRead,
+VMM_INT_DECL(int)   HMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr, PVMXMSREXITREAD penmRead,
                                           PVMXMSREXITWRITE penmWrite);
-VMM_INT_DECL(bool)  HMVmxGetIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort,
+VMM_INT_DECL(bool)  HMGetVmxIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort,
                                                uint8_t cbAccess);
 /** @} */
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 76993)
@@ -6248,5 +6248,5 @@
     {
         /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
-        HMFlushTLB(pVCpu);
+        HMFlushTlb(pVCpu);
 
         /* Notify PGM about NXE changes. */
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 76993)
@@ -2985,5 +2985,5 @@
  * @param   uTicks      The guest TSC.
  *
- * @sa      HMSvmNstGstApplyTscOffset.
+ * @sa      HMApplySvmNstGstTscOffset.
  */
 VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks)
@@ -3006,5 +3006,5 @@
             return uTicks + pVmcb->ctrl.u64TSCOffset;
         }
-        return HMSvmNstGstApplyTscOffset(pVCpu, uTicks);
+        return HMApplySvmNstGstTscOffset(pVCpu, uTicks);
     }
 #else
Index: /trunk/src/VBox/VMM/VMMAll/HMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMAll.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMAll/HMAll.cpp	(revision 76993)
@@ -39,4 +39,263 @@
 
 
+/*********************************************************************************************************************************
+*   Global Variables                                                                                                             *
+*********************************************************************************************************************************/
+#define EXIT_REASON(a_Def, a_Val, a_Str)      #a_Def " - " #a_Val " - " a_Str
+#define EXIT_REASON_NIL()                     NULL
+
+/** Exit reason descriptions for VT-x, used to describe statistics and exit
+ *  history. */
+static const char * const g_apszVmxExitReasons[MAX_EXITREASON_STAT] =
+{
+    EXIT_REASON(VMX_EXIT_XCPT_OR_NMI            ,   0, "Exception or non-maskable interrupt (NMI)."),
+    EXIT_REASON(VMX_EXIT_EXT_INT                ,   1, "External interrupt."),
+    EXIT_REASON(VMX_EXIT_TRIPLE_FAULT           ,   2, "Triple fault."),
+    EXIT_REASON(VMX_EXIT_INIT_SIGNAL            ,   3, "INIT signal."),
+    EXIT_REASON(VMX_EXIT_SIPI                   ,   4, "Start-up IPI (SIPI)."),
+    EXIT_REASON(VMX_EXIT_IO_SMI_IRQ             ,   5, "I/O system-management interrupt (SMI)."),
+    EXIT_REASON(VMX_EXIT_SMI_IRQ                ,   6, "Other SMI."),
+    EXIT_REASON(VMX_EXIT_INT_WINDOW             ,   7, "Interrupt window."),
+    EXIT_REASON(VMX_EXIT_NMI_WINDOW             ,   8, "NMI window."),
+    EXIT_REASON(VMX_EXIT_TASK_SWITCH            ,   9, "Task switch."),
+    EXIT_REASON(VMX_EXIT_CPUID                  ,  10, "CPUID instruction."),
+    EXIT_REASON(VMX_EXIT_GETSEC                 ,  11, "GETSEC instruction."),
+    EXIT_REASON(VMX_EXIT_HLT                    ,  12, "HLT instruction."),
+    EXIT_REASON(VMX_EXIT_INVD                   ,  13, "INVD instruction."),
+    EXIT_REASON(VMX_EXIT_INVLPG                 ,  14, "INVLPG instruction."),
+    EXIT_REASON(VMX_EXIT_RDPMC                  ,  15, "RDPMC instruction."),
+    EXIT_REASON(VMX_EXIT_RDTSC                  ,  16, "RDTSC instruction."),
+    EXIT_REASON(VMX_EXIT_RSM                    ,  17, "RSM instruction in SMM."),
+    EXIT_REASON(VMX_EXIT_VMCALL                 ,  18, "VMCALL instruction."),
+    EXIT_REASON(VMX_EXIT_VMCLEAR                ,  19, "VMCLEAR instruction."),
+    EXIT_REASON(VMX_EXIT_VMLAUNCH               ,  20, "VMLAUNCH instruction."),
+    EXIT_REASON(VMX_EXIT_VMPTRLD                ,  21, "VMPTRLD instruction."),
+    EXIT_REASON(VMX_EXIT_VMPTRST                ,  22, "VMPTRST instruction."),
+    EXIT_REASON(VMX_EXIT_VMREAD                 ,  23, "VMREAD instruction."),
+    EXIT_REASON(VMX_EXIT_VMRESUME               ,  24, "VMRESUME instruction."),
+    EXIT_REASON(VMX_EXIT_VMWRITE                ,  25, "VMWRITE instruction."),
+    EXIT_REASON(VMX_EXIT_VMXOFF                 ,  26, "VMXOFF instruction."),
+    EXIT_REASON(VMX_EXIT_VMXON                  ,  27, "VMXON instruction."),
+    EXIT_REASON(VMX_EXIT_MOV_CRX                ,  28, "Control-register accesses."),
+    EXIT_REASON(VMX_EXIT_MOV_DRX                ,  29, "Debug-register accesses."),
+    EXIT_REASON(VMX_EXIT_PORT_IO                ,  30, "I/O instruction."),
+    EXIT_REASON(VMX_EXIT_RDMSR                  ,  31, "RDMSR instruction."),
+    EXIT_REASON(VMX_EXIT_WRMSR                  ,  32, "WRMSR instruction."),
+    EXIT_REASON(VMX_EXIT_ERR_INVALID_GUEST_STATE,  33, "VM-entry failure due to invalid guest state."),
+    EXIT_REASON(VMX_EXIT_ERR_MSR_LOAD           ,  34, "VM-entry failure due to MSR loading."),
+    EXIT_REASON_NIL(),
+    EXIT_REASON(VMX_EXIT_MWAIT                  ,  36, "MWAIT instruction."),
+    EXIT_REASON(VMX_EXIT_MTF                    ,  37, "Monitor Trap Flag."),
+    EXIT_REASON_NIL(),
+    EXIT_REASON(VMX_EXIT_MONITOR                ,  39, "MONITOR instruction."),
+    EXIT_REASON(VMX_EXIT_PAUSE                  ,  40, "PAUSE instruction."),
+    EXIT_REASON(VMX_EXIT_ERR_MACHINE_CHECK      ,  41, "VM-entry failure due to machine-check."),
+    EXIT_REASON_NIL(),
+    EXIT_REASON(VMX_EXIT_TPR_BELOW_THRESHOLD    ,  43, "TPR below threshold (MOV to CR8)."),
+    EXIT_REASON(VMX_EXIT_APIC_ACCESS            ,  44, "APIC access."),
+    EXIT_REASON(VMX_EXIT_VIRTUALIZED_EOI        ,  45, "Virtualized EOI."),
+    EXIT_REASON(VMX_EXIT_GDTR_IDTR_ACCESS       ,  46, "GDTR/IDTR access using LGDT/SGDT/LIDT/SIDT."),
+    EXIT_REASON(VMX_EXIT_LDTR_TR_ACCESS         ,  47, "LDTR/TR access using LLDT/SLDT/LTR/STR."),
+    EXIT_REASON(VMX_EXIT_EPT_VIOLATION          ,  48, "EPT violation."),
+    EXIT_REASON(VMX_EXIT_EPT_MISCONFIG          ,  49, "EPT misconfiguration."),
+    EXIT_REASON(VMX_EXIT_INVEPT                 ,  50, "INVEPT instruction."),
+    EXIT_REASON(VMX_EXIT_RDTSCP                 ,  51, "RDTSCP instruction."),
+    EXIT_REASON(VMX_EXIT_PREEMPT_TIMER          ,  52, "VMX-preemption timer expired."),
+    EXIT_REASON(VMX_EXIT_INVVPID                ,  53, "INVVPID instruction."),
+    EXIT_REASON(VMX_EXIT_WBINVD                 ,  54, "WBINVD instruction."),
+    EXIT_REASON(VMX_EXIT_XSETBV                 ,  55, "XSETBV instruction."),
+    EXIT_REASON(VMX_EXIT_APIC_WRITE             ,  56, "APIC write completed to virtual-APIC page."),
+    EXIT_REASON(VMX_EXIT_RDRAND                 ,  57, "RDRAND instruction."),
+    EXIT_REASON(VMX_EXIT_INVPCID                ,  58, "INVPCID instruction."),
+    EXIT_REASON(VMX_EXIT_VMFUNC                 ,  59, "VMFUNC instruction."),
+    EXIT_REASON(VMX_EXIT_ENCLS                  ,  60, "ENCLS instruction."),
+    EXIT_REASON(VMX_EXIT_RDSEED                 ,  61, "RDSEED instruction."),
+    EXIT_REASON(VMX_EXIT_PML_FULL               ,  62, "Page-modification log full."),
+    EXIT_REASON(VMX_EXIT_XSAVES                 ,  63, "XSAVES instruction."),
+    EXIT_REASON(VMX_EXIT_XRSTORS                ,  64, "XRSTORS instruction.")
+};
+/** Array index of the last valid VT-x exit reason. */
+#define MAX_EXITREASON_VTX                         64
+
+/** A partial list of \#EXIT reason descriptions for AMD-V, used to describe
+ *  statistics and exit history.
+ *
+ *  @note AMD-V has annoyingly large gaps (e.g. \#NPF VMEXIT comes at 1024),
+ *        this array doesn't contain the entire set of exit reasons, we
+ *        handle them via hmSvmGetSpecialExitReasonDesc(). */
+static const char * const g_apszSvmExitReasons[MAX_EXITREASON_STAT] =
+{
+    EXIT_REASON(SVM_EXIT_READ_CR0     ,    0, "Read CR0."),
+    EXIT_REASON(SVM_EXIT_READ_CR1     ,    1, "Read CR1."),
+    EXIT_REASON(SVM_EXIT_READ_CR2     ,    2, "Read CR2."),
+    EXIT_REASON(SVM_EXIT_READ_CR3     ,    3, "Read CR3."),
+    EXIT_REASON(SVM_EXIT_READ_CR4     ,    4, "Read CR4."),
+    EXIT_REASON(SVM_EXIT_READ_CR5     ,    5, "Read CR5."),
+    EXIT_REASON(SVM_EXIT_READ_CR6     ,    6, "Read CR6."),
+    EXIT_REASON(SVM_EXIT_READ_CR7     ,    7, "Read CR7."),
+    EXIT_REASON(SVM_EXIT_READ_CR8     ,    8, "Read CR8."),
+    EXIT_REASON(SVM_EXIT_READ_CR9     ,    9, "Read CR9."),
+    EXIT_REASON(SVM_EXIT_READ_CR10    ,   10, "Read CR10."),
+    EXIT_REASON(SVM_EXIT_READ_CR11    ,   11, "Read CR11."),
+    EXIT_REASON(SVM_EXIT_READ_CR12    ,   12, "Read CR12."),
+    EXIT_REASON(SVM_EXIT_READ_CR13    ,   13, "Read CR13."),
+    EXIT_REASON(SVM_EXIT_READ_CR14    ,   14, "Read CR14."),
+    EXIT_REASON(SVM_EXIT_READ_CR15    ,   15, "Read CR15."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR0    ,   16, "Write CR0."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR1    ,   17, "Write CR1."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR2    ,   18, "Write CR2."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR3    ,   19, "Write CR3."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR4    ,   20, "Write CR4."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR5    ,   21, "Write CR5."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR6    ,   22, "Write CR6."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR7    ,   23, "Write CR7."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR8    ,   24, "Write CR8."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR9    ,   25, "Write CR9."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR10   ,   26, "Write CR10."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR11   ,   27, "Write CR11."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR12   ,   28, "Write CR12."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR13   ,   29, "Write CR13."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR14   ,   30, "Write CR14."),
+    EXIT_REASON(SVM_EXIT_WRITE_CR15   ,   31, "Write CR15."),
+    EXIT_REASON(SVM_EXIT_READ_DR0     ,   32, "Read DR0."),
+    EXIT_REASON(SVM_EXIT_READ_DR1     ,   33, "Read DR1."),
+    EXIT_REASON(SVM_EXIT_READ_DR2     ,   34, "Read DR2."),
+    EXIT_REASON(SVM_EXIT_READ_DR3     ,   35, "Read DR3."),
+    EXIT_REASON(SVM_EXIT_READ_DR4     ,   36, "Read DR4."),
+    EXIT_REASON(SVM_EXIT_READ_DR5     ,   37, "Read DR5."),
+    EXIT_REASON(SVM_EXIT_READ_DR6     ,   38, "Read DR6."),
+    EXIT_REASON(SVM_EXIT_READ_DR7     ,   39, "Read DR7."),
+    EXIT_REASON(SVM_EXIT_READ_DR8     ,   40, "Read DR8."),
+    EXIT_REASON(SVM_EXIT_READ_DR9     ,   41, "Read DR9."),
+    EXIT_REASON(SVM_EXIT_READ_DR10    ,   42, "Read DR10."),
+    EXIT_REASON(SVM_EXIT_READ_DR11    ,   43, "Read DR11."),
+    EXIT_REASON(SVM_EXIT_READ_DR12    ,   44, "Read DR12."),
+    EXIT_REASON(SVM_EXIT_READ_DR13    ,   45, "Read DR13."),
+    EXIT_REASON(SVM_EXIT_READ_DR14    ,   46, "Read DR14."),
+    EXIT_REASON(SVM_EXIT_READ_DR15    ,   47, "Read DR15."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR0    ,   48, "Write DR0."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR1    ,   49, "Write DR1."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR2    ,   50, "Write DR2."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR3    ,   51, "Write DR3."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR4    ,   52, "Write DR4."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR5    ,   53, "Write DR5."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR6    ,   54, "Write DR6."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR7    ,   55, "Write DR7."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR8    ,   56, "Write DR8."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR9    ,   57, "Write DR9."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR10   ,   58, "Write DR10."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR11   ,   59, "Write DR11."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR12   ,   60, "Write DR12."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR13   ,   61, "Write DR13."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR14   ,   62, "Write DR14."),
+    EXIT_REASON(SVM_EXIT_WRITE_DR15   ,   63, "Write DR15."),
+    EXIT_REASON(SVM_EXIT_XCPT_0       ,   64, "Exception 0  (#DE)."),
+    EXIT_REASON(SVM_EXIT_XCPT_1       ,   65, "Exception 1  (#DB)."),
+    EXIT_REASON(SVM_EXIT_XCPT_2       ,   66, "Exception 2  (#NMI)."),
+    EXIT_REASON(SVM_EXIT_XCPT_3       ,   67, "Exception 3  (#BP)."),
+    EXIT_REASON(SVM_EXIT_XCPT_4       ,   68, "Exception 4  (#OF)."),
+    EXIT_REASON(SVM_EXIT_XCPT_5       ,   69, "Exception 5  (#BR)."),
+    EXIT_REASON(SVM_EXIT_XCPT_6       ,   70, "Exception 6  (#UD)."),
+    EXIT_REASON(SVM_EXIT_XCPT_7       ,   71, "Exception 7  (#NM)."),
+    EXIT_REASON(SVM_EXIT_XCPT_8       ,   72, "Exception 8  (#DF)."),
+    EXIT_REASON(SVM_EXIT_XCPT_9       ,   73, "Exception 9  (#CO_SEG_OVERRUN)."),
+    EXIT_REASON(SVM_EXIT_XCPT_10      ,   74, "Exception 10 (#TS)."),
+    EXIT_REASON(SVM_EXIT_XCPT_11      ,   75, "Exception 11 (#NP)."),
+    EXIT_REASON(SVM_EXIT_XCPT_12      ,   76, "Exception 12 (#SS)."),
+    EXIT_REASON(SVM_EXIT_XCPT_13      ,   77, "Exception 13 (#GP)."),
+    EXIT_REASON(SVM_EXIT_XCPT_14      ,   78, "Exception 14 (#PF)."),
+    EXIT_REASON(SVM_EXIT_XCPT_15      ,   79, "Exception 15 (0x0f)."),
+    EXIT_REASON(SVM_EXIT_XCPT_16      ,   80, "Exception 16 (#MF)."),
+    EXIT_REASON(SVM_EXIT_XCPT_17      ,   81, "Exception 17 (#AC)."),
+    EXIT_REASON(SVM_EXIT_XCPT_18      ,   82, "Exception 18 (#MC)."),
+    EXIT_REASON(SVM_EXIT_XCPT_19      ,   83, "Exception 19 (#XF)."),
+    EXIT_REASON(SVM_EXIT_XCPT_20      ,   84, "Exception 20 (#VE)."),
+    EXIT_REASON(SVM_EXIT_XCPT_21      ,   85, "Exception 21 (0x15)."),
+    EXIT_REASON(SVM_EXIT_XCPT_22      ,   86, "Exception 22 (0x16)."),
+    EXIT_REASON(SVM_EXIT_XCPT_23      ,   87, "Exception 23 (0x17)."),
+    EXIT_REASON(SVM_EXIT_XCPT_24      ,   88, "Exception 24 (0x18)."),
+    EXIT_REASON(SVM_EXIT_XCPT_25      ,   89, "Exception 25 (0x19)."),
+    EXIT_REASON(SVM_EXIT_XCPT_26      ,   90, "Exception 26 (0x1a)."),
+    EXIT_REASON(SVM_EXIT_XCPT_27      ,   91, "Exception 27 (0x1b)."),
+    EXIT_REASON(SVM_EXIT_XCPT_28      ,   92, "Exception 28 (0x1c)."),
+    EXIT_REASON(SVM_EXIT_XCPT_29      ,   93, "Exception 29 (0x1d)."),
+    EXIT_REASON(SVM_EXIT_XCPT_30      ,   94, "Exception 30 (#SX)."),
+    EXIT_REASON(SVM_EXIT_XCPT_31      ,   95, "Exception 31 (0x1F)."),
+    EXIT_REASON(SVM_EXIT_INTR         ,   96, "Physical maskable interrupt (host)."),
+    EXIT_REASON(SVM_EXIT_NMI          ,   97, "Physical non-maskable interrupt (host)."),
+    EXIT_REASON(SVM_EXIT_SMI          ,   98, "System management interrupt (host)."),
+    EXIT_REASON(SVM_EXIT_INIT         ,   99, "Physical INIT signal (host)."),
+    EXIT_REASON(SVM_EXIT_VINTR        ,  100, "Virtual interrupt-window exit."),
+    EXIT_REASON(SVM_EXIT_CR0_SEL_WRITE,  101, "Selective CR0 Write (to bits other than CR0.TS and CR0.MP)."),
+    EXIT_REASON(SVM_EXIT_IDTR_READ    ,  102, "Read IDTR."),
+    EXIT_REASON(SVM_EXIT_GDTR_READ    ,  103, "Read GDTR."),
+    EXIT_REASON(SVM_EXIT_LDTR_READ    ,  104, "Read LDTR."),
+    EXIT_REASON(SVM_EXIT_TR_READ      ,  105, "Read TR."),
+    EXIT_REASON(SVM_EXIT_IDTR_WRITE   ,  106, "Write IDTR."),
+    EXIT_REASON(SVM_EXIT_GDTR_WRITE   ,  107, "Write GDTR."),
+    EXIT_REASON(SVM_EXIT_LDTR_WRITE   ,  108, "Write LDTR."),
+    EXIT_REASON(SVM_EXIT_TR_WRITE     ,  109, "Write TR."),
+    EXIT_REASON(SVM_EXIT_RDTSC        ,  110, "RDTSC instruction."),
+    EXIT_REASON(SVM_EXIT_RDPMC        ,  111, "RDPMC instruction."),
+    EXIT_REASON(SVM_EXIT_PUSHF        ,  112, "PUSHF instruction."),
+    EXIT_REASON(SVM_EXIT_POPF         ,  113, "POPF instruction."),
+    EXIT_REASON(SVM_EXIT_CPUID        ,  114, "CPUID instruction."),
+    EXIT_REASON(SVM_EXIT_RSM          ,  115, "RSM instruction."),
+    EXIT_REASON(SVM_EXIT_IRET         ,  116, "IRET instruction."),
+    EXIT_REASON(SVM_EXIT_SWINT        ,  117, "Software interrupt (INTn instructions)."),
+    EXIT_REASON(SVM_EXIT_INVD         ,  118, "INVD instruction."),
+    EXIT_REASON(SVM_EXIT_PAUSE        ,  119, "PAUSE instruction."),
+    EXIT_REASON(SVM_EXIT_HLT          ,  120, "HLT instruction."),
+    EXIT_REASON(SVM_EXIT_INVLPG       ,  121, "INVLPG instruction."),
+    EXIT_REASON(SVM_EXIT_INVLPGA      ,  122, "INVLPGA instruction."),
+    EXIT_REASON(SVM_EXIT_IOIO         ,  123, "IN/OUT/INS/OUTS instruction."),
+    EXIT_REASON(SVM_EXIT_MSR          ,  124, "RDMSR or WRMSR access to protected MSR."),
+    EXIT_REASON(SVM_EXIT_TASK_SWITCH  ,  125, "Task switch."),
+    EXIT_REASON(SVM_EXIT_FERR_FREEZE  ,  126, "FERR Freeze; CPU frozen in an x87/mmx instruction waiting for interrupt."),
+    EXIT_REASON(SVM_EXIT_SHUTDOWN     ,  127, "Shutdown."),
+    EXIT_REASON(SVM_EXIT_VMRUN        ,  128, "VMRUN instruction."),
+    EXIT_REASON(SVM_EXIT_VMMCALL      ,  129, "VMMCALL instruction."),
+    EXIT_REASON(SVM_EXIT_VMLOAD       ,  130, "VMLOAD instruction."),
+    EXIT_REASON(SVM_EXIT_VMSAVE       ,  131, "VMSAVE instruction."),
+    EXIT_REASON(SVM_EXIT_STGI         ,  132, "STGI instruction."),
+    EXIT_REASON(SVM_EXIT_CLGI         ,  133, "CLGI instruction."),
+    EXIT_REASON(SVM_EXIT_SKINIT       ,  134, "SKINIT instruction."),
+    EXIT_REASON(SVM_EXIT_RDTSCP       ,  135, "RDTSCP instruction."),
+    EXIT_REASON(SVM_EXIT_ICEBP        ,  136, "ICEBP instruction."),
+    EXIT_REASON(SVM_EXIT_WBINVD       ,  137, "WBINVD instruction."),
+    EXIT_REASON(SVM_EXIT_MONITOR      ,  138, "MONITOR instruction."),
+    EXIT_REASON(SVM_EXIT_MWAIT        ,  139, "MWAIT instruction."),
+    EXIT_REASON(SVM_EXIT_MWAIT_ARMED  ,  140, "MWAIT instruction when armed."),
+    EXIT_REASON(SVM_EXIT_XSETBV       ,  141, "XSETBV instruction."),
+};
+/** Array index of the last valid AMD-V exit reason. */
+#define MAX_EXITREASON_AMDV              141
+
+/** Special exit reasons not covered in the array above. */
+#define SVM_EXIT_REASON_NPF                  EXIT_REASON(SVM_EXIT_NPF                , 1024, "Nested Page Fault.")
+#define SVM_EXIT_REASON_AVIC_INCOMPLETE_IPI  EXIT_REASON(SVM_EXIT_AVIC_INCOMPLETE_IPI, 1025, "AVIC - Incomplete IPI delivery.")
+#define SVM_EXIT_REASON_AVIC_NOACCEL         EXIT_REASON(SVM_EXIT_AVIC_NOACCEL       , 1026, "AVIC - Unhandled register.")
+
+/**
+ * Gets the SVM exit reason if it's one of the reasons not present in the @c
+ * g_apszSvmExitReasons array.
+ *
+ * @returns The exit reason or NULL if unknown.
+ * @param   uExit       The exit.
+ */
+DECLINLINE(const char *) hmSvmGetSpecialExitReasonDesc(uint16_t uExit)
+{
+    switch (uExit)
+    {
+        case SVM_EXIT_NPF:                 return SVM_EXIT_REASON_NPF;
+        case SVM_EXIT_AVIC_INCOMPLETE_IPI: return SVM_EXIT_REASON_AVIC_INCOMPLETE_IPI;
+        case SVM_EXIT_AVIC_NOACCEL:        return SVM_EXIT_REASON_AVIC_NOACCEL;
+    }
+    return EXIT_REASON_NIL();
+}
+#undef EXIT_REASON_NIL
+#undef EXIT_REASON
+
+
 /**
  * Checks whether HM (VT-x/AMD-V) is being used by this VM.
@@ -86,5 +345,5 @@
     }
 
-    return HMVmxCanExecuteGuest(pVCpu, pCtx);
+    return HMCanExecuteVmxGuest(pVCpu, pCtx);
 }
 
@@ -192,7 +451,7 @@
  * @param   pVCpu       The cross context virtual CPU structure.
  */
-VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
-{
-    LogFlow(("HMFlushTLB\n"));
+VMM_INT_DECL(int) HMFlushTlb(PVMCPU pVCpu)
+{
+    LogFlow(("HMFlushTlb\n"));
 
     VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
@@ -277,8 +536,8 @@
  * @param   pVM       The cross context VM structure.
  */
-VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM)
+VMM_INT_DECL(int) HMFlushTlbOnAllVCpus(PVM pVM)
 {
     if (pVM->cCpus == 1)
-        return HMFlushTLB(&pVM->aCpus[0]);
+        return HMFlushTlb(&pVM->aCpus[0]);
 
     VMCPUID idThisCpu = VMMGetCpuId(pVM);
@@ -331,5 +590,5 @@
     /** @todo Remove or figure out to way to update the Phys STAT counter.  */
     /* STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys); */
-    return HMFlushTLBOnAllVCpus(pVM);
+    return HMFlushTlbOnAllVCpus(pVM);
 }
 
@@ -429,19 +688,5 @@
 VMM_INT_DECL(bool) HMIsVmxActive(PVM pVM)
 {
-    return HMIsVmxSupported(pVM) && HMIsEnabled(pVM);
-}
-
-
-/**
- * Checks if VT-x is supported by the host CPU.
- *
- * @returns true if VT-x is supported, false otherwise.
- * @param   pVM         The cross context VM structure.
- *
- * @remarks Works before hmR3InitFinalizeR0.
- */
-VMM_INT_DECL(bool) HMIsVmxSupported(PVM pVM)
-{
-    return pVM->hm.s.vmx.fSupported;
+    return pVM->hm.s.vmx.fSupported && HMIsEnabled(pVM);
 }
 
@@ -537,5 +782,5 @@
  * @param   enmGuestMode    New guest paging mode.
  */
-VMM_INT_DECL(void) HMHCPagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
+VMM_INT_DECL(void) HMHCChangedPagingMode(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
 {
 # ifdef IN_RING3
@@ -572,5 +817,5 @@
 # endif
 
-    Log4(("HMHCPagingModeChanged: Guest paging mode '%s', shadow paging mode '%s'\n", PGMGetModeName(enmGuestMode),
+    Log4(("HMHCChangedPagingMode: Guest paging mode '%s', shadow paging mode '%s'\n", PGMGetModeName(enmGuestMode),
           PGMGetModeName(enmShadowMode)));
 }
@@ -587,5 +832,5 @@
  * @param   pVmxMsrs        Where to store the VMX MSRs.
  */
-VMM_INT_DECL(void) HMVmxGetVmxMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pHwvirtMsrs, PVMXMSRS pVmxMsrs)
+VMM_INT_DECL(void) HMGetVmxMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pHwvirtMsrs, PVMXMSRS pVmxMsrs)
 {
     AssertReturnVoid(pHwvirtMsrs);
@@ -622,5 +867,5 @@
  * @param   pSvmMsrs        Where to store the SVM MSRs.
  */
-VMM_INT_DECL(void) HMVmxGetSvmMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pHwvirtMsrs, PSVMMSRS pSvmMsrs)
+VMM_INT_DECL(void) HMGetSvmMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pHwvirtMsrs, PSVMMSRS pSvmMsrs)
 {
     AssertReturnVoid(pHwvirtMsrs);
@@ -629,2 +874,36 @@
 }
 
+
+/**
+ * Gets the name of a VT-x exit code.
+ *
+ * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
+ * @param   uExit               The VT-x exit to name.
+ */
+VMM_INT_DECL(const char *) HMGetVmxExitName(uint32_t uExit)
+{
+    if (uExit <= MAX_EXITREASON_VTX)
+    {
+        Assert(uExit < RT_ELEMENTS(g_apszVmxExitReasons));
+        return g_apszVmxExitReasons[uExit];
+    }
+    return NULL;
+}
+
+
+/**
+ * Gets the name of an AMD-V exit code.
+ *
+ * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
+ * @param   uExit               The AMD-V exit to name.
+ */
+VMM_INT_DECL(const char *) HMGetSvmExitName(uint32_t uExit)
+{
+    if (uExit <= MAX_EXITREASON_AMDV)
+    {
+        Assert(uExit < RT_ELEMENTS(g_apszSvmExitReasons));
+        return g_apszSvmExitReasons[uExit];
+    }
+    return hmSvmGetSpecialExitReasonDesc(uExit);
+}
+
Index: /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 76993)
@@ -130,5 +130,5 @@
  * @sa      hmR0SvmVmRunCacheVmcb.
  */
-VMM_INT_DECL(void) HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx)
+VMM_INT_DECL(void) HMNotifySvmNstGstVmexit(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
@@ -184,5 +184,5 @@
  *          to change throughout the lifetime of the VM.
  */
-VMM_INT_DECL(bool) HMSvmIsVGifActive(PVM pVM)
+VMM_INT_DECL(bool) HMIsSvmVGifActive(PVM pVM)
 {
     bool const fVGif    = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);
@@ -209,5 +209,5 @@
  * @sa      CPUMApplyNestedGuestTscOffset(), hmR0SvmNstGstUndoTscOffset().
  */
-VMM_INT_DECL(uint64_t) HMSvmNstGstApplyTscOffset(PVMCPU pVCpu, uint64_t uTicks)
+VMM_INT_DECL(uint64_t) HMApplySvmNstGstTscOffset(PVMCPU pVCpu, uint64_t uTicks)
 {
     PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
@@ -229,5 +229,5 @@
  * @param   pVCpu               The cross context virtual CPU structure.
  */
-VMM_INT_DECL(int) HMHCSvmMaybeMovTprHypercall(PVMCPU pVCpu)
+VMM_INT_DECL(int) HMHCMaybeMovTprSvmHypercall(PVMCPU pVCpu)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
@@ -252,5 +252,5 @@
  * @returns true if the erratum applies, false otherwise.
  */
-VMM_INT_DECL(int) HMSvmIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
+VMM_INT_DECL(int) HMIsSubjectToSvmErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
 {
     /*
@@ -308,5 +308,5 @@
  *                      returned in @a pbOffMsrpm.
  */
-VMM_INT_DECL(int) HMSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
+VMM_INT_DECL(int) HMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
 {
     Assert(pbOffMsrpm);
@@ -372,5 +372,5 @@
  *                          Optional, can be NULL.
  */
-VMM_INT_DECL(bool) HMSvmIsIOInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
+VMM_INT_DECL(bool) HMIsSvmIoInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                             uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
                                             PSVMIOIOEXITINFO pIoExitInfo)
Index: /trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp	(revision 76993)
@@ -370,5 +370,5 @@
  * @param   enmDiag    The VMX diagnostic.
  */
-VMM_INT_DECL(const char *) HMVmxGetDiagDesc(VMXVDIAG enmDiag)
+VMM_INT_DECL(const char *) HMGetVmxDiagDesc(VMXVDIAG enmDiag)
 {
     if (RT_LIKELY((unsigned)enmDiag < RT_ELEMENTS(g_apszVmxVDiagDesc)))
@@ -384,5 +384,5 @@
  * @param   enmAbort    The VMX abort reason.
  */
-VMM_INT_DECL(const char *) HMVmxGetAbortDesc(VMXABORT enmAbort)
+VMM_INT_DECL(const char *) HMGetVmxAbortDesc(VMXABORT enmAbort)
 {
     switch (enmAbort)
@@ -408,5 +408,5 @@
  * @param   fVmcsState      The virtual-VMCS state.
  */
-VMM_INT_DECL(const char *) HMVmxGetVmcsStateDesc(uint8_t fVmcsState)
+VMM_INT_DECL(const char *) HMGetVmxVmcsStateDesc(uint8_t fVmcsState)
 {
     switch (fVmcsState)
@@ -425,5 +425,5 @@
  * @param   uType    The event type.
  */
-VMM_INT_DECL(const char *) HMVmxGetEntryIntInfoTypeDesc(uint8_t uType)
+VMM_INT_DECL(const char *) HMGetVmxEntryIntInfoTypeDesc(uint8_t uType)
 {
     switch (uType)
@@ -449,5 +449,5 @@
  * @param   uType    The event type.
  */
-VMM_INT_DECL(const char *) HMVmxGetExitIntInfoTypeDesc(uint8_t uType)
+VMM_INT_DECL(const char *) HMGetVmxExitIntInfoTypeDesc(uint8_t uType)
 {
     switch (uType)
@@ -472,5 +472,5 @@
  * @param   uType    The event type.
  */
-VMM_INT_DECL(const char *) HMVmxGetIdtVectoringInfoTypeDesc(uint8_t uType)
+VMM_INT_DECL(const char *) HMGetVmxIdtVectoringInfoTypeDesc(uint8_t uType)
 {
     switch (uType)
@@ -649,5 +649,5 @@
  *          state, make sure REM (which supplies a partial state) is updated.
  */
-VMM_INT_DECL(bool) HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
+VMM_INT_DECL(bool) HMCanExecuteVmxGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
@@ -831,5 +831,5 @@
  *                          NULL.
  */
-VMM_INT_DECL(int) HMVmxGetMsrPermission(void const *pvMsrBitmap, uint32_t idMsr, PVMXMSREXITREAD penmRead,
+VMM_INT_DECL(int) HMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr, PVMXMSREXITREAD penmRead,
                                         PVMXMSREXITWRITE penmWrite)
 {
@@ -906,5 +906,5 @@
  * @param   cbAccess        The size of the I/O access in bytes (1, 2 or 4 bytes).
  */
-VMM_INT_DECL(bool) HMVmxGetIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort,
+VMM_INT_DECL(bool) HMGetVmxIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort,
                                                 uint8_t cbAccess)
 {
@@ -929,2 +929,279 @@
 }
 
+
+/**
+ * Dumps the virtual VMCS state to the release log.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure.
+ */
+VMM_INT_DECL(void) HMDumpHwvirtVmxState(PVMCPU pVCpu)
+{
+#ifndef IN_RC
+    /* The string width of -4 used in the macros below to cover 'LDTR', 'GDTR', 'IDTR'. */
+# define HMVMX_DUMP_HOST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+    do { \
+        LogRel(("  %s%-4s                       = {base=%016RX64}\n", \
+            (a_pszPrefix), (a_SegName), (a_pVmcs)->u64Host##a_Seg##Base.u)); \
+    } while (0)
+# define HMVMX_DUMP_HOST_FS_GS_TR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+    do { \
+        LogRel(("  %s%-4s                       = {%04x base=%016RX64}\n", \
+                (a_pszPrefix), (a_SegName), (a_pVmcs)->Host##a_Seg, (a_pVmcs)->u64Host##a_Seg##Base.u)); \
+    } while (0)
+# define HMVMX_DUMP_GUEST_SEGREG(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+    do { \
+        LogRel(("  %s%-4s                       = {%04x base=%016RX64 limit=%08x flags=%04x}\n", \
+                (a_pszPrefix), (a_SegName), (a_pVmcs)->Guest##a_Seg, (a_pVmcs)->u64Guest##a_Seg##Base.u, \
+                (a_pVmcs)->u32Guest##a_Seg##Limit, (a_pVmcs)->u32Guest##a_Seg##Attr)); \
+    } while (0)
+# define HMVMX_DUMP_GUEST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+    do { \
+        LogRel(("  %s%-4s                       = {base=%016RX64 limit=%08x}\n", \
+                (a_pszPrefix), (a_SegName), (a_pVmcs)->u64Guest##a_Seg##Base.u, (a_pVmcs)->u32Guest##a_Seg##Limit)); \
+    } while (0)
+
+    PCCPUMCTX  pCtx  = &pVCpu->cpum.GstCtx;
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    if (!pVmcs)
+    {
+        LogRel(("Virtual VMCS not allocated\n"));
+        return;
+    }
+    LogRel(("GCPhysVmxon                = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysVmxon));
+    LogRel(("GCPhysVmcs                 = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysVmcs));
+    LogRel(("GCPhysShadowVmcs           = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysShadowVmcs));
+    LogRel(("enmDiag                    = %u (%s)\n",   pCtx->hwvirt.vmx.enmDiag, HMGetVmxDiagDesc(pCtx->hwvirt.vmx.enmDiag)));
+    LogRel(("enmAbort                   = %u (%s)\n",   pCtx->hwvirt.vmx.enmAbort, HMGetVmxAbortDesc(pCtx->hwvirt.vmx.enmAbort)));
+    LogRel(("uAbortAux                  = %u (%#x)\n",  pCtx->hwvirt.vmx.uAbortAux, pCtx->hwvirt.vmx.uAbortAux));
+    LogRel(("fInVmxRootMode             = %RTbool\n",   pCtx->hwvirt.vmx.fInVmxRootMode));
+    LogRel(("fInVmxNonRootMode          = %RTbool\n",   pCtx->hwvirt.vmx.fInVmxNonRootMode));
+    LogRel(("fInterceptEvents           = %RTbool\n",   pCtx->hwvirt.vmx.fInterceptEvents));
+    LogRel(("fNmiUnblockingIret         = %RTbool\n",   pCtx->hwvirt.vmx.fNmiUnblockingIret));
+    LogRel(("uFirstPauseLoopTick        = %RX64\n",     pCtx->hwvirt.vmx.uFirstPauseLoopTick));
+    LogRel(("uPrevPauseTick             = %RX64\n",     pCtx->hwvirt.vmx.uPrevPauseTick));
+    LogRel(("uVmentryTick               = %RX64\n",     pCtx->hwvirt.vmx.uVmentryTick));
+    LogRel(("offVirtApicWrite           = %#RX16\n",    pCtx->hwvirt.vmx.offVirtApicWrite));
+    LogRel(("VMCS cache:\n"));
+
+    const char *pszPrefix = "  ";
+    /* Header. */
+    {
+        LogRel(("%sHeader:\n", pszPrefix));
+        LogRel(("  %sVMCS revision id           = %#RX32\n",      pszPrefix, pVmcs->u32VmcsRevId));
+        LogRel(("  %sVMX-abort id               = %#RX32 (%s)\n", pszPrefix, pVmcs->enmVmxAbort, HMGetVmxAbortDesc(pVmcs->enmVmxAbort)));
+        LogRel(("  %sVMCS state                 = %#x (%s)\n",    pszPrefix, pVmcs->fVmcsState, HMGetVmxVmcsStateDesc(pVmcs->fVmcsState)));
+    }
+
+    /* Control fields. */
+    {
+        /* 16-bit. */
+        LogRel(("%sControl:\n", pszPrefix));
+        LogRel(("  %sVPID                       = %#RX16\n",   pszPrefix, pVmcs->u16Vpid));
+        LogRel(("  %sPosted intr notify vector  = %#RX16\n",   pszPrefix, pVmcs->u16PostIntNotifyVector));
+        LogRel(("  %sEPTP index                 = %#RX16\n",   pszPrefix, pVmcs->u16EptpIndex));
+
+        /* 32-bit. */
+        LogRel(("  %sPinCtls                    = %#RX32\n",   pszPrefix, pVmcs->u32PinCtls));
+        LogRel(("  %sProcCtls                   = %#RX32\n",   pszPrefix, pVmcs->u32ProcCtls));
+        LogRel(("  %sProcCtls2                  = %#RX32\n",   pszPrefix, pVmcs->u32ProcCtls2));
+        LogRel(("  %sExitCtls                   = %#RX32\n",   pszPrefix, pVmcs->u32ExitCtls));
+        LogRel(("  %sEntryCtls                  = %#RX32\n",   pszPrefix, pVmcs->u32EntryCtls));
+        LogRel(("  %sException bitmap           = %#RX32\n",   pszPrefix, pVmcs->u32XcptBitmap));
+        LogRel(("  %sPage-fault mask            = %#RX32\n",   pszPrefix, pVmcs->u32XcptPFMask));
+        LogRel(("  %sPage-fault match           = %#RX32\n",   pszPrefix, pVmcs->u32XcptPFMatch));
+        LogRel(("  %sCR3-target count           = %RU32\n",    pszPrefix, pVmcs->u32Cr3TargetCount));
+        LogRel(("  %sVM-exit MSR store count    = %RU32\n",    pszPrefix, pVmcs->u32ExitMsrStoreCount));
+        LogRel(("  %sVM-exit MSR load count     = %RU32\n",    pszPrefix, pVmcs->u32ExitMsrLoadCount));
+        LogRel(("  %sVM-entry MSR load count    = %RU32\n",    pszPrefix, pVmcs->u32EntryMsrLoadCount));
+        LogRel(("  %sVM-entry interruption info = %#RX32\n",   pszPrefix, pVmcs->u32EntryIntInfo));
+        {
+            uint32_t const fInfo = pVmcs->u32EntryIntInfo;
+            uint8_t  const uType = VMX_ENTRY_INT_INFO_TYPE(fInfo);
+            LogRel(("    %sValid                      = %RTbool\n",  pszPrefix, VMX_ENTRY_INT_INFO_IS_VALID(fInfo)));
+            LogRel(("    %sType                       = %#x (%s)\n", pszPrefix, uType, HMGetVmxEntryIntInfoTypeDesc(uType)));
+            LogRel(("    %sVector                     = %#x\n",      pszPrefix, VMX_ENTRY_INT_INFO_VECTOR(fInfo)));
+            LogRel(("    %sNMI-unblocking-IRET        = %RTbool\n",  pszPrefix, VMX_ENTRY_INT_INFO_IS_NMI_UNBLOCK_IRET(fInfo)));
+            LogRel(("    %sError-code valid           = %RTbool\n",  pszPrefix, VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(fInfo)));
+        }
+        LogRel(("  %sVM-entry xcpt error-code   = %#RX32\n",   pszPrefix, pVmcs->u32EntryXcptErrCode));
+        LogRel(("  %sVM-entry instruction len   = %u bytes\n", pszPrefix, pVmcs->u32EntryInstrLen));
+        LogRel(("  %sTPR threshold              = %#RX32\n",   pszPrefix, pVmcs->u32TprThreshold));
+        LogRel(("  %sPLE gap                    = %#RX32\n",   pszPrefix, pVmcs->u32PleGap));
+        LogRel(("  %sPLE window                 = %#RX32\n",   pszPrefix, pVmcs->u32PleWindow));
+
+        /* 64-bit. */
+        LogRel(("  %sIO-bitmap A addr           = %#RX64\n",   pszPrefix, pVmcs->u64AddrIoBitmapA.u));
+        LogRel(("  %sIO-bitmap B addr           = %#RX64\n",   pszPrefix, pVmcs->u64AddrIoBitmapB.u));
+        LogRel(("  %sMSR-bitmap addr            = %#RX64\n",   pszPrefix, pVmcs->u64AddrMsrBitmap.u));
+        LogRel(("  %sVM-exit MSR store addr     = %#RX64\n",   pszPrefix, pVmcs->u64AddrExitMsrStore.u));
+        LogRel(("  %sVM-exit MSR load addr      = %#RX64\n",   pszPrefix, pVmcs->u64AddrExitMsrLoad.u));
+        LogRel(("  %sVM-entry MSR load addr     = %#RX64\n",   pszPrefix, pVmcs->u64AddrEntryMsrLoad.u));
+        LogRel(("  %sExecutive VMCS ptr         = %#RX64\n",   pszPrefix, pVmcs->u64ExecVmcsPtr.u));
+        LogRel(("  %sPML addr                   = %#RX64\n",   pszPrefix, pVmcs->u64AddrPml.u));
+        LogRel(("  %sTSC offset                 = %#RX64\n",   pszPrefix, pVmcs->u64TscOffset.u));
+        LogRel(("  %sVirtual-APIC addr          = %#RX64\n",   pszPrefix, pVmcs->u64AddrVirtApic.u));
+        LogRel(("  %sAPIC-access addr           = %#RX64\n",   pszPrefix, pVmcs->u64AddrApicAccess.u));
+        LogRel(("  %sPosted-intr desc addr      = %#RX64\n",   pszPrefix, pVmcs->u64AddrPostedIntDesc.u));
+        LogRel(("  %sVM-functions control       = %#RX64\n",   pszPrefix, pVmcs->u64VmFuncCtls.u));
+        LogRel(("  %sEPTP ptr                   = %#RX64\n",   pszPrefix, pVmcs->u64EptpPtr.u));
+        LogRel(("  %sEOI-exit bitmap 0 addr     = %#RX64\n",   pszPrefix, pVmcs->u64EoiExitBitmap0.u));
+        LogRel(("  %sEOI-exit bitmap 1 addr     = %#RX64\n",   pszPrefix, pVmcs->u64EoiExitBitmap1.u));
+        LogRel(("  %sEOI-exit bitmap 2 addr     = %#RX64\n",   pszPrefix, pVmcs->u64EoiExitBitmap2.u));
+        LogRel(("  %sEOI-exit bitmap 3 addr     = %#RX64\n",   pszPrefix, pVmcs->u64EoiExitBitmap3.u));
+        LogRel(("  %sEPTP-list addr             = %#RX64\n",   pszPrefix, pVmcs->u64AddrEptpList.u));
+        LogRel(("  %sVMREAD-bitmap addr         = %#RX64\n",   pszPrefix, pVmcs->u64AddrVmreadBitmap.u));
+        LogRel(("  %sVMWRITE-bitmap addr        = %#RX64\n",   pszPrefix, pVmcs->u64AddrVmwriteBitmap.u));
+        LogRel(("  %sVirt-Xcpt info addr        = %#RX64\n",   pszPrefix, pVmcs->u64AddrXcptVeInfo.u));
+        LogRel(("  %sXSS-bitmap                 = %#RX64\n",   pszPrefix, pVmcs->u64XssBitmap.u));
+        LogRel(("  %sENCLS-exiting bitmap addr  = %#RX64\n",   pszPrefix, pVmcs->u64AddrEnclsBitmap.u));
+        LogRel(("  %sTSC multiplier             = %#RX64\n",   pszPrefix, pVmcs->u64TscMultiplier.u));
+
+        /* Natural width. */
+        LogRel(("  %sCR0 guest/host mask        = %#RX64\n",   pszPrefix, pVmcs->u64Cr0Mask.u));
+        LogRel(("  %sCR4 guest/host mask        = %#RX64\n",   pszPrefix, pVmcs->u64Cr4Mask.u));
+        LogRel(("  %sCR0 read shadow            = %#RX64\n",   pszPrefix, pVmcs->u64Cr0ReadShadow.u));
+        LogRel(("  %sCR4 read shadow            = %#RX64\n",   pszPrefix, pVmcs->u64Cr4ReadShadow.u));
+        LogRel(("  %sCR3-target 0               = %#RX64\n",   pszPrefix, pVmcs->u64Cr3Target0.u));
+        LogRel(("  %sCR3-target 1               = %#RX64\n",   pszPrefix, pVmcs->u64Cr3Target1.u));
+        LogRel(("  %sCR3-target 2               = %#RX64\n",   pszPrefix, pVmcs->u64Cr3Target2.u));
+        LogRel(("  %sCR3-target 3               = %#RX64\n",   pszPrefix, pVmcs->u64Cr3Target3.u));
+    }
+
+    /* Guest state. */
+    {
+        LogRel(("%sGuest state:\n", pszPrefix));
+
+        /* 16-bit. */
+        HMVMX_DUMP_GUEST_SEGREG(pVmcs, Cs,   "cs",   pszPrefix);
+        HMVMX_DUMP_GUEST_SEGREG(pVmcs, Ss,   "ss",   pszPrefix);
+        HMVMX_DUMP_GUEST_SEGREG(pVmcs, Es,   "es",   pszPrefix);
+        HMVMX_DUMP_GUEST_SEGREG(pVmcs, Ds,   "ds",   pszPrefix);
+        HMVMX_DUMP_GUEST_SEGREG(pVmcs, Fs,   "fs",   pszPrefix);
+        HMVMX_DUMP_GUEST_SEGREG(pVmcs, Gs,   "gs",   pszPrefix);
+        HMVMX_DUMP_GUEST_SEGREG(pVmcs, Ldtr, "ldtr", pszPrefix);
+        HMVMX_DUMP_GUEST_SEGREG(pVmcs, Tr,   "tr",   pszPrefix);
+        HMVMX_DUMP_GUEST_XDTR(  pVmcs, Gdtr, "gdtr", pszPrefix);
+        HMVMX_DUMP_GUEST_XDTR(  pVmcs, Idtr, "idtr", pszPrefix);
+        LogRel(("  %sInterrupt status           = %#RX16\n",   pszPrefix, pVmcs->u16GuestIntStatus));
+        LogRel(("  %sPML index                  = %#RX16\n",   pszPrefix, pVmcs->u16PmlIndex));
+
+        /* 32-bit. */
+        LogRel(("  %sInterruptibility state     = %#RX32\n",   pszPrefix, pVmcs->u32GuestIntrState));
+        LogRel(("  %sActivity state             = %#RX32\n",   pszPrefix, pVmcs->u32GuestActivityState));
+        LogRel(("  %sSMBASE                     = %#RX32\n",   pszPrefix, pVmcs->u32GuestSmBase));
+        LogRel(("  %sSysEnter CS                = %#RX32\n",   pszPrefix, pVmcs->u32GuestSysenterCS));
+        LogRel(("  %sVMX-preemption timer value = %#RX32\n",   pszPrefix, pVmcs->u32PreemptTimer));
+
+        /* 64-bit. */
+        LogRel(("  %sVMCS link ptr              = %#RX64\n",   pszPrefix, pVmcs->u64VmcsLinkPtr.u));
+        LogRel(("  %sDBGCTL                     = %#RX64\n",   pszPrefix, pVmcs->u64GuestDebugCtlMsr.u));
+        LogRel(("  %sPAT                        = %#RX64\n",   pszPrefix, pVmcs->u64GuestPatMsr.u));
+        LogRel(("  %sEFER                       = %#RX64\n",   pszPrefix, pVmcs->u64GuestEferMsr.u));
+        LogRel(("  %sPERFGLOBALCTRL             = %#RX64\n",   pszPrefix, pVmcs->u64GuestPerfGlobalCtlMsr.u));
+        LogRel(("  %sPDPTE 0                    = %#RX64\n",   pszPrefix, pVmcs->u64GuestPdpte0.u));
+        LogRel(("  %sPDPTE 1                    = %#RX64\n",   pszPrefix, pVmcs->u64GuestPdpte1.u));
+        LogRel(("  %sPDPTE 2                    = %#RX64\n",   pszPrefix, pVmcs->u64GuestPdpte2.u));
+        LogRel(("  %sPDPTE 3                    = %#RX64\n",   pszPrefix, pVmcs->u64GuestPdpte3.u));
+        LogRel(("  %sBNDCFGS                    = %#RX64\n",   pszPrefix, pVmcs->u64GuestBndcfgsMsr.u));
+
+        /* Natural width. */
+        LogRel(("  %scr0                        = %#RX64\n",   pszPrefix, pVmcs->u64GuestCr0.u));
+        LogRel(("  %scr3                        = %#RX64\n",   pszPrefix, pVmcs->u64GuestCr3.u));
+        LogRel(("  %scr4                        = %#RX64\n",   pszPrefix, pVmcs->u64GuestCr4.u));
+        LogRel(("  %sdr7                        = %#RX64\n",   pszPrefix, pVmcs->u64GuestDr7.u));
+        LogRel(("  %srsp                        = %#RX64\n",   pszPrefix, pVmcs->u64GuestRsp.u));
+        LogRel(("  %srip                        = %#RX64\n",   pszPrefix, pVmcs->u64GuestRip.u));
+        LogRel(("  %srflags                     = %#RX64\n",   pszPrefix, pVmcs->u64GuestRFlags.u));
+        LogRel(("  %sPending debug xcpts        = %#RX64\n",   pszPrefix, pVmcs->u64GuestPendingDbgXcpt.u));
+        LogRel(("  %sSysEnter ESP               = %#RX64\n",   pszPrefix, pVmcs->u64GuestSysenterEsp.u));
+        LogRel(("  %sSysEnter EIP               = %#RX64\n",   pszPrefix, pVmcs->u64GuestSysenterEip.u));
+    }
+
+    /* Host state. */
+    {
+        LogRel(("%sHost state:\n", pszPrefix));
+
+        /* 16-bit. */
+        LogRel(("  %scs                         = %#RX16\n",   pszPrefix, pVmcs->HostCs));
+        LogRel(("  %sss                         = %#RX16\n",   pszPrefix, pVmcs->HostSs));
+        LogRel(("  %sds                         = %#RX16\n",   pszPrefix, pVmcs->HostDs));
+        LogRel(("  %ses                         = %#RX16\n",   pszPrefix, pVmcs->HostEs));
+        HMVMX_DUMP_HOST_FS_GS_TR(pVmcs, Fs, "fs", pszPrefix);
+        HMVMX_DUMP_HOST_FS_GS_TR(pVmcs, Gs, "gs", pszPrefix);
+        HMVMX_DUMP_HOST_FS_GS_TR(pVmcs, Tr, "tr", pszPrefix);
+        HMVMX_DUMP_HOST_XDTR(pVmcs, Gdtr, "gdtr", pszPrefix);
+        HMVMX_DUMP_HOST_XDTR(pVmcs, Idtr, "idtr", pszPrefix);
+
+        /* 32-bit. */
+        LogRel(("  %sSysEnter CS                = %#RX32\n",   pszPrefix, pVmcs->u32HostSysenterCs));
+
+        /* 64-bit. */
+        LogRel(("  %sEFER                       = %#RX64\n",   pszPrefix, pVmcs->u64HostEferMsr.u));
+        LogRel(("  %sPAT                        = %#RX64\n",   pszPrefix, pVmcs->u64HostPatMsr.u));
+        LogRel(("  %sPERFGLOBALCTRL             = %#RX64\n",   pszPrefix, pVmcs->u64HostPerfGlobalCtlMsr.u));
+
+        /* Natural width. */
+        LogRel(("  %scr0                        = %#RX64\n",   pszPrefix, pVmcs->u64HostCr0.u));
+        LogRel(("  %scr3                        = %#RX64\n",   pszPrefix, pVmcs->u64HostCr3.u));
+        LogRel(("  %scr4                        = %#RX64\n",   pszPrefix, pVmcs->u64HostCr4.u));
+        LogRel(("  %sSysEnter ESP               = %#RX64\n",   pszPrefix, pVmcs->u64HostSysenterEsp.u));
+        LogRel(("  %sSysEnter EIP               = %#RX64\n",   pszPrefix, pVmcs->u64HostSysenterEip.u));
+        LogRel(("  %srsp                        = %#RX64\n",   pszPrefix, pVmcs->u64HostRsp.u));
+        LogRel(("  %srip                        = %#RX64\n",   pszPrefix, pVmcs->u64HostRip.u));
+    }
+
+    /* Read-only fields. */
+    {
+        LogRel(("%sRead-only data fields:\n", pszPrefix));
+
+        /* 16-bit (none currently). */
+
+        /* 32-bit. */
+        uint32_t const uExitReason = pVmcs->u32RoExitReason;
+        LogRel(("  %sExit reason                = %u (%s)\n",  pszPrefix, uExitReason, HMGetVmxExitName(uExitReason)));
+        LogRel(("  %sExit qualification         = %#RX64\n",   pszPrefix, pVmcs->u64RoExitQual.u));
+        LogRel(("  %sVM-instruction error       = %#RX32\n",   pszPrefix, pVmcs->u32RoVmInstrError));
+        LogRel(("  %sVM-exit intr info          = %#RX32\n",   pszPrefix, pVmcs->u32RoExitIntInfo));
+        {
+            uint32_t const fInfo = pVmcs->u32RoExitIntInfo;
+            uint8_t  const uType = VMX_EXIT_INT_INFO_TYPE(fInfo);
+            LogRel(("    %sValid                      = %RTbool\n",  pszPrefix, VMX_EXIT_INT_INFO_IS_VALID(fInfo)));
+            LogRel(("    %sType                       = %#x (%s)\n", pszPrefix, uType, HMGetVmxExitIntInfoTypeDesc(uType)));
+            LogRel(("    %sVector                     = %#x\n",      pszPrefix, VMX_EXIT_INT_INFO_VECTOR(fInfo)));
+            LogRel(("    %sNMI-unblocking-IRET        = %RTbool\n",  pszPrefix, VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(fInfo)));
+            LogRel(("    %sError-code valid           = %RTbool\n",  pszPrefix, VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(fInfo)));
+        }
+        LogRel(("  %sVM-exit intr error-code    = %#RX32\n",   pszPrefix, pVmcs->u32RoExitIntErrCode));
+        LogRel(("  %sIDT-vectoring info         = %#RX32\n",   pszPrefix, pVmcs->u32RoIdtVectoringInfo));
+        {
+            uint32_t const fInfo = pVmcs->u32RoIdtVectoringInfo;
+            uint8_t  const uType = VMX_IDT_VECTORING_INFO_TYPE(fInfo);
+            LogRel(("    %sValid                      = %RTbool\n",  pszPrefix, VMX_IDT_VECTORING_INFO_IS_VALID(fInfo)));
+            LogRel(("    %sType                       = %#x (%s)\n", pszPrefix, uType, HMGetVmxIdtVectoringInfoTypeDesc(uType)));
+            LogRel(("    %sVector                     = %#x\n",      pszPrefix, VMX_IDT_VECTORING_INFO_VECTOR(fInfo)));
+            LogRel(("    %sError-code valid           = %RTbool\n",  pszPrefix, VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(fInfo)));
+        }
+        LogRel(("  %sIDT-vectoring error-code   = %#RX32\n",   pszPrefix, pVmcs->u32RoIdtVectoringErrCode));
+        LogRel(("  %sVM-exit instruction length = %u bytes\n", pszPrefix, pVmcs->u32RoExitInstrLen));
+        LogRel(("  %sVM-exit instruction info   = %#RX32\n",   pszPrefix, pVmcs->u32RoExitInstrInfo));
+
+        /* 64-bit. */
+        LogRel(("  %sGuest-physical addr        = %#RX64\n",   pszPrefix, pVmcs->u64RoGuestPhysAddr.u));
+
+        /* Natural width. */
+        LogRel(("  %sI/O RCX                    = %#RX64\n",   pszPrefix, pVmcs->u64RoIoRcx.u));
+        LogRel(("  %sI/O RSI                    = %#RX64\n",   pszPrefix, pVmcs->u64RoIoRsi.u));
+        LogRel(("  %sI/O RDI                    = %#RX64\n",   pszPrefix, pVmcs->u64RoIoRdi.u));
+        LogRel(("  %sI/O RIP                    = %#RX64\n",   pszPrefix, pVmcs->u64RoIoRip.u));
+        LogRel(("  %sGuest-linear addr          = %#RX64\n",   pszPrefix, pVmcs->u64RoGuestLinearAddr.u));
+    }
+
+# undef HMVMX_DUMP_HOST_XDTR
+# undef HMVMX_DUMP_HOST_FS_GS_TR
+# undef HMVMX_DUMP_GUEST_SEGREG
+# undef HMVMX_DUMP_GUEST_XDTR
+#else
+    NOREF(pVCpu);
+#endif /* !IN_RC */
+}
+
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h	(revision 76993)
@@ -160,5 +160,5 @@
              * writing the VMCB back to guest memory.
              */
-            HMSvmNstGstVmExitNotify(pVCpu, IEM_GET_CTX(pVCpu));
+            HMNotifySvmNstGstVmexit(pVCpu, IEM_GET_CTX(pVCpu));
 
             Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
@@ -341,5 +341,5 @@
     /* CLGI/STGI may not have been intercepted and thus not executed in IEM. */
     if (   HMIsEnabled(pVCpu->CTX_SUFF(pVM))
-        && HMSvmIsVGifActive(pVCpu->CTX_SUFF(pVM)))
+        && HMIsSvmVGifActive(pVCpu->CTX_SUFF(pVM)))
         return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
 # endif
@@ -842,5 +842,5 @@
         /* If CLGI/STGI isn't intercepted we force IEM-only nested-guest execution here. */
         if (   HMIsEnabled(pVM)
-            && HMSvmIsVGifActive(pVM))
+            && HMIsSvmVGifActive(pVM))
             return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
 # endif
@@ -990,5 +990,5 @@
     SVMIOIOEXITINFO IoExitInfo;
     void *pvIoBitmap = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvIoBitmap);
-    bool const fIntercept = HMSvmIsIOInterceptActive(pvIoBitmap, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep,
+    bool const fIntercept = HMIsSvmIoInterceptActive(pvIoBitmap, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep,
                                                        fStrIo, &IoExitInfo);
     if (fIntercept)
@@ -1039,5 +1039,5 @@
     uint16_t offMsrpm;
     uint8_t  uMsrpmBit;
-    int rc = HMSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
+    int rc = HMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
     if (RT_SUCCESS(rc))
     {
@@ -1417,5 +1417,5 @@
     if (VM_IS_HM_ENABLED(pVCpu->CTX_SUFF(pVM)))
     {
-        int rc = HMHCSvmMaybeMovTprHypercall(pVCpu);
+        int rc = HMHCMaybeMovTprSvmHypercall(pVCpu);
         if (RT_SUCCESS(rc))
         {
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h	(revision 76993)
@@ -148,5 +148,5 @@
     { \
         Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
-            HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
+            HMGetVmxDiagDesc(a_VmxDiag), (a_pszFailure))); \
         (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
         return VERR_VMX_VMENTRY_FAILED; \
@@ -158,5 +158,5 @@
     { \
         Log(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
-            HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
+            HMGetVmxDiagDesc(a_VmxDiag), (a_pszFailure))); \
         (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
         return VERR_VMX_VMEXIT_FAILED; \
@@ -2039,5 +2039,5 @@
      * See Intel spec. 27.7 "VMX Aborts".
      */
-    LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMVmxGetAbortDesc(enmAbort)));
+    LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMGetVmxAbortDesc(enmAbort)));
 
     /* We don't support SMX yet. */
@@ -3073,5 +3073,5 @@
         Assert(pbIoBitmapA);
         Assert(pbIoBitmapB);
-        return HMVmxGetIoBitmapPermission(pbIoBitmapA, pbIoBitmapB, u16Port, cbAccess);
+        return HMGetVmxIoBitmapPermission(pbIoBitmapA, pbIoBitmapB, u16Port, cbAccess);
     }
 
@@ -7485,5 +7485,5 @@
         {
             VMXMSREXITREAD enmRead;
-            int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, &enmRead,
+            int rc = HMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, &enmRead,
                                              NULL /* penmWrite */);
             AssertRC(rc);
@@ -7494,5 +7494,5 @@
         {
             VMXMSREXITWRITE enmWrite;
-            int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, NULL /* penmRead */,
+            int rc = HMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, NULL /* penmRead */,
                                              &enmWrite);
             AssertRC(rc);
Index: /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 76993)
@@ -3422,5 +3422,5 @@
      * Notify HM.
      */
-    HMHCPagingModeChanged(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
+    HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
     return rc;
 }
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp	(revision 76993)
@@ -732,5 +732,5 @@
         PGM_INVL_VCPU_TLBS(VMMGetCpu0(pVM));
 #else
-    HMFlushTLBOnAllVCpus(pVM);
+    HMFlushTlbOnAllVCpus(pVM);
 #endif
 
Index: /trunk/src/VBox/VMM/VMMR0/HMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMR0.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMR0/HMR0.cpp	(revision 76993)
@@ -1176,5 +1176,5 @@
         pVM->hm.s.vmx.u64HostEfer           = g_HmR0.hwvirt.u.vmx.u64HostEfer;
         pVM->hm.s.vmx.u64HostSmmMonitorCtl  = g_HmR0.hwvirt.u.vmx.u64HostSmmMonitorCtl;
-        HMVmxGetVmxMsrsFromHwvirtMsrs(&g_HmR0.hwvirt.Msrs, &pVM->hm.s.vmx.Msrs);
+        HMGetVmxMsrsFromHwvirtMsrs(&g_HmR0.hwvirt.Msrs, &pVM->hm.s.vmx.Msrs);
     }
     else if (pVM->hm.s.svm.fSupported)
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 76993)
@@ -712,5 +712,5 @@
     uint32_t u32Model;
     uint32_t u32Stepping;
-    if (HMSvmIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
+    if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
     {
         Log4Func(("AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
@@ -872,5 +872,5 @@
     uint16_t    offMsrpm;
     uint8_t     uMsrpmBit;
-    int rc = HMSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
+    int rc = HMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
     AssertRC(rc);
 
@@ -2453,5 +2453,5 @@
     {
         Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);    /* Hardware supports it. */
-        Assert(HMSvmIsVGifActive(pVCpu->CTX_SUFF(pVM)));                                        /* VM has configured it. */
+        Assert(HMIsSvmVGifActive(pVCpu->CTX_SUFF(pVM)));                                        /* VM has configured it. */
         pVmcb->ctrl.IntCtrl.n.u1VGif = CPUMGetGuestGif(pCtx);
     }
@@ -2543,5 +2543,5 @@
  * @param   pVCpu           The cross context virtual CPU structure.
  *
- * @sa      HMSvmNstGstVmExitNotify.
+ * @sa      HMNotifySvmNstGstVmexit.
  */
 static bool hmR0SvmCacheVmcbNested(PVMCPU pVCpu)
@@ -2806,5 +2806,5 @@
             {
                 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));    /* We don't yet support passing VGIF feature to the guest. */
-                Assert(HMSvmIsVGifActive(pVCpu->CTX_SUFF(pVM)));    /* VM has configured it. */
+                Assert(HMIsSvmVGifActive(pVCpu->CTX_SUFF(pVM)));    /* VM has configured it. */
                 CPUMSetGuestGif(pCtx, pVmcbCtrl->IntCtrl.n.u1VGif);
             }
@@ -3337,5 +3337,5 @@
         /* Apply the nested-guest VMCB's TSC offset over the guest TSC offset. */
         if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
-            uTscOffset = HMSvmNstGstApplyTscOffset(pVCpu, uTscOffset);
+            uTscOffset = HMApplySvmNstGstTscOffset(pVCpu, uTscOffset);
 #endif
 
@@ -4686,5 +4686,5 @@
  *          hmR0SvmNstGstUndoTscOffset() needs adjusting.
  *
- * @sa      HMSvmNstGstApplyTscOffset().
+ * @sa      HMApplySvmNstGstTscOffset().
  */
 DECLINLINE(uint64_t) hmR0SvmNstGstUndoTscOffset(PVMCPU pVCpu, uint64_t uTicks)
@@ -4727,5 +4727,5 @@
         else
         {
-            /* The nested-guest VMCB TSC offset shall eventually be restored on #VMEXIT via HMSvmNstGstVmExitNotify(). */
+            /* The nested-guest VMCB TSC offset shall eventually be restored on #VMEXIT via HMNotifySvmNstGstVmexit(). */
             uint64_t const uGstTsc = hmR0SvmNstGstUndoTscOffset(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset);
             TMCpuTickSetLastSeen(pVCpu, uGstTsc);
@@ -5167,5 +5167,5 @@
     const bool        fStrIo        = pIoExitInfo->n.u1Str;
 
-    return HMSvmIsIOInterceptActive(pvIoBitmap, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo,
+    return HMIsSvmIoInterceptActive(pvIoBitmap, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo,
                                       NULL /* pIoExitInfo */);
 }
@@ -5259,5 +5259,5 @@
                 uint16_t offMsrpm;
                 uint8_t  uMsrpmBit;
-                int rc = HMSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
+                int rc = HMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
                 if (RT_SUCCESS(rc))
                 {
Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 76993)
@@ -1756,6 +1756,6 @@
             VMXMSREXITREAD  enmRead;
             VMXMSREXITWRITE enmWrite;
-            rc = HMVmxGetMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, pGuestMsr->u32Msr, &enmRead, &enmWrite);
-            AssertMsgReturnVoid(rc == VINF_SUCCESS, ("HMVmxGetMsrPermission! failed. rc=%Rrc\n", rc));
+            rc = HMGetVmxMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, pGuestMsr->u32Msr, &enmRead, &enmWrite);
+            AssertMsgReturnVoid(rc == VINF_SUCCESS, ("HMGetVmxMsrPermission! failed. rc=%Rrc\n", rc));
             if (pGuestMsr->u32Msr == MSR_K6_EFER)
             {
@@ -11986,5 +11986,5 @@
             VMXMSREXITREAD  enmRead;
             VMXMSREXITWRITE enmWrite;
-            int rc2 = HMVmxGetMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, idMsr, &enmRead, &enmWrite);
+            int rc2 = HMGetVmxMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, idMsr, &enmRead, &enmWrite);
             AssertRCReturn(rc2, rc2);
             if (enmRead == VMXMSREXIT_PASSTHRU_READ)
@@ -12131,5 +12131,5 @@
                         VMXMSREXITREAD  enmRead;
                         VMXMSREXITWRITE enmWrite;
-                        int rc2 = HMVmxGetMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, idMsr, &enmRead, &enmWrite);
+                        int rc2 = HMGetVmxMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, idMsr, &enmRead, &enmWrite);
                         AssertRCReturn(rc2, rc2);
                         if (enmWrite == VMXMSREXIT_PASSTHRU_WRITE)
@@ -13295,5 +13295,5 @@
              * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
              */
-            if (HMVmxCanExecuteGuest(pVCpu, pCtx))
+            if (HMCanExecuteVmxGuest(pVCpu, pCtx))
             {
                 Log4Func(("Mode changed but guest still suitable for executing using VT-x\n"));
Index: /trunk/src/VBox/VMM/VMMR3/CPUM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 76993)
@@ -1763,7 +1763,7 @@
             {
                 if (fCaps & SUPVTCAPS_VT_X)
-                    HMVmxGetVmxMsrsFromHwvirtMsrs(&HwvirtMsrs, &pMsrs->hwvirt.vmx);
+                    HMGetVmxMsrsFromHwvirtMsrs(&HwvirtMsrs, &pMsrs->hwvirt.vmx);
                 else
-                    HMVmxGetSvmMsrsFromHwvirtMsrs(&HwvirtMsrs, &pMsrs->hwvirt.svm);
+                    HMGetSvmMsrsFromHwvirtMsrs(&HwvirtMsrs, &pMsrs->hwvirt.svm);
                 return VINF_SUCCESS;
             }
@@ -3390,6 +3390,6 @@
         pHlp->pfnPrintf(pHlp, "%sHeader:\n", pszPrefix);
         pHlp->pfnPrintf(pHlp, "  %sVMCS revision id           = %#RX32\n",   pszPrefix, pVmcs->u32VmcsRevId);
-        pHlp->pfnPrintf(pHlp, "  %sVMX-abort id               = %#RX32 (%s)\n", pszPrefix, pVmcs->enmVmxAbort, HMVmxGetAbortDesc(pVmcs->enmVmxAbort));
-        pHlp->pfnPrintf(pHlp, "  %sVMCS state                 = %#x (%s)\n", pszPrefix, pVmcs->fVmcsState, HMVmxGetVmcsStateDesc(pVmcs->fVmcsState));
+        pHlp->pfnPrintf(pHlp, "  %sVMX-abort id               = %#RX32 (%s)\n", pszPrefix, pVmcs->enmVmxAbort, HMGetVmxAbortDesc(pVmcs->enmVmxAbort));
+        pHlp->pfnPrintf(pHlp, "  %sVMCS state                 = %#x (%s)\n", pszPrefix, pVmcs->fVmcsState, HMGetVmxVmcsStateDesc(pVmcs->fVmcsState));
     }
 
@@ -3420,5 +3420,5 @@
             uint8_t  const uType = VMX_ENTRY_INT_INFO_TYPE(fInfo);
             pHlp->pfnPrintf(pHlp, "    %sValid                      = %RTbool\n", pszPrefix, VMX_ENTRY_INT_INFO_IS_VALID(fInfo));
-            pHlp->pfnPrintf(pHlp, "    %sType                       = %#x (%s)\n", pszPrefix, uType, HMVmxGetEntryIntInfoTypeDesc(uType));
+            pHlp->pfnPrintf(pHlp, "    %sType                       = %#x (%s)\n", pszPrefix, uType, HMGetVmxEntryIntInfoTypeDesc(uType));
             pHlp->pfnPrintf(pHlp, "    %sVector                     = %#x\n",     pszPrefix, VMX_ENTRY_INT_INFO_VECTOR(fInfo));
             pHlp->pfnPrintf(pHlp, "    %sNMI-unblocking-IRET        = %RTbool\n", pszPrefix, VMX_ENTRY_INT_INFO_IS_NMI_UNBLOCK_IRET(fInfo));
@@ -3561,5 +3561,5 @@
 
         /* 32-bit. */
-        pHlp->pfnPrintf(pHlp, "  %sExit reason                = %u (%s)\n",  pszPrefix, pVmcs->u32RoExitReason, HMR3GetVmxExitName(pVmcs->u32RoExitReason));
+        pHlp->pfnPrintf(pHlp, "  %sExit reason                = %u (%s)\n",  pszPrefix, pVmcs->u32RoExitReason, HMGetVmxExitName(pVmcs->u32RoExitReason));
         pHlp->pfnPrintf(pHlp, "  %sExit qualification         = %#RX64\n",   pszPrefix, pVmcs->u64RoExitQual.u);
         pHlp->pfnPrintf(pHlp, "  %sVM-instruction error       = %#RX32\n",   pszPrefix, pVmcs->u32RoVmInstrError);
@@ -3569,5 +3569,5 @@
             uint8_t  const uType = VMX_EXIT_INT_INFO_TYPE(fInfo);
             pHlp->pfnPrintf(pHlp, "    %sValid                      = %RTbool\n", pszPrefix, VMX_EXIT_INT_INFO_IS_VALID(fInfo));
-            pHlp->pfnPrintf(pHlp, "    %sType                       = %#x (%s)\n",     pszPrefix, uType, HMVmxGetExitIntInfoTypeDesc(uType));
+            pHlp->pfnPrintf(pHlp, "    %sType                       = %#x (%s)\n",     pszPrefix, uType, HMGetVmxExitIntInfoTypeDesc(uType));
             pHlp->pfnPrintf(pHlp, "    %sVector                     = %#x\n",     pszPrefix, VMX_EXIT_INT_INFO_VECTOR(fInfo));
             pHlp->pfnPrintf(pHlp, "    %sNMI-unblocking-IRET        = %RTbool\n", pszPrefix, VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(fInfo));
@@ -3580,5 +3580,5 @@
             uint8_t  const uType = VMX_IDT_VECTORING_INFO_TYPE(fInfo);
             pHlp->pfnPrintf(pHlp, "    %sValid                      = %RTbool\n", pszPrefix, VMX_IDT_VECTORING_INFO_IS_VALID(fInfo));
-            pHlp->pfnPrintf(pHlp, "    %sType                       = %#x (%s)\n",     pszPrefix, uType, HMVmxGetIdtVectoringInfoTypeDesc(uType));
+            pHlp->pfnPrintf(pHlp, "    %sType                       = %#x (%s)\n",     pszPrefix, uType, HMGetVmxIdtVectoringInfoTypeDesc(uType));
             pHlp->pfnPrintf(pHlp, "    %sVector                     = %#x\n",     pszPrefix, VMX_IDT_VECTORING_INFO_VECTOR(fInfo));
             pHlp->pfnPrintf(pHlp, "    %sError-code valid           = %RTbool\n", pszPrefix, VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(fInfo));
@@ -3707,6 +3707,6 @@
         pHlp->pfnPrintf(pHlp, "  GCPhysVmcs                 = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysVmcs);
         pHlp->pfnPrintf(pHlp, "  GCPhysShadowVmcs           = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysShadowVmcs);
-        pHlp->pfnPrintf(pHlp, "  enmDiag                    = %u (%s)\n",   pCtx->hwvirt.vmx.enmDiag, HMVmxGetDiagDesc(pCtx->hwvirt.vmx.enmDiag));
-        pHlp->pfnPrintf(pHlp, "  enmAbort                   = %u (%s)\n",   pCtx->hwvirt.vmx.enmAbort, HMVmxGetAbortDesc(pCtx->hwvirt.vmx.enmAbort));
+        pHlp->pfnPrintf(pHlp, "  enmDiag                    = %u (%s)\n",   pCtx->hwvirt.vmx.enmDiag, HMGetVmxDiagDesc(pCtx->hwvirt.vmx.enmDiag));
+        pHlp->pfnPrintf(pHlp, "  enmAbort                   = %u (%s)\n",   pCtx->hwvirt.vmx.enmAbort, HMGetVmxAbortDesc(pCtx->hwvirt.vmx.enmAbort));
         pHlp->pfnPrintf(pHlp, "  uAbortAux                  = %u (%#x)\n",  pCtx->hwvirt.vmx.uAbortAux, pCtx->hwvirt.vmx.uAbortAux);
         pHlp->pfnPrintf(pHlp, "  fInVmxRootMode             = %RTbool\n",   pCtx->hwvirt.vmx.fInVmxRootMode);
Index: /trunk/src/VBox/VMM/VMMR3/EMR3Dbg.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/EMR3Dbg.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMR3/EMR3Dbg.cpp	(revision 76993)
@@ -130,9 +130,9 @@
 
         case EMEXIT_F_KIND_VMX:
-            pszExitName = HMR3GetVmxExitName( uFlagsAndType & EMEXIT_F_TYPE_MASK);
+            pszExitName = HMGetVmxExitName( uFlagsAndType & EMEXIT_F_TYPE_MASK);
             break;
 
         case EMEXIT_F_KIND_SVM:
-            pszExitName = HMR3GetSvmExitName( uFlagsAndType & EMEXIT_F_TYPE_MASK);
+            pszExitName = HMGetSvmExitName( uFlagsAndType & EMEXIT_F_TYPE_MASK);
             break;
 
Index: /trunk/src/VBox/VMM/VMMR3/HM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 76993)
@@ -75,260 +75,6 @@
 
 /*********************************************************************************************************************************
-*   Global Variables                                                                                                             *
+*   Defined Constants And Macros                                                                                                 *
 *********************************************************************************************************************************/
-#define EXIT_REASON(def, val, str) #def " - " #val " - " str
-#define EXIT_REASON_NIL() NULL
-/** Exit reason descriptions for VT-x, used to describe statistics and exit
- *  history. */
-static const char * const g_apszVmxExitReasons[MAX_EXITREASON_STAT] =
-{
-    EXIT_REASON(VMX_EXIT_XCPT_OR_NMI            ,   0, "Exception or non-maskable interrupt (NMI)."),
-    EXIT_REASON(VMX_EXIT_EXT_INT                ,   1, "External interrupt."),
-    EXIT_REASON(VMX_EXIT_TRIPLE_FAULT           ,   2, "Triple fault."),
-    EXIT_REASON(VMX_EXIT_INIT_SIGNAL            ,   3, "INIT signal."),
-    EXIT_REASON(VMX_EXIT_SIPI                   ,   4, "Start-up IPI (SIPI)."),
-    EXIT_REASON(VMX_EXIT_IO_SMI_IRQ             ,   5, "I/O system-management interrupt (SMI)."),
-    EXIT_REASON(VMX_EXIT_SMI_IRQ                ,   6, "Other SMI."),
-    EXIT_REASON(VMX_EXIT_INT_WINDOW             ,   7, "Interrupt window."),
-    EXIT_REASON(VMX_EXIT_NMI_WINDOW             ,   8, "NMI window."),
-    EXIT_REASON(VMX_EXIT_TASK_SWITCH            ,   9, "Task switch."),
-    EXIT_REASON(VMX_EXIT_CPUID                  ,  10, "CPUID instruction."),
-    EXIT_REASON(VMX_EXIT_GETSEC                 ,  11, "GETSEC instrunction."),
-    EXIT_REASON(VMX_EXIT_HLT                    ,  12, "HLT instruction."),
-    EXIT_REASON(VMX_EXIT_INVD                   ,  13, "INVD instruction."),
-    EXIT_REASON(VMX_EXIT_INVLPG                 ,  14, "INVLPG instruction."),
-    EXIT_REASON(VMX_EXIT_RDPMC                  ,  15, "RDPMCinstruction."),
-    EXIT_REASON(VMX_EXIT_RDTSC                  ,  16, "RDTSC instruction."),
-    EXIT_REASON(VMX_EXIT_RSM                    ,  17, "RSM instruction in SMM."),
-    EXIT_REASON(VMX_EXIT_VMCALL                 ,  18, "VMCALL instruction."),
-    EXIT_REASON(VMX_EXIT_VMCLEAR                ,  19, "VMCLEAR instruction."),
-    EXIT_REASON(VMX_EXIT_VMLAUNCH               ,  20, "VMLAUNCH instruction."),
-    EXIT_REASON(VMX_EXIT_VMPTRLD                ,  21, "VMPTRLD instruction."),
-    EXIT_REASON(VMX_EXIT_VMPTRST                ,  22, "VMPTRST instruction."),
-    EXIT_REASON(VMX_EXIT_VMREAD                 ,  23, "VMREAD instruction."),
-    EXIT_REASON(VMX_EXIT_VMRESUME               ,  24, "VMRESUME instruction."),
-    EXIT_REASON(VMX_EXIT_VMWRITE                ,  25, "VMWRITE instruction."),
-    EXIT_REASON(VMX_EXIT_VMXOFF                 ,  26, "VMXOFF instruction."),
-    EXIT_REASON(VMX_EXIT_VMXON                  ,  27, "VMXON instruction."),
-    EXIT_REASON(VMX_EXIT_MOV_CRX                ,  28, "Control-register accesses."),
-    EXIT_REASON(VMX_EXIT_MOV_DRX                ,  29, "Debug-register accesses."),
-    EXIT_REASON(VMX_EXIT_PORT_IO                ,  30, "I/O instruction."),
-    EXIT_REASON(VMX_EXIT_RDMSR                  ,  31, "RDMSR instruction."),
-    EXIT_REASON(VMX_EXIT_WRMSR                  ,  32, "WRMSR instruction."),
-    EXIT_REASON(VMX_EXIT_ERR_INVALID_GUEST_STATE,  33, "VM-entry failure due to invalid guest state."),
-    EXIT_REASON(VMX_EXIT_ERR_MSR_LOAD           ,  34, "VM-entry failure due to MSR loading."),
-    EXIT_REASON_NIL(),
-    EXIT_REASON(VMX_EXIT_MWAIT                  ,  36, "MWAIT instruction."),
-    EXIT_REASON(VMX_EXIT_MTF                    ,  37, "Monitor Trap Flag."),
-    EXIT_REASON_NIL(),
-    EXIT_REASON(VMX_EXIT_MONITOR                ,  39, "MONITOR instruction."),
-    EXIT_REASON(VMX_EXIT_PAUSE                  ,  40, "PAUSE instruction."),
-    EXIT_REASON(VMX_EXIT_ERR_MACHINE_CHECK      ,  41, "VM-entry failure due to machine-check."),
-    EXIT_REASON_NIL(),
-    EXIT_REASON(VMX_EXIT_TPR_BELOW_THRESHOLD    ,  43, "TPR below threshold (MOV to CR8)."),
-    EXIT_REASON(VMX_EXIT_APIC_ACCESS            ,  44, "APIC access."),
-    EXIT_REASON(VMX_EXIT_VIRTUALIZED_EOI        ,  45, "Virtualized EOI."),
-    EXIT_REASON(VMX_EXIT_GDTR_IDTR_ACCESS       ,  46, "GDTR/IDTR access using LGDT/SGDT/LIDT/SIDT."),
-    EXIT_REASON(VMX_EXIT_LDTR_TR_ACCESS         ,  47, "LDTR/TR access using LLDT/SLDT/LTR/STR."),
-    EXIT_REASON(VMX_EXIT_EPT_VIOLATION          ,  48, "EPT violation."),
-    EXIT_REASON(VMX_EXIT_EPT_MISCONFIG          ,  49, "EPT misconfiguration."),
-    EXIT_REASON(VMX_EXIT_INVEPT                 ,  50, "INVEPT instruction."),
-    EXIT_REASON(VMX_EXIT_RDTSCP                 ,  51, "RDTSCP instruction."),
-    EXIT_REASON(VMX_EXIT_PREEMPT_TIMER          ,  52, "VMX-preemption timer expired."),
-    EXIT_REASON(VMX_EXIT_INVVPID                ,  53, "INVVPID instruction."),
-    EXIT_REASON(VMX_EXIT_WBINVD                 ,  54, "WBINVD instruction."),
-    EXIT_REASON(VMX_EXIT_XSETBV                 ,  55, "XSETBV instruction."),
-    EXIT_REASON(VMX_EXIT_APIC_WRITE             ,  56, "APIC write completed to virtual-APIC page."),
-    EXIT_REASON(VMX_EXIT_RDRAND                 ,  57, "RDRAND instruction."),
-    EXIT_REASON(VMX_EXIT_INVPCID                ,  58, "INVPCID instruction."),
-    EXIT_REASON(VMX_EXIT_VMFUNC                 ,  59, "VMFUNC instruction."),
-    EXIT_REASON(VMX_EXIT_ENCLS                  ,  60, "ENCLS instruction."),
-    EXIT_REASON(VMX_EXIT_RDSEED                 ,  61, "RDSEED instruction."),
-    EXIT_REASON(VMX_EXIT_PML_FULL               ,  62, "Page-modification log full."),
-    EXIT_REASON(VMX_EXIT_XSAVES                 ,  63, "XSAVES instruction."),
-    EXIT_REASON(VMX_EXIT_XRSTORS                ,  64, "XRSTORS instruction.")
-};
-/** Array index of the last valid VT-x exit reason. */
-#define MAX_EXITREASON_VTX                         64
-
-/** A partial list of \#EXIT reason descriptions for AMD-V, used to describe
- *  statistics and exit history.
- *
- *  @note AMD-V have annoyingly large gaps (e.g. \#NPF VMEXIT comes at 1024),
- *        this array doesn't contain the entire set of exit reasons, we
- *        handle them via hmSvmGetSpecialExitReasonDesc(). */
-static const char * const g_apszSvmExitReasons[MAX_EXITREASON_STAT] =
-{
-    EXIT_REASON(SVM_EXIT_READ_CR0     ,    0, "Read CR0."),
-    EXIT_REASON(SVM_EXIT_READ_CR1     ,    1, "Read CR1."),
-    EXIT_REASON(SVM_EXIT_READ_CR2     ,    2, "Read CR2."),
-    EXIT_REASON(SVM_EXIT_READ_CR3     ,    3, "Read CR3."),
-    EXIT_REASON(SVM_EXIT_READ_CR4     ,    4, "Read CR4."),
-    EXIT_REASON(SVM_EXIT_READ_CR5     ,    5, "Read CR5."),
-    EXIT_REASON(SVM_EXIT_READ_CR6     ,    6, "Read CR6."),
-    EXIT_REASON(SVM_EXIT_READ_CR7     ,    7, "Read CR7."),
-    EXIT_REASON(SVM_EXIT_READ_CR8     ,    8, "Read CR8."),
-    EXIT_REASON(SVM_EXIT_READ_CR9     ,    9, "Read CR9."),
-    EXIT_REASON(SVM_EXIT_READ_CR10    ,   10, "Read CR10."),
-    EXIT_REASON(SVM_EXIT_READ_CR11    ,   11, "Read CR11."),
-    EXIT_REASON(SVM_EXIT_READ_CR12    ,   12, "Read CR12."),
-    EXIT_REASON(SVM_EXIT_READ_CR13    ,   13, "Read CR13."),
-    EXIT_REASON(SVM_EXIT_READ_CR14    ,   14, "Read CR14."),
-    EXIT_REASON(SVM_EXIT_READ_CR15    ,   15, "Read CR15."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR0    ,   16, "Write CR0."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR1    ,   17, "Write CR1."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR2    ,   18, "Write CR2."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR3    ,   19, "Write CR3."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR4    ,   20, "Write CR4."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR5    ,   21, "Write CR5."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR6    ,   22, "Write CR6."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR7    ,   23, "Write CR7."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR8    ,   24, "Write CR8."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR9    ,   25, "Write CR9."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR10   ,   26, "Write CR10."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR11   ,   27, "Write CR11."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR12   ,   28, "Write CR12."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR13   ,   29, "Write CR13."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR14   ,   30, "Write CR14."),
-    EXIT_REASON(SVM_EXIT_WRITE_CR15   ,   31, "Write CR15."),
-    EXIT_REASON(SVM_EXIT_READ_DR0     ,   32, "Read DR0."),
-    EXIT_REASON(SVM_EXIT_READ_DR1     ,   33, "Read DR1."),
-    EXIT_REASON(SVM_EXIT_READ_DR2     ,   34, "Read DR2."),
-    EXIT_REASON(SVM_EXIT_READ_DR3     ,   35, "Read DR3."),
-    EXIT_REASON(SVM_EXIT_READ_DR4     ,   36, "Read DR4."),
-    EXIT_REASON(SVM_EXIT_READ_DR5     ,   37, "Read DR5."),
-    EXIT_REASON(SVM_EXIT_READ_DR6     ,   38, "Read DR6."),
-    EXIT_REASON(SVM_EXIT_READ_DR7     ,   39, "Read DR7."),
-    EXIT_REASON(SVM_EXIT_READ_DR8     ,   40, "Read DR8."),
-    EXIT_REASON(SVM_EXIT_READ_DR9     ,   41, "Read DR9."),
-    EXIT_REASON(SVM_EXIT_READ_DR10    ,   42, "Read DR10."),
-    EXIT_REASON(SVM_EXIT_READ_DR11    ,   43, "Read DR11"),
-    EXIT_REASON(SVM_EXIT_READ_DR12    ,   44, "Read DR12."),
-    EXIT_REASON(SVM_EXIT_READ_DR13    ,   45, "Read DR13."),
-    EXIT_REASON(SVM_EXIT_READ_DR14    ,   46, "Read DR14."),
-    EXIT_REASON(SVM_EXIT_READ_DR15    ,   47, "Read DR15."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR0    ,   48, "Write DR0."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR1    ,   49, "Write DR1."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR2    ,   50, "Write DR2."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR3    ,   51, "Write DR3."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR4    ,   52, "Write DR4."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR5    ,   53, "Write DR5."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR6    ,   54, "Write DR6."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR7    ,   55, "Write DR7."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR8    ,   56, "Write DR8."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR9    ,   57, "Write DR9."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR10   ,   58, "Write DR10."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR11   ,   59, "Write DR11."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR12   ,   60, "Write DR12."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR13   ,   61, "Write DR13."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR14   ,   62, "Write DR14."),
-    EXIT_REASON(SVM_EXIT_WRITE_DR15   ,   63, "Write DR15."),
-    EXIT_REASON(SVM_EXIT_XCPT_0       ,   64, "Exception 0  (#DE)."),
-    EXIT_REASON(SVM_EXIT_XCPT_1       ,   65, "Exception 1  (#DB)."),
-    EXIT_REASON(SVM_EXIT_XCPT_2       ,   66, "Exception 2  (#NMI)."),
-    EXIT_REASON(SVM_EXIT_XCPT_3       ,   67, "Exception 3  (#BP)."),
-    EXIT_REASON(SVM_EXIT_XCPT_4       ,   68, "Exception 4  (#OF)."),
-    EXIT_REASON(SVM_EXIT_XCPT_5       ,   69, "Exception 5  (#BR)."),
-    EXIT_REASON(SVM_EXIT_XCPT_6       ,   70, "Exception 6  (#UD)."),
-    EXIT_REASON(SVM_EXIT_XCPT_7       ,   71, "Exception 7  (#NM)."),
-    EXIT_REASON(SVM_EXIT_XCPT_8       ,   72, "Exception 8  (#DF)."),
-    EXIT_REASON(SVM_EXIT_XCPT_9       ,   73, "Exception 9  (#CO_SEG_OVERRUN)."),
-    EXIT_REASON(SVM_EXIT_XCPT_10      ,   74, "Exception 10 (#TS)."),
-    EXIT_REASON(SVM_EXIT_XCPT_11      ,   75, "Exception 11 (#NP)."),
-    EXIT_REASON(SVM_EXIT_XCPT_12      ,   76, "Exception 12 (#SS)."),
-    EXIT_REASON(SVM_EXIT_XCPT_13      ,   77, "Exception 13 (#GP)."),
-    EXIT_REASON(SVM_EXIT_XCPT_14      ,   78, "Exception 14 (#PF)."),
-    EXIT_REASON(SVM_EXIT_XCPT_15      ,   79, "Exception 15 (0x0f)."),
-    EXIT_REASON(SVM_EXIT_XCPT_16      ,   80, "Exception 16 (#MF)."),
-    EXIT_REASON(SVM_EXIT_XCPT_17      ,   81, "Exception 17 (#AC)."),
-    EXIT_REASON(SVM_EXIT_XCPT_18      ,   82, "Exception 18 (#MC)."),
-    EXIT_REASON(SVM_EXIT_XCPT_19      ,   83, "Exception 19 (#XF)."),
-    EXIT_REASON(SVM_EXIT_XCPT_20      ,   84, "Exception 20 (#VE)."),
-    EXIT_REASON(SVM_EXIT_XCPT_21      ,   85, "Exception 22 (0x15)."),
-    EXIT_REASON(SVM_EXIT_XCPT_22      ,   86, "Exception 22 (0x16)."),
-    EXIT_REASON(SVM_EXIT_XCPT_23      ,   87, "Exception 23 (0x17)."),
-    EXIT_REASON(SVM_EXIT_XCPT_24      ,   88, "Exception 24 (0x18)."),
-    EXIT_REASON(SVM_EXIT_XCPT_25      ,   89, "Exception 25 (0x19)."),
-    EXIT_REASON(SVM_EXIT_XCPT_26      ,   90, "Exception 26 (0x1a)."),
-    EXIT_REASON(SVM_EXIT_XCPT_27      ,   91, "Exception 27 (0x1b)."),
-    EXIT_REASON(SVM_EXIT_XCPT_28      ,   92, "Exception 28 (0x1c)."),
-    EXIT_REASON(SVM_EXIT_XCPT_29      ,   93, "Exception 29 (0x1d)."),
-    EXIT_REASON(SVM_EXIT_XCPT_30      ,   94, "Exception 30 (#SX)."),
-    EXIT_REASON(SVM_EXIT_XCPT_31      ,   95, "Exception 31 (0x1F)."),
-    EXIT_REASON(SVM_EXIT_INTR         ,   96, "Physical maskable interrupt (host)."),
-    EXIT_REASON(SVM_EXIT_NMI          ,   97, "Physical non-maskable interrupt (host)."),
-    EXIT_REASON(SVM_EXIT_SMI          ,   98, "System management interrupt (host)."),
-    EXIT_REASON(SVM_EXIT_INIT         ,   99, "Physical INIT signal (host)."),
-    EXIT_REASON(SVM_EXIT_VINTR        ,  100, "Virtual interrupt-window exit."),
-    EXIT_REASON(SVM_EXIT_CR0_SEL_WRITE,  101, "Selective CR0 Write (to bits other than CR0.TS and CR0.MP)."),
-    EXIT_REASON(SVM_EXIT_IDTR_READ    ,  102, "Read IDTR."),
-    EXIT_REASON(SVM_EXIT_GDTR_READ    ,  103, "Read GDTR."),
-    EXIT_REASON(SVM_EXIT_LDTR_READ    ,  104, "Read LDTR."),
-    EXIT_REASON(SVM_EXIT_TR_READ      ,  105, "Read TR."),
-    EXIT_REASON(SVM_EXIT_IDTR_WRITE   ,  106, "Write IDTR."),
-    EXIT_REASON(SVM_EXIT_GDTR_WRITE   ,  107, "Write GDTR."),
-    EXIT_REASON(SVM_EXIT_LDTR_WRITE   ,  108, "Write LDTR."),
-    EXIT_REASON(SVM_EXIT_TR_WRITE     ,  109, "Write TR."),
-    EXIT_REASON(SVM_EXIT_RDTSC        ,  110, "RDTSC instruction."),
-    EXIT_REASON(SVM_EXIT_RDPMC        ,  111, "RDPMC instruction."),
-    EXIT_REASON(SVM_EXIT_PUSHF        ,  112, "PUSHF instruction."),
-    EXIT_REASON(SVM_EXIT_POPF         ,  113, "POPF instruction."),
-    EXIT_REASON(SVM_EXIT_CPUID        ,  114, "CPUID instruction."),
-    EXIT_REASON(SVM_EXIT_RSM          ,  115, "RSM instruction."),
-    EXIT_REASON(SVM_EXIT_IRET         ,  116, "IRET instruction."),
-    EXIT_REASON(SVM_EXIT_SWINT        ,  117, "Software interrupt (INTn instructions)."),
-    EXIT_REASON(SVM_EXIT_INVD         ,  118, "INVD instruction."),
-    EXIT_REASON(SVM_EXIT_PAUSE        ,  119, "PAUSE instruction."),
-    EXIT_REASON(SVM_EXIT_HLT          ,  120, "HLT instruction."),
-    EXIT_REASON(SVM_EXIT_INVLPG       ,  121, "INVLPG instruction."),
-    EXIT_REASON(SVM_EXIT_INVLPGA      ,  122, "INVLPGA instruction."),
-    EXIT_REASON(SVM_EXIT_IOIO         ,  123, "IN/OUT/INS/OUTS instruction."),
-    EXIT_REASON(SVM_EXIT_MSR          ,  124, "RDMSR or WRMSR access to protected MSR."),
-    EXIT_REASON(SVM_EXIT_TASK_SWITCH  ,  125, "Task switch."),
-    EXIT_REASON(SVM_EXIT_FERR_FREEZE  ,  126, "FERR Freeze; CPU frozen in an x87/mmx instruction waiting for interrupt."),
-    EXIT_REASON(SVM_EXIT_SHUTDOWN     ,  127, "Shutdown."),
-    EXIT_REASON(SVM_EXIT_VMRUN        ,  128, "VMRUN instruction."),
-    EXIT_REASON(SVM_EXIT_VMMCALL      ,  129, "VMCALL instruction."),
-    EXIT_REASON(SVM_EXIT_VMLOAD       ,  130, "VMLOAD instruction."),
-    EXIT_REASON(SVM_EXIT_VMSAVE       ,  131, "VMSAVE instruction."),
-    EXIT_REASON(SVM_EXIT_STGI         ,  132, "STGI instruction."),
-    EXIT_REASON(SVM_EXIT_CLGI         ,  133, "CLGI instruction."),
-    EXIT_REASON(SVM_EXIT_SKINIT       ,  134, "SKINIT instruction."),
-    EXIT_REASON(SVM_EXIT_RDTSCP       ,  135, "RDTSCP instruction."),
-    EXIT_REASON(SVM_EXIT_ICEBP        ,  136, "ICEBP instruction."),
-    EXIT_REASON(SVM_EXIT_WBINVD       ,  137, "WBINVD instruction."),
-    EXIT_REASON(SVM_EXIT_MONITOR      ,  138, "MONITOR instruction."),
-    EXIT_REASON(SVM_EXIT_MWAIT        ,  139, "MWAIT instruction."),
-    EXIT_REASON(SVM_EXIT_MWAIT_ARMED  ,  140, "MWAIT instruction when armed."),
-    EXIT_REASON(SVM_EXIT_XSETBV       ,  141, "XSETBV instruction."),
-};
-/** Array index of the last valid AMD-V exit reason. */
-#define MAX_EXITREASON_AMDV              141
-
-/** Special exit reasons not covered in the array above. */
-#define SVM_EXIT_REASON_NPF                  EXIT_REASON(SVM_EXIT_NPF                , 1024, "Nested Page Fault.")
-#define SVM_EXIT_REASON_AVIC_INCOMPLETE_IPI  EXIT_REASON(SVM_EXIT_AVIC_INCOMPLETE_IPI, 1025, "AVIC - Incomplete IPI delivery.")
-#define SVM_EXIT_REASON_AVIC_NOACCEL         EXIT_REASON(SVM_EXIT_AVIC_NOACCEL       , 1026, "AVIC - Unhandled register.")
-
-/**
- * Gets the SVM exit reason if it's one of the reasons not present in the @c
- * g_apszSvmExitReasons array.
- *
- * @returns The exit reason or NULL if unknown.
- * @param   uExit       The exit.
- */
-DECLINLINE(const char *) hmSvmGetSpecialExitReasonDesc(uint16_t uExit)
-{
-    switch (uExit)
-    {
-        case SVM_EXIT_NPF:                 return SVM_EXIT_REASON_NPF;
-        case SVM_EXIT_AVIC_INCOMPLETE_IPI: return SVM_EXIT_REASON_AVIC_INCOMPLETE_IPI;
-        case SVM_EXIT_AVIC_NOACCEL:        return SVM_EXIT_REASON_AVIC_NOACCEL;
-    }
-    return EXIT_REASON_NIL();
-}
-#undef EXIT_REASON_NIL
-#undef EXIT_REASON
-
 /** @def HMVMX_REPORT_FEAT
  * Reports VT-x feature to the release log.
@@ -1090,8 +836,5 @@
 #undef HM_REG_COUNTER
 
-        const char *const *papszDesc =
-            ASMIsIntelCpu() || ASMIsViaCentaurCpu() || ASMIsShanghaiCpu()
-            ? &g_apszVmxExitReasons[0]
-            : &g_apszSvmExitReasons[0];
+        bool const fCpuSupportsVmx = ASMIsIntelCpu() || ASMIsViaCentaurCpu() || ASMIsShanghaiCpu();
 
         /*
@@ -1102,11 +845,29 @@
                           (void **)&pVCpu->hm.s.paStatExitReason);
         AssertRCReturn(rc, rc);
-        for (int j = 0; j < MAX_EXITREASON_STAT; j++)
+
+        if (fCpuSupportsVmx)
         {
-            if (papszDesc[j])
+            for (int j = 0; j < MAX_EXITREASON_STAT; j++)
             {
-                rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
-                                     STAMUNIT_OCCURENCES, papszDesc[j], "/HM/CPU%d/Exit/Reason/%02x", i, j);
-                AssertRCReturn(rc, rc);
+                const char *pszExitName = HMGetVmxExitName(j);
+                if (pszExitName)
+                {
+                    rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+                                         STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%d/Exit/Reason/%02x", i, j);
+                    AssertRCReturn(rc, rc);
+                }
+            }
+        }
+        else
+        {
+            for (int j = 0; j < MAX_EXITREASON_STAT; j++)
+            {
+                const char *pszExitName = HMGetSvmExitName(j);
+                if (pszExitName)
+                {
+                    rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+                                         STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%d/Exit/Reason/%02x", i, j);
+                    AssertRCReturn(rc, rc);
+                }
             }
         }
@@ -1121,7 +882,7 @@
 # endif
 
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
         /*
-         * Nested-guest Exit reason stats.
+         * Nested-guest VM-exit reason stats.
          */
         pVCpu->hm.s.paStatNestedExitReason = NULL;
@@ -1129,11 +890,28 @@
                           (void **)&pVCpu->hm.s.paStatNestedExitReason);
         AssertRCReturn(rc, rc);
-        for (int j = 0; j < MAX_EXITREASON_STAT; j++)
+        if (fCpuSupportsVmx)
         {
-            if (papszDesc[j])
+            for (int j = 0; j < MAX_EXITREASON_STAT; j++)
             {
-                rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
-                                     STAMUNIT_OCCURENCES, papszDesc[j], "/HM/CPU%d/NestedExit/Reason/%02x", i, j);
-                AssertRC(rc);
+                const char *pszExitName = HMGetVmxExitName(j);
+                if (pszExitName)
+                {
+                    rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+                                         STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%d/NestedExit/Reason/%02x", i, j);
+                    AssertRC(rc);
+                }
+            }
+        }
+        else
+        {
+            for (int j = 0; j < MAX_EXITREASON_STAT; j++)
+            {
+                const char *pszExitName = HMGetSvmExitName(j);
+                if (pszExitName)
+                {
+                    rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
+                                         STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%d/NestedExit/Reason/%02x", i, j);
+                    AssertRC(rc);
+                }
             }
         }
@@ -1981,5 +1759,5 @@
     uint32_t u32Model;
     uint32_t u32Stepping;
-    if (HMSvmIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
+    if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
         LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
     LogRel(("HM: Max resume loops                  = %u\n",     pVM->hm.s.cMaxResumeLoops));
@@ -3443,32 +3221,4 @@
 
 /**
- * Gets the name of a VT-x exit code.
- *
- * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
- * @param   uExit               The VT-x exit to name.
- */
-VMMR3DECL(const char *) HMR3GetVmxExitName(uint32_t uExit)
-{
-    if (uExit < RT_ELEMENTS(g_apszVmxExitReasons))
-        return g_apszVmxExitReasons[uExit];
-    return NULL;
-}
-
-
-/**
- * Gets the name of an AMD-V exit code.
- *
- * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
- * @param   uExit               The AMD-V exit to name.
- */
-VMMR3DECL(const char *) HMR3GetSvmExitName(uint32_t uExit)
-{
-    if (uExit < RT_ELEMENTS(g_apszSvmExitReasons))
-        return g_apszSvmExitReasons[uExit];
-    return hmSvmGetSpecialExitReasonDesc(uExit);
-}
-
-
-/**
  * Displays HM info.
  *
Index: /trunk/src/VBox/VMM/VMMR3/PGM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGM.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMR3/PGM.cpp	(revision 76993)
@@ -2181,5 +2181,5 @@
             VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
             pgmR3RefreshShadowModeAfterA20Change(pVCpu);
-            HMFlushTLB(pVCpu);
+            HMFlushTlb(pVCpu);
 #endif
         }
Index: /trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp	(revision 76992)
+++ /trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp	(revision 76993)
@@ -4574,5 +4574,5 @@
         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
         pgmR3RefreshShadowModeAfterA20Change(pVCpu);
-        HMFlushTLB(pVCpu);
+        HMFlushTlb(pVCpu);
 #endif
         IEMTlbInvalidateAllPhysical(pVCpu);
Index: /trunk/src/VBox/VMM/include/PGMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/PGMInternal.h	(revision 76992)
+++ /trunk/src/VBox/VMM/include/PGMInternal.h	(revision 76993)
@@ -418,7 +418,7 @@
 # define PGM_INVL_BIG_PG(pVCpu, GCVirt)         ASMReloadCR3()
 #elif defined(IN_RING0)
-# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HMFlushTLB(pVCpu)
+# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HMFlushTlb(pVCpu)
 #else
-# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HMFlushTLB(pVCpu)
+# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HMFlushTlb(pVCpu)
 #endif
 
@@ -431,7 +431,7 @@
 # define PGM_INVL_VCPU_TLBS(pVCpu)             ASMReloadCR3()
 #elif defined(IN_RING0)
-# define PGM_INVL_VCPU_TLBS(pVCpu)             HMFlushTLB(pVCpu)
+# define PGM_INVL_VCPU_TLBS(pVCpu)             HMFlushTlb(pVCpu)
 #else
-# define PGM_INVL_VCPU_TLBS(pVCpu)             HMFlushTLB(pVCpu)
+# define PGM_INVL_VCPU_TLBS(pVCpu)             HMFlushTlb(pVCpu)
 #endif
 
@@ -444,7 +444,7 @@
 # define PGM_INVL_ALL_VCPU_TLBS(pVM)            ASMReloadCR3()
 #elif defined(IN_RING0)
-# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HMFlushTLBOnAllVCpus(pVM)
+# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HMFlushTlbOnAllVCpus(pVM)
 #else
-# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HMFlushTLBOnAllVCpus(pVM)
+# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HMFlushTlbOnAllVCpus(pVM)
 #endif
 
