Index: /trunk/src/VBox/VMM/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/Makefile.kmk	(revision 46663)
+++ /trunk/src/VBox/VMM/Makefile.kmk	(revision 46664)
@@ -56,4 +56,7 @@
 ifdef VBOX_WITH_OLD_VTX_CODE
  VMM_COMMON_DEFS += VBOX_WITH_OLD_VTX_CODE
+endif
+ifdef VBOX_WITH_OLD_AMDV_CODE
+ VMM_COMMON_DEFS += VBOX_WITH_OLD_AMDV_CODE
 endif
 ifdef VBOX_WITH_SAFE_STR 
@@ -529,5 +532,4 @@
  	VMMR0/HMR0.cpp \
  	VMMR0/HMR0A.asm \
- 	VMMR0/HWSVMR0.cpp \
  	VMMR0/PDMR0Device.cpp \
  	VMMR0/PDMR0Driver.cpp \
@@ -595,4 +597,9 @@
   VMMR0_SOURCES += VMMR0/HMVMXR0.cpp
  endif
+ ifdef VBOX_WITH_OLD_AMDV_CODE
+  VMMR0_SOURCES += VMMR0/HWSVMR0.cpp
+ else
+  VMMR0_SOURCES += VMMR0/HMSVMR0.cpp
+ endif
  VMMR0_SOURCES.amd64 = \
  	VMMR0/VMMR0JmpA-amd64.asm
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 46663)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 46664)
@@ -19,4 +19,15 @@
 *   Header Files                                                               *
 *******************************************************************************/
+#define LOG_GROUP LOG_GROUP_HM
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/thread.h>
+
+#include "HMInternal.h"
+#include <VBox/vmm/vm.h>
+#include "HWSVMR0.h"
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/tm.h>
 
 #ifdef DEBUG_ramshankar
@@ -75,10 +86,10 @@
     do \
     { \
-        pCtx->reg.Sel       = pVmcb->guest.REG.u16Sel; \
-        pCtx->reg.ValidSel  = pVmcb->guest.REG.u16Sel; \
-        pCtx->reg.fFlags    = CPUMSELREG_FLAGS_VALID; \
-        pCtx->reg.u32Limit  = pVmcb->guest.REG.u32Limit; \
-        pCtx->reg.u64Base   = pVmcb->guest.REG.u64Base; \
-        pCtx->reg.Attr.u    = HMSVM_VMCB_2_CPU_SEG_ATTR(pVmcb->guest.REG.u16Attr); \
+        pMixedCtx->reg.Sel       = pVmcb->guest.REG.u16Sel; \
+        pMixedCtx->reg.ValidSel  = pVmcb->guest.REG.u16Sel; \
+        pMixedCtx->reg.fFlags    = CPUMSELREG_FLAGS_VALID; \
+        pMixedCtx->reg.u32Limit  = pVmcb->guest.REG.u32Limit; \
+        pMixedCtx->reg.u64Base   = pVmcb->guest.REG.u64Base; \
+        pMixedCtx->reg.Attr.u    = HMSVM_VMCB_2_CPU_SEG_ATTR(pVmcb->guest.REG.u16Attr); \
     } while (0)
 /** @} */
@@ -145,15 +156,15 @@
 #define HMSVM_VMCB_CLEAN_AVIC                   RT_BIT(11)
 /** Mask of all valid VMCB Clean bits. */
-#define HMSVM_VMCB_CLEAN_ALL                    (  HMSVM_VMCB_CLEAN_INTERCEPTS
-                                                 | HMSVM_VMCB_CLEAN_IOPM_MSRPM
-                                                 | HMSVM_VMCB_CLEAN_ASID
-                                                 | HMSVM_VMCB_CLEAN_TPR
-                                                 | HMSVM_VMCB_CLEAN_NP
-                                                 | HMSVM_VMCB_CLEAN_CRX
-                                                 | HMSVM_VMCB_CLEAN_DRX
-                                                 | HMSVM_VMCB_CLEAN_DT
-                                                 | HMSVM_VMCB_CLEAN_SEG
-                                                 | HMSVM_VMCB_CLEAN_CR2
-                                                 | HMSVM_VMCB_CLEAN_LBR
+#define HMSVM_VMCB_CLEAN_ALL                    (  HMSVM_VMCB_CLEAN_INTERCEPTS  \
+                                                 | HMSVM_VMCB_CLEAN_IOPM_MSRPM  \
+                                                 | HMSVM_VMCB_CLEAN_ASID        \
+                                                 | HMSVM_VMCB_CLEAN_TPR         \
+                                                 | HMSVM_VMCB_CLEAN_NP          \
+                                                 | HMSVM_VMCB_CLEAN_CRX_EFER    \
+                                                 | HMSVM_VMCB_CLEAN_DRX         \
+                                                 | HMSVM_VMCB_CLEAN_DT          \
+                                                 | HMSVM_VMCB_CLEAN_SEG         \
+                                                 | HMSVM_VMCB_CLEAN_CR2         \
+                                                 | HMSVM_VMCB_CLEAN_LBR         \
                                                  | HMSVM_VMCB_CLEAN_AVIC)
 /** @} */
@@ -212,4 +223,33 @@
 *******************************************************************************/
 static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
+static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu);
+
+HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitXcptNM(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
 
 DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient);
@@ -331,5 +371,6 @@
 
     /* Set all bits to intercept all IO accesses. */
-    ASMMemFill32(pVM->hm.s.svm.pvIOBitmap, 3 << PAGE_SHIFT, UINT32_C(0xffffffff));
+    ASMMemFill32(g_pvIOBitmap, 3 << PAGE_SHIFT, UINT32_C(0xffffffff));
+    return VINF_SUCCESS;
 }
 
@@ -342,5 +383,5 @@
     if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
     {
-        RTR0MemObjFree(pVM->hm.s.svm.hMemObjIOBitmap, false /* fFreeMappings */);
+        RTR0MemObjFree(g_hMemObjIOBitmap, false /* fFreeMappings */);
         g_pvIOBitmap      = NULL;
         g_HCPhysIOBitmap  = 0;
@@ -424,4 +465,6 @@
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
+        PVMCPU pVCpu = &pVM->aCpus[i];
+
         /*
          * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
@@ -455,5 +498,5 @@
         rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
         if (RT_FAILURE(rc))
-            failure_cleanup;
+            goto failure_cleanup;
 
         pVCpu->hm.s.svm.pvMsrBitmap     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
@@ -466,5 +509,5 @@
 
 failure_cleanup:
-    hmR0SvmFreeVMStructs(pVM);
+    hmR0SvmFreeStructs(pVM);
     return rc;
 }
@@ -479,5 +522,5 @@
 VMMR0DECL(int) SVMR0TermVM(PVM pVM)
 {
-    hmR0SvmFreeVMStructs(pVM);
+    hmR0SvmFreeStructs(pVM);
     return VINF_SUCCESS;
 }
@@ -492,5 +535,5 @@
  * @param   enmWrite    MSR write permissions.
  */
-static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
+static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
 {
     unsigned ulBit;
@@ -541,4 +584,5 @@
         ASMBitClear(pbMsrBitmap, ulBit + 1);
 
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
 }
@@ -821,5 +865,5 @@
     }
     else
-        Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE)
+        Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE);
 #endif
 }
@@ -900,7 +944,14 @@
 
 
-DECLINLINE(void) hmR0SvmAddXcptIntercept(uint32_t u32Xcpt)
-{
-    if (!(pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt))
+/**
+ * Adds an exception to the intercept exception bitmap in the VMCB and updates
+ * the corresponding VMCB Clean Bit.
+ *
+ * @param   pVmcb       Pointer to the VMCB.
+ * @param   u32Xcpt     The value of the exception (X86_XCPT_*).
+ */
+DECLINLINE(void) hmR0SvmAddXcptIntercept(PSVMVMCB pVmcb, uint32_t u32Xcpt)
+{
+    if (!(pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt)))
     {
         pVmcb->ctrl.u32InterceptException |= RT_BIT(u32Xcpt);
@@ -909,5 +960,13 @@
 }
 
-DECLINLINE(void) hmR0SvmRemoveXcptIntercept(uint32_t u32Xcpt)
+
+/**
+ * Removes an exception from the intercept-exception bitmap in the VMCB and
+ * updates the corresponding VMCB Clean Bit.
+ *
+ * @param   pVmcb       Pointer to the VMCB.
+ * @param   u32Xcpt     The value of the exception (X86_XCPT_*).
+ */
+DECLINLINE(void) hmR0SvmRemoveXcptIntercept(PSVMVMCB pVmcb, uint32_t u32Xcpt)
 {
 #ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
@@ -936,4 +995,5 @@
      * Guest CR0.
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
     {
@@ -971,5 +1031,5 @@
         {
             fInterceptNM = true;           /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
-            u32GuestCR0 |=  X86_CR0_TS     /* Guest can task switch quickly and do lazy FPU syncing. */
+            u64GuestCR0 |=  X86_CR0_TS     /* Guest can task switch quickly and do lazy FPU syncing. */
                           | X86_CR0_MP;    /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
         }
@@ -979,12 +1039,12 @@
          */
         if (fInterceptNM)
-            hmR0SvmAddXcptIntercept(X86_XCPT_NM);
+            hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_NM);
         else
-            hmR0SvmRemoveXcptIntercept(X86_XCPT_NM);
+            hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_NM);
 
         if (fInterceptMF)
-            hmR0SvmAddXcptIntercept(X86_XCPT_MF);
+            hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_MF);
         else
-            hmR0SvmRemoveXcptIntercept(X86_XCPT_MF);
+            hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_MF);
 
         pVmcb->guest.u64CR0 = u64GuestCR0;
@@ -1160,5 +1220,5 @@
      * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
      */
-    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_SVM_GUEST_EFER_MSR
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_SVM_GUEST_EFER_MSR)
     {
         pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
@@ -1198,4 +1258,5 @@
  *
  * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pVmcb       Pointer to the VMCB.
  * @param   pCtx        Pointer to the guest-CPU context.
  *
@@ -1203,5 +1264,5 @@
  * @remarks Requires EFLAGS to be up-to-date in the VMCB!
  */
-DECLINLINE(void) hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(void) hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
@@ -1230,9 +1291,10 @@
     }
 
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
     {
         if (!CPUMIsHyperDebugStateActive(pVCpu))
         {
-            rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
+            int rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
             AssertRC(rc);
 
@@ -1249,5 +1311,5 @@
         if (!CPUMIsGuestDebugStateActive(pVCpu))
         {
-            rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
+            int rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
             AssertRC(rc);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
@@ -1263,7 +1325,7 @@
 
     if (fInterceptDB)
-        hmR0SvmAddXcptIntercept(X86_XCPT_DB);
+        hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_DB);
     else
-        hmR0SvmRemoveXcptIntercept(X86_XCPT_DB);
+        hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_DB);
 
     if (fInterceptMovDRx)
@@ -1318,5 +1380,5 @@
     if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
     {
-        pCtx->msrLSTAR = u8LastTPR;
+        pCtx->msrLSTAR = u8Tpr;
 
         /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
@@ -1337,5 +1399,5 @@
 
         /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
-        if (fPending)
+        if (fPendingIntr)
             pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
         else
@@ -1570,6 +1632,6 @@
     /** @todo Verify this. */
     if (   !pMixedCtx->cs.Attr.n.u1Granularity
-        &&  pMixedCtx->cs.Attr.n.u1Present
-        &&  pMixedCtx->cs.u32Limit > UINT32_C(0xfffff))
+        && pMixedCtx->cs.Attr.n.u1Present
+        && pMixedCtx->cs.u32Limit > UINT32_C(0xfffff))
     {
         Assert((pMixedCtx->cs.u32Limit & 0xfff) == 0xfff);
@@ -1577,10 +1639,10 @@
     }
 #ifdef VBOX_STRICT
-# define HMSVM_ASSERT_SEL_GRANULARITY(reg) \
+# define HMSVM_ASSERT_SEG_GRANULARITY(reg) \
     AssertMsg(   !pMixedCtx->reg.Attr.n.u1Present \
               || (   pMixedCtx->reg.Attr.n.u1Granularity \
                   ? (pMixedCtx->reg.u32Limit & 0xfff) == 0xfff \
                   :  pMixedCtx->reg.u32Limit <= UINT32_C(0xfffff)), \
-              ("Invalid Segment Attributes %#x %#x %#llx\n", pMixedCtx->reg.u32Limit,
+              ("Invalid Segment Attributes %#x %#x %#llx\n", pMixedCtx->reg.u32Limit, \
               pMixedCtx->reg.Attr.u, pMixedCtx->reg.u64Base))
 
@@ -1625,5 +1687,5 @@
      * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
      */
-    if (   pVM->hm.s.fNestedPaging
+    if (   pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
         && pMixedCtx->cr3 != pVmcb->guest.u64CR3)
     {
@@ -1669,6 +1731,9 @@
         CPUMR0LoadHostDebugState(pVM, pVCpu);
         Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+#ifdef VBOX_STRICT
+        PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
         Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
         Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
+#endif
     }
 
@@ -1854,6 +1919,7 @@
 
 
-/**
- * Converts any TRPM trap into a pending SVM event. This is typically used when
+
+/**
+ * Converts any TRPM trap into a pending HM event. This is typically used when
  * entering from ring-3 (not longjmp returns).
  *
@@ -1874,12 +1940,12 @@
     AssertRC(rc);
 
-    PSVMEVENT pEvent = &pVCpu->hm.s.Event;
-    pEvent->u         = 0;
-    pEvent->n.u1Valid = 1;
+    SVMEVENT Event;
+    Event.u         = 0;
+    Event.n.u1Valid = 1;
 
     /* Refer AMD spec. 15.20 "Event Injection" for the format. */
     if (enmTrpmEvent == TRPM_TRAP)
     {
-        pEvent->n.u3Type = SVM_EVENT_EXCEPTION;
+        Event.n.u3Type = SVM_EVENT_EXCEPTION;
         switch (uVector)
         {
@@ -1892,6 +1958,6 @@
             case X86_XCPT_AC:
             {
-                pEvent->n.u32ErrorCode     = uErrCode;
-                pEvent->n.u1ErrorCodeValid = 1;
+                Event.n.u32ErrorCode     = uErrCode;
+                Event.n.u1ErrorCodeValid = 1;
                 break;
             }
@@ -1901,10 +1967,10 @@
     {
         if (uVector == X86_XCPT_NMI)
-            pEvent->n.u3Type = SVM_EVENT_NMI;
+            Event.n.u3Type = SVM_EVENT_NMI;
         else
-            pEvent->n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
+            Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
     }
     else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
-        pEvent->n.u3Type = SVM_EVENT_SOFTWARE_INT;
+        Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
     else
         AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
@@ -1913,6 +1979,7 @@
     AssertRC(rc);
 
-    Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%#x uErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
-          pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
+    Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%RTbool uErrorCode=%#RX32\n", Event.u, Event.n.u8Vector,
+          !!Event.n.u1ErrorCodeValid, Event.n.u32ErrorCode));
+    hmR0SvmSetPendingEvent(pVCpu, &Event, GCPtrFaultAddress);
 }
 
@@ -1929,12 +1996,14 @@
     Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
 
-    PSVMEVENT pEvent    = &pVCpu->hm.s.Event;
-    uint8_t uVector     = pEvent->n.u8Vector;
-    uint8_t uVectorType = pEvent->n.u3Type;
+    SVMEVENT Event;
+    Event.u = pVCpu->hm.s.Event.u64IntrInfo;
+
+    uint8_t uVector     = Event.n.u8Vector;
+    uint8_t uVectorType = Event.n.u3Type;
 
     TRPMEVENT enmTrapType;
     switch (uVectorType)
     {
-        case SVM_EVENT_EXTERNAL_IRQ
+        case SVM_EVENT_EXTERNAL_IRQ:
         case SVM_EVENT_NMI:
            enmTrapType = TRPM_HARDWARE_INT;
@@ -1957,6 +2026,6 @@
     AssertRC(rc);
 
-    if (pEvent->n.u1ErrorCodeValid)
-        TRPMSetErrorCode(pVCpu, pEvent->n.u32ErrorCode);
+    if (Event.n.u1ErrorCodeValid)
+        TRPMSetErrorCode(pVCpu, Event.n.u32ErrorCode);
 
     if (   uVectorType == SVM_EVENT_EXCEPTION
@@ -2061,5 +2130,5 @@
         {
             pVCpu->hm.s.Event.fPending = false;
-            hmR0SvmInjectEvent(pVCpu, pVmcb, pCtx, &Event);
+            hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
         }
         else
@@ -2076,5 +2145,5 @@
             Event.n.u3Type   = SVM_EVENT_NMI;
 
-            hmR0SvmInjectEvent(pVCpu, pVmcb, pCtx, &Event);
+            hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
         }
@@ -2090,5 +2159,5 @@
         {
             uint8_t u8Interrupt;
-            rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
             if (RT_SUCCESS(rc))
             {
@@ -2099,5 +2168,5 @@
                 Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
 
-                hmR0SvmInjectEvent(pVCpu, pVmcb, pCtx, &Event);
+                hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
             }
@@ -2289,5 +2358,5 @@
         if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
         {
-            rc = PGMUpdateCR3(pVCpu, pCtx->cr3);
+            int rc = PGMUpdateCR3(pVCpu, pCtx->cr3);
             Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
             Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
@@ -2297,8 +2366,7 @@
         if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
         {
-            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+            int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
             if (rc != VINF_SUCCESS)
             {
-                AssertRC(rc);
                 Log4(("hmR0SvmCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
                 return rc;
@@ -2312,5 +2380,5 @@
         {
             STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
-            rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
+            int rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
             Log4(("hmR0SvmCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
             return rc;
@@ -2340,6 +2408,4 @@
     }
 
-    /* Paranoia. */
-    Assert(rc != VERR_EM_INTERPRETER);
     return VINF_SUCCESS;
 }
@@ -2362,9 +2428,10 @@
  * @retval VINF_* scheduling changes, we have to go back to ring-3.
  *
+ * @param   pVM             Pointer to the VM.
  * @param   pVCpu           Pointer to the VMCPU.
  * @param   pCtx            Pointer to the guest-CPU context.
  * @param   pSvmTransient   Pointer to the SVM transient structure.
  */
-DECLINE(int) hmR0SvmPreRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+DECLINLINE(int) hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     /* Check force flag actions that might require us to go back to ring-3. */
@@ -2430,4 +2497,5 @@
      *        should be done wrt to the VMCB Clean Bit, but we'll find out the
      *        hard way. */
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
 
@@ -2469,5 +2537,5 @@
         pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
         uint64_t u64GuestTscAux = 0;
-        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
+        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
         AssertRC(rc2);
         ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
@@ -2518,5 +2586,5 @@
  *          unconditionally when it is safe to do so.
  */
-DECLINLINE(void) hmR0SvmPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, rcVMRun)
+DECLINLINE(void) hmR0SvmPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
@@ -2565,7 +2633,7 @@
             /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
             if (   pVM->hm.s.fTPRPatchingActive
-                && (pCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
+                && (pMixedCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
             {
-                int rc = PDMApicSetTPR(pVCpu, pCtx->msrLSTAR & 0xff);
+                int rc = PDMApicSetTPR(pVCpu, (pMixedCtx->msrLSTAR & 0xff));
                 AssertRC(rc);
             }
@@ -2630,7 +2698,7 @@
                         || SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */
         {
-            if (rc == VINF_SUCCESS);
+            if (rc == VINF_SUCCESS)
                 rc = VERR_SVM_INVALID_GUEST_STATE;
-            hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &SvmTransient);
+            hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
             return rc;
         }
@@ -2749,85 +2817,88 @@
         default:
         {
-            case SVM_EXIT_READ_DR0:     case SVM_EXIT_READ_DR1:     case SVM_EXIT_READ_DR2:     case SVM_EXIT_READ_DR3:
-            case SVM_EXIT_READ_DR6:     case SVM_EXIT_READ_DR7:     case SVM_EXIT_READ_DR8:     case SVM_EXIT_READ_DR9:
-            case SVM_EXIT_READ_DR10:    case SVM_EXIT_READ_DR11:    case SVM_EXIT_READ_DR12:    case SVM_EXIT_READ_DR13:
-            case SVM_EXIT_READ_DR14:    case SVM_EXIT_READ_DR15:
-                return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
-
-            case SVM_EXIT_WRITE_DR0:    case SVM_EXIT_WRITE_DR1:    case SVM_EXIT_WRITE_DR2:    case SVM_EXIT_WRITE_DR3:
-            case SVM_EXIT_WRITE_DR6:    case SVM_EXIT_WRITE_DR7:    case SVM_EXIT_WRITE_DR8:    case SVM_EXIT_WRITE_DR9:
-            case SVM_EXIT_WRITE_DR10:   case SVM_EXIT_WRITE_DR11:   case SVM_EXIT_WRITE_DR12:   case SVM_EXIT_WRITE_DR13:
-            case SVM_EXIT_WRITE_DR14:   case SVM_EXIT_WRITE_DR15:
-                return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
-
-            case SVM_EXIT_TASK_SWITCH:
-                return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
-
-            case SVM_EXIT_VMMCALL:
-                return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
-
-            case SVM_EXIT_INVLPGA:
-            case SVM_EXIT_RSM:
-            case SVM_EXIT_VMRUN:
-            case SVM_EXIT_VMLOAD:
-            case SVM_EXIT_VMSAVE:
-            case SVM_EXIT_STGI:
-            case SVM_EXIT_CLGI:
-            case SVM_EXIT_SKINIT:
-                return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
+            switch (pSvmTransient->u64ExitCode)
+            {
+                case SVM_EXIT_READ_DR0:     case SVM_EXIT_READ_DR1:     case SVM_EXIT_READ_DR2:     case SVM_EXIT_READ_DR3:
+                case SVM_EXIT_READ_DR6:     case SVM_EXIT_READ_DR7:     case SVM_EXIT_READ_DR8:     case SVM_EXIT_READ_DR9:
+                case SVM_EXIT_READ_DR10:    case SVM_EXIT_READ_DR11:    case SVM_EXIT_READ_DR12:    case SVM_EXIT_READ_DR13:
+                case SVM_EXIT_READ_DR14:    case SVM_EXIT_READ_DR15:
+                    return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
+
+                case SVM_EXIT_WRITE_DR0:    case SVM_EXIT_WRITE_DR1:    case SVM_EXIT_WRITE_DR2:    case SVM_EXIT_WRITE_DR3:
+                case SVM_EXIT_WRITE_DR6:    case SVM_EXIT_WRITE_DR7:    case SVM_EXIT_WRITE_DR8:    case SVM_EXIT_WRITE_DR9:
+                case SVM_EXIT_WRITE_DR10:   case SVM_EXIT_WRITE_DR11:   case SVM_EXIT_WRITE_DR12:   case SVM_EXIT_WRITE_DR13:
+                case SVM_EXIT_WRITE_DR14:   case SVM_EXIT_WRITE_DR15:
+                    return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
+
+                case SVM_EXIT_TASK_SWITCH:
+                    return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
+
+                case SVM_EXIT_VMMCALL:
+                    return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
+
+                case SVM_EXIT_INVLPGA:
+                case SVM_EXIT_RSM:
+                case SVM_EXIT_VMRUN:
+                case SVM_EXIT_VMLOAD:
+                case SVM_EXIT_VMSAVE:
+                case SVM_EXIT_STGI:
+                case SVM_EXIT_CLGI:
+                case SVM_EXIT_SKINIT:
+                    return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
 
 #ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
-            case SVM_EXIT_EXCEPTION_0:      /* X86_XCPT_DE */
-            case SVM_EXIT_EXCEPTION_3:      /* X86_XCPT_BP */
-            case SVM_EXIT_EXCEPTION_6:      /* X86_XCPT_UD */
-            case SVM_EXIT_EXCEPTION_B:      /* X86_XCPT_NP */
-            case SVM_EXIT_EXCEPTION_C:      /* X86_XCPT_SS */
-            case SVM_EXIT_EXCEPTION_D:      /* X86_XCPT_GP */
-            {
-                SVMEVENT Event;
-                Event.u          = 0;
-                Event.n.u1Valid  = 1;
-                Event.n.u3Type   = SVM_EVENT_EXCEPTION;
-                Event.n.u8Vector = pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0;
-
-                switch (Event.n.u8Vector)
+                case SVM_EXIT_EXCEPTION_0:      /* X86_XCPT_DE */
+                case SVM_EXIT_EXCEPTION_3:      /* X86_XCPT_BP */
+                case SVM_EXIT_EXCEPTION_6:      /* X86_XCPT_UD */
+                case SVM_EXIT_EXCEPTION_B:      /* X86_XCPT_NP */
+                case SVM_EXIT_EXCEPTION_C:      /* X86_XCPT_SS */
+                case SVM_EXIT_EXCEPTION_D:      /* X86_XCPT_GP */
                 {
-                    case X86_XCPT_GP:
-                        Event.n.u1ErrorCodeValid    = 1;
-                        Event.n.u32ErrorCode        = pVmcb->ctrl.u64ExitInfo1;
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
-                        break;
-                    case X86_XCPT_BP:
-                        /** Saves the wrong EIP on the stack (pointing to the int3) instead of the
-                         *  next instruction. */
-                        /** @todo Investigate this later. */
-                        break;
-                    case X86_XCPT_DE:
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
-                        break;
-                    case X86_XCPT_UD:
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
-                        break;
-                    case X86_XCPT_SS:
-                        Event.n.u1ErrorCodeValid    = 1;
-                        Event.n.u32ErrorCode        = pVmcb->ctrl.u64ExitInfo1;
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
-                        break;
-                    case X86_XCPT_NP:
-                        Event.n.u1ErrorCodeValid    = 1;
-                        Event.n.u32ErrorCode        = pVmcb->ctrl.u64ExitInfo1;
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
-                        break;
+                    SVMEVENT Event;
+                    Event.u          = 0;
+                    Event.n.u1Valid  = 1;
+                    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
+                    Event.n.u8Vector = pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0;
+
+                    switch (Event.n.u8Vector)
+                    {
+                        case X86_XCPT_GP:
+                            Event.n.u1ErrorCodeValid    = 1;
+                            Event.n.u32ErrorCode        = pVmcb->ctrl.u64ExitInfo1;
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
+                            break;
+                        case X86_XCPT_BP:
+                            /** Saves the wrong EIP on the stack (pointing to the int3) instead of the
+                             *  next instruction. */
+                            /** @todo Investigate this later. */
+                            break;
+                        case X86_XCPT_DE:
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
+                            break;
+                        case X86_XCPT_UD:
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
+                            break;
+                        case X86_XCPT_SS:
+                            Event.n.u1ErrorCodeValid    = 1;
+                            Event.n.u32ErrorCode        = pVmcb->ctrl.u64ExitInfo1;
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
+                            break;
+                        case X86_XCPT_NP:
+                            Event.n.u1ErrorCodeValid    = 1;
+                            Event.n.u32ErrorCode        = pVmcb->ctrl.u64ExitInfo1;
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
+                            break;
+                    }
+                    Log4(("#Xcpt: Vector=%#x at CS:RIP=%04x:%RGv\n", Event.n.u8Vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
+                    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+                    return VINF_SUCCESS;
                 }
-                Log4(("#Xcpt: Vector=%#x at CS:RIP=%04x:%RGv\n", Event.n.u8Vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
-                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-                return VINF_SUCCESS;
-            }
-#endif
-
-            default:
-            {
-                AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit code %#x\n", u32ExitCode));
-                return VERR_SVM_UNEXPECTED_EXIT;
+#endif  /* HMSVM_ALWAYS_TRAP_ALL_XCPTS */
+
+                default:
+                {
+                    AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit code %#x\n", u32ExitCode));
+                    return VERR_SVM_UNEXPECTED_EXIT;
+                }
             }
         }
@@ -2861,5 +2932,4 @@
                 if (VMMR0IsLogFlushDisabled(pVCpu)) \
                     HMSVM_ASSERT_PREEMPT_CPUID(); \
-                HMSVM_STOP_EXIT_DISPATCH_PROF(); \
             } while (0)
 #else   /* Release builds */
@@ -2892,5 +2962,6 @@
 
         GCPtrPage = Param1.val.val64;
-        rc = EMInterpretInvlpg(pVCpu->CTX_SUFF(pVM), pVCpu,  pRegFrame, GCPtrPage);
+        VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVCpu->CTX_SUFF(pVM), pVCpu,  pRegFrame, GCPtrPage);
+        rc = VBOXSTRICTRC_VAL(rc2);
     }
     else
@@ -2951,5 +3022,5 @@
     Event.n.u3Type   = SVM_EVENT_EXCEPTION;
     Event.n.u8Vector = X86_XCPT_UD;
-    hmR0SvmSetPendingEvent(pVCpu, &Event);
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
 }
 
@@ -2967,5 +3038,5 @@
     Event.n.u3Type   = SVM_EVENT_EXCEPTION;
     Event.n.u8Vector = X86_XCPT_DB;
-    hmR0SvmSetPendingEvent(pVCpu, &Event);
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
 }
 
@@ -2994,5 +3065,5 @@
     pCtx->cr2 = uFaultAddress;
 
-    hmR0SvmSetPendingEvent(pVCpu, &Event);
+    hmR0SvmSetPendingEvent(pVCpu, &Event, uFaultAddress);
 }
 
@@ -3011,5 +3082,5 @@
     Event.n.u3Type   = SVM_EVENT_EXCEPTION;
     Event.n.u8Vector = X86_XCPT_NM;
-    hmR0SvmSetPendingEvent(pVCpu, &Event);
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
 }
 
@@ -3027,5 +3098,5 @@
     Event.n.u3Type   = SVM_EVENT_EXCEPTION;
     Event.n.u8Vector = X86_XCPT_MF;
-    hmR0SvmSetPendingEvent(pVCpu, &Event);
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
 }
 
@@ -3045,5 +3116,5 @@
     Event.n.u1ErrorCodeValid = 1;
     Event.n.u32ErrorCode     = 0;
-    hmR0SvmSetPendingEvent(pVCpu, &Event);
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
 }
 
@@ -3165,21 +3236,21 @@
     if (pVmcb->ctrl.ExitIntInfo.n.u1Valid)
     {
+        uint8_t uIdtVector  = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
+        uint8_t uExitVector = UINT8_MAX;       /* Start off with an invalid vector, updated when it's valid. See below. */
+
+        typedef enum
+        {
+            SVMREFLECTXCPT_XCPT,    /* Reflect the exception to the guest or for further evaluation by VMM. */
+            SVMREFLECTXCPT_DF,      /* Reflect the exception as a double-fault to the guest. */
+            SVMREFLECTXCPT_TF,      /* Indicate a triple faulted state to the VMM. */
+            SVMREFLECTXCPT_NONE     /* Nothing to reflect. */
+        } SVMREFLECTXCPT;
+
+        SVMREFLECTXCPT enmReflect = SVMREFLECTXCPT_NONE;
         if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION)
         {
-            typedef enum
-            {
-                SVMREFLECTXCPT_XCPT,    /* Reflect the exception to the guest or for further evaluation by VMM. */
-                SVMREFLECTXCPT_DF,      /* Reflect the exception as a double-fault to the guest. */
-                SVMREFLECTXCPT_TF,      /* Indicate a triple faulted state to the VMM. */
-                SVMREFLECTXCPT_NONE     /* Nothing to reflect. */
-            } SVMREFLECTXCPT;
-
-            SVMREFLECTXCPT enmReflect = SVMREFLECTXCPT_NONE;
-
             if (pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0 <= SVM_EXIT_EXCEPTION_1F)
             {
-                uint8_t uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0);
-                uint8_t uIdtVector  = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
-
+                uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0);
                 if (   uExitVector == X86_XCPT_PF
                     && uIdtVector  == X86_XCPT_PF)
@@ -3194,5 +3265,6 @@
                {
                    enmReflect = SVMREFLECTXCPT_DF;
-               }
+                   Log4(("IDT: Pending vectoring #DF %#RX64 uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo, uExitVector));
+                }
                else if (uIdtVector == X86_XCPT_DF)
                    enmReflect = SVMREFLECTXCPT_TF;
@@ -3328,5 +3400,5 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
+    int rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
     if (RT_LIKELY(rc == VINF_SUCCESS))
         pCtx->rip += 2;     /* Hardcoded opcode, AMD-V doesn't give us this information. */
@@ -3347,5 +3419,5 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = EMInterpretRdtscp(pVM, pVCpu, pCtx);
+    int rc = EMInterpretRdtscp(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
     if (RT_LIKELY(rc == VINF_SUCCESS))
         pCtx->rip += 3;     /* Hardcoded opcode, AMD-V doesn't give us this information. */
@@ -3366,5 +3438,5 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
+    int rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
     if (RT_LIKELY(rc == VINF_SUCCESS))
         pCtx->rip += 2;     /* Hardcoded opcode, AMD-V doesn't give us this information. */
@@ -3385,4 +3457,5 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     Assert(!pVM->hm.s.fNestedPaging);
 
@@ -3414,5 +3487,5 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx));
+    int rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
     if (RT_LIKELY(rc == VINF_SUCCESS))
         pCtx->rip += 3;     /* Hardcoded opcode, AMD-V doesn't give us this information. */
@@ -3433,5 +3506,6 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
+    VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
+    int rc = VBOXSTRICTRC_VAL(rc2);
     if (    rc == VINF_EM_HALT
         ||  rc == VINF_SUCCESS)
@@ -3475,5 +3549,6 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     /** @todo Decode Assist. */
-    int rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
+    VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
+    int rc = VBOXSTRICTRC_VAL(rc2);
     Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
     Assert((pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0) <= 15);
@@ -3490,6 +3565,7 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     /** @todo Decode Assist. */
-    int rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
-    if (rc == VINF_SUCCCES)
+    VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
+    int rc = VBOXSTRICTRC_VAL(rc2);
+    if (rc == VINF_SUCCESS)
     {
         /* RIP has been updated by EMInterpretInstruction(). */
@@ -3502,5 +3578,5 @@
 
             case 3:     /* CR3. */
-                Assert(!pVM->hm.s.fNestedPaging);
+                Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
                 break;
@@ -3515,5 +3591,5 @@
 
             default:
-                AsserMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 %#x CRx=%#RX64\n",
+                AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 CRx=%#RX64\n",
                                 pSvmTransient->u64ExitCode, pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0));
                 break;
@@ -3533,5 +3609,6 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    return hmR0SvmSetPendingXcptUD(pVCpu);
+    hmR0SvmSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
 }
 
@@ -3544,4 +3621,5 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
 
     int rc;
@@ -3564,5 +3642,5 @@
         }
 
-        rc = EMInterpretWrmsr(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
+        rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
         AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretWrmsr failed rc=%Rrc\n", rc));
 
@@ -3574,5 +3652,5 @@
         /* MSR Read access. */
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
-        int rc = EMInterpretRdmsr(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
+        rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
         AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretRdmsr failed rc=%Rrc\n", rc));
     }
@@ -3609,14 +3687,15 @@
         /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
         PVM pVM = pVCpu->CTX_SUFF(pVM);
-        rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
+        int rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
         AssertRC(rc);
         Assert(CPUMIsGuestDebugStateActive(pVCpu));
 
         STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
-        return VINF_SUCCESS;
+        return rc;
     }
 
     /** @todo Decode assist.  */
-    int rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
+    VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
+    int rc = VBOXSTRICTRC_VAL(rc2);
     if (RT_LIKELY(rc == VINF_SUCCESS))
     {
@@ -3625,5 +3704,5 @@
     }
     else
-        Assert(c == VERR_EM_INTERPRETER);
+        Assert(rc == VERR_EM_INTERPRETER);
     return rc;
 }
@@ -3656,4 +3735,7 @@
                                                                                             the result (in AL/AX/EAX). */
 
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
+
     /* Refer AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
     SVMIOIOEXIT IoExitInfo;
@@ -3683,12 +3765,14 @@
             if (IoExitInfo.n.u1Type == 0)   /* OUT */
             {
-                rc = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
-                                        (DISCPUMODE)pDis->uAddrMode, uIOSize);
+                VBOXSTRICTRC rc2 = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
+                                                      (DISCPUMODE)pDis->uAddrMode, uIOSize);
+                rc = VBOXSTRICTRC_VAL(rc2);
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
             }
             else
             {
-                rc = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
-                                       (DISCPUMODE)pDis->uAddrMode, uIOSize);
+                VBOXSTRICTRC rc2 = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
+                                                     (DISCPUMODE)pDis->uAddrMode, uIOSize);
+                rc = VBOXSTRICTRC_VAL(rc2);
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
             }
@@ -3704,5 +3788,6 @@
         if (IoExitInfo.n.u1Type == 0)   /* OUT */
         {
-            rc = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
+            VBOXSTRICTRC rc2 = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
+            rc = VBOXSTRICTRC_VAL(rc2);
             if (rc == VINF_IOM_R3_IOPORT_WRITE)
                 HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, uIOSize);
@@ -3714,5 +3799,6 @@
             uint32_t u32Val = 0;
 
-            rc = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, uIOSize);
+            VBOXSTRICTRC rc2 = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, uIOSize);
+            rc = VBOXSTRICTRC_VAL(rc2);
             if (IOM_SUCCESS(rc))
             {
@@ -3838,8 +3924,5 @@
             PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
             if (!pPatch)
-            {
-                rc = VINF_EM_HM_PATCH_TPR_INSTR;
-                return rc;
-            }
+                return VINF_EM_HM_PATCH_TPR_INSTR;
         }
     }
@@ -3864,5 +3947,7 @@
     if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
     {
-        rc = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr, u32ErrCode);
+        VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr,
+                                                         u32ErrCode);
+        rc = VBOXSTRICTRC_VAL(rc2);
 
         /*
@@ -3913,4 +3998,5 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     pVmcb->ctrl.IntCtrl.n.u1VIrqValid  = 0;  /* No virtual interrupts pending, we'll inject the current one before reentry. */
     pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;
@@ -3920,5 +4006,5 @@
     pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
 
-    /* Deliver the pending interrupt via hmR0SvmPreRunGuest()->hmR0SvmInjectEvent() and resume guest execution. */
+    /* Deliver the pending interrupt via hmR0SvmPreRunGuest()->hmR0SvmInjectEventVmcb() and resume guest execution. */
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
     return VINF_SUCCESS;
@@ -3968,5 +4054,5 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
-    int rc = hmR0SvmEmulateMovTpr(pVM, pVCpu, pCtx);
+    int rc = hmR0SvmEmulateMovTpr(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
     if (RT_UNLIKELY(rc != VINF_SUCCESS))
         hmR0SvmSetPendingXcptUD(pVCpu);
@@ -4011,4 +4097,5 @@
 #endif
 
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     Assert(!pVM->hm.s.fNestedPaging);
 
@@ -4028,7 +4115,7 @@
         /* Check if the page at the fault-address is the APIC base. */
         RTGCPHYS GCPhysPage;
-        rc = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, NULL /* pfFlags */, &GCPhysPage);
-        if (   rc == VINF_SUCCESS
-            && GCPhys == GCPhysApicBase)
+        int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, NULL /* pfFlags */, &GCPhysPage);
+        if (   rc2 == VINF_SUCCESS
+            && GCPhysPage == GCPhysApicBase)
         {
             /* Only attempt to patch the instruction once. */
@@ -4044,5 +4131,5 @@
 
     TRPMAssertXcptPF(pVCpu, uFaultAddress, u32ErrCode);
-    rc = PGMTrap0eHandler(pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
+    int rc = PGMTrap0eHandler(pVCpu, u32ErrCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
 
     Log2(("#PF rc=%Rrc\n", rc));
@@ -4098,6 +4185,5 @@
 
     /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
+    int rc = CPUMR0LoadGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
     if (rc == VINF_SUCCESS)
     {
@@ -4125,4 +4211,5 @@
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
 
+    int rc;
     if (!(pCtx->cr0 & X86_CR0_NE))
     {
@@ -4140,2 +4227,16 @@
 }
 
+
+/**
+ * #VMEXIT handler for debug exceptions (SVM_EXIT_DB). Conditional #VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
+
+    /** @todo Not implemented: reflect #DB to the guest (see hmR0SvmSetPendingXcptDB). */
+    return VERR_NOT_IMPLEMENTED;
+}
+
