Index: /trunk/include/VBox/vmm/hm_svm.h
===================================================================
--- /trunk/include/VBox/vmm/hm_svm.h	(revision 46511)
+++ /trunk/include/VBox/vmm/hm_svm.h	(revision 46512)
@@ -71,5 +71,5 @@
  */
 /** Invalid guest state in VMCB. */
-#define SVM_EXIT_INVALID                -1
+#define SVM_EXIT_INVALID                (-1)
 /** Read from CR0-CR15. */
 #define SVM_EXIT_READ_CR0               0x0
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 46511)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 46512)
@@ -29,4 +29,15 @@
 *   Defined Constants And Macros                                               *
 *******************************************************************************/
+#ifdef VBOX_WITH_STATISTICS
+# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
+        if ((u64ExitCode) == SVM_EXIT_NPF) \
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
+        else \
+            STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[(u64ExitCode) & MASK_EXITREASON_STAT]); \
+        } while (0)
+#else
+# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
+#endif
+
 /** @name Segment attribute conversion between CPU and AMD-V VMCB format.
  *
@@ -129,5 +140,5 @@
 #endif
 
-    /** The #VMEXIT exit code. */
+    /** The #VMEXIT exit code (the EXITCODE field in the VMCB). */
     uint64_t        u64ExitCode;
 } SVMTRANSIENT, *PSVMTRANSIENT;
@@ -144,5 +155,5 @@
     /** Reading this MSR does not cause a VM-exit. */
     SVMMSREXIT_PASSTHRU_READ
-} VMXMSREXITREAD;
+} SVMMSREXITREAD;
 
 /**
@@ -155,5 +166,5 @@
     /** Writing to this MSR does not cause a VM-exit. */
     SVMMSREXIT_PASSTHRU_WRITE
-} VMXMSREXITWRITE;
+} SVMMSREXITWRITE;
 
 
@@ -162,4 +173,6 @@
 *******************************************************************************/
 static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
+
+DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient);
 
 
@@ -489,5 +502,5 @@
         ASMBitClear(pbMsrBitmap, ulBit + 1);
 
-    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
+    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
 }
 
@@ -582,5 +595,5 @@
 
         /* Initially set all VMCB clean bits to 0 indicating that everything should be loaded from memory. */
-        pVmcb->u64VmcbCleanBits = 0;
+        pVmcb->ctrl.u64VmcbCleanBits = 0;
 
         /* The guest ASID MBNZ, set it to 1. The host uses 0. */
@@ -749,5 +762,5 @@
     {
         pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
     }
 
@@ -852,5 +865,5 @@
     {
         pVmcb->ctrl.u32InterceptException |= RT_BIT(u32Xcpt);
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     }
 }
@@ -858,9 +871,9 @@
 DECLINLINE(void) hmR0SvmRemoveXcptIntercept(uint32_t u32Xcpt)
 {
-#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
+#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
     if (pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt))
     {
         pVmcb->ctrl.u32InterceptException &= ~RT_BIT(u32Xcpt);
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     }
 #endif
@@ -935,5 +948,5 @@
 
         pVmcb->guest.u64CR0 = u64GuestCR0;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
     }
@@ -945,5 +958,5 @@
     {
         pVmcb->guest.u64CR2 = pCtx->cr2;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
     }
@@ -965,5 +978,5 @@
 
             pVmcb->ctrl.u64NestedPagingCR3  = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
-            pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
+            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
             Assert(pVmcb->ctrl.u64NestedPagingCR3);
             pVmcb->guest.u64CR3 = pCtx->cr3;
@@ -972,5 +985,5 @@
             pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
 
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
         pVCpu->hm.s.fContextUseFlags &= HM_CHANGED_GUEST_CR3;
     }
@@ -1017,5 +1030,5 @@
 
         pVmcb->guest.u64CR4 = u64GuestCR4;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
     }
@@ -1045,5 +1058,5 @@
         HMSVM_LOAD_SEG_REG(GS, cs);
 
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
     }
@@ -1068,5 +1081,5 @@
         pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
         pVmcb->guest.GDTR.u64Base  = pCtx->gdtr.pGdt;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
     }
@@ -1077,5 +1090,5 @@
         pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
         pVmcb->guest.IDTR.u64Base  = pCtx->idtr.pIdt;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
     }
@@ -1151,5 +1164,5 @@
     pVmcb->guest.u64DR7 = pCtx->dr[7];
     pVmcb->guest.u64DR6 = pCtx->dr[6];
-    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
+    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
 
     bool fInterceptDB     = false;
@@ -1172,5 +1185,5 @@
             pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
             pVmcb->guest.u64DR6 = CPUMGetHyperDR6(pVCpu);
-            pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
+            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
         }
         Assert(CPUMIsHyperDebugStateActive(pVCpu));
@@ -1206,5 +1219,5 @@
             pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
             pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
-            pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
         }
     }
@@ -1216,5 +1229,5 @@
             pVmcb->ctrl.u16InterceptRdDRx = 0;
             pVmcb->ctrl.u16InterceptWrDRx = 0;
-            pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
         }
     }
@@ -1290,13 +1303,10 @@
 VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
-    AssertPtr(pVCpu);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     NOREF(pVM);
+    NOREF(pVCpu);
     NOREF(pCtx);
 
-    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
-
-    /** -xxx- todo. */
-
+    /* Nothing to do here. Everything is taken care of in hmR0SvmLongJmpToRing3(). */
     return VINF_SUCCESS;
 }
@@ -1399,6 +1409,10 @@
 
     /*
-     * Save all the MSRs that can be changed by the guest without causing a world switch.
-     * FS & GS base are saved with HMSVM_SAVE_SEG_REG.
+     * Control registers: CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
+     */
+    pMixedCtx->cr2        = pVmcb->guest.u64CR2;
+
+    /*
+     * Guest MSRs.
      */
     pMixedCtx->msrSTAR         = pVmcb->guest.u64STAR;            /* legacy syscall eip, cs & ss */
@@ -1411,8 +1425,7 @@
     pMixedCtx->SysEnter.esp    = pVmcb->guest.u64SysEnterESP;
 
-    /* Can be updated behind our back in the nested paging case. */
-    pMixedCtx->cr2        = pVmcb->guest.u64CR2;
-
-    /* Segment registers:  CS, SS, DS, ES, FS, GS. */
+    /*
+     * Guest segment registers (includes FS, GS base MSRs for 64-bit guests).
+     */
     HMSVM_SAVE_SEG_REG(CS, ss);
     HMSVM_SAVE_SEG_REG(SS, cs);
@@ -1461,9 +1474,8 @@
 
     /*
-     * Descriptor Table Registers: TR, IDTR, GDTR, LDTR.
+     * Guest Descriptor-Table registers.
      */
     HMSVM_SAVE_SEG_REG(TR, tr);
     HMSVM_SAVE_SEG_REG(LDTR, ldtr);
-
     pMixedCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit;
     pMixedCtx->gdtr.pGdt  = pVmcb->guest.GDTR.u64Base;
@@ -1473,5 +1485,5 @@
 
     /*
-     * Debug registers.
+     * Guest Debug registers.
      */
     pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
@@ -1480,4 +1492,5 @@
     /*
      * With Nested Paging, CR3 changes are not intercepted. Therefore, sync. it now.
+     * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
      */
     if (   pVM->hm.s.fNestedPaging
@@ -1485,5 +1498,5 @@
     {
         CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3);
-        PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);       /* This may longjmp to ring-3 hence done at the very end. */
+        PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);
     }
 }
@@ -1506,5 +1519,5 @@
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
 
-    /* Restore FPU state if necessary and resync on next R0 reentry .*/
+    /* Restore host FPU state if necessary and resync on next R0 reentry. */
     if (CPUMIsGuestFPUStateActive(pVCpu))
     {
@@ -1514,5 +1527,5 @@
     }
 
-    /* Restore debug registers if necessary and resync on next R0 reentry. */
+    /* Restore host debug registers if necessary and resync on next R0 reentry. */
     if (CPUMIsGuestDebugStateActive(pVCpu))
     {
@@ -1531,4 +1544,33 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
+}
+
+
+/**
+ * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
+ * any remaining host state) before we longjump to ring-3 and possibly get
+ * preempted.
+ *
+ * @param   pVCpu           Pointer to the VMCPU.
+ * @param   enmOperation    The operation causing the ring-3 longjump.
+ * @param   pvUser          The user argument (pointer to the possibly
+ *                          out-of-date guest-CPU context).
+ *
+ * @remarks Must never be called with @a enmOperation ==
+ *          VMMCALLRING3_VM_R0_ASSERTION.
+ */
+DECLCALLBACK(void) hmR0SvmCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
+{
+    /* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion. */
+    Assert(pVCpu);
+    Assert(pvUser);
+    Assert(VMMRZCallRing3IsEnabled(pVCpu));
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    VMMRZCallRing3Disable(pVCpu);
+    Assert(VMMR0IsLogFlushDisabled(pVCpu));
+    Log4(("hmR0SvmCallRing3Callback->hmR0SvmLongJmpToRing3\n"));
+    hmR0SvmLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
+    VMMRZCallRing3Enable(pVCpu);
 }
 
@@ -1626,5 +1668,5 @@
     }
 
-    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
 }
 
@@ -1706,5 +1748,4 @@
 
     /* Refer AMD spec. 15.20 "Event Injection" for the format. */
-    uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
     if (enmTrpmEvent == TRPM_TRAP)
     {
@@ -1752,5 +1793,5 @@
  * @param   pvCpu           Pointer to the VMCPU.
  */
-static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
+static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu)
 {
     Assert(pVCpu->hm.s.Event.fPending);
@@ -1796,5 +1837,5 @@
     else if (uVectorType == SVM_EVENT_SOFTWARE_INT)
     {
-        AssertMsg(   uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
+        AssertMsg(   uVectorType == SVM_EVENT_SOFTWARE_INT
                   || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
                   ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
@@ -1853,5 +1894,5 @@
         pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;     /* Not necessary as we #VMEXIT for delivering the interrupt. */
         pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     }
 }
@@ -1958,8 +1999,11 @@
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+
     if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
     {
         HMDumpRegs(pVM, pVCpu, pCtx);
 #ifdef VBOX_STRICT
+        Log4(("ctrl.u64VmcbCleanBits             %#RX64\n",   pVmcb->ctrl.u64VmcbCleanBits));
         Log4(("ctrl.u16InterceptRdCRx            %#x\n",      pVmcb->ctrl.u16InterceptRdCRx));
         Log4(("ctrl.u16InterceptWrCRx            %#x\n",      pVmcb->ctrl.u16InterceptWrCRx));
@@ -2193,5 +2237,5 @@
 {
     /* Check force flag actions that might require us to go back to ring-3. */
-    int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pCtx);
+    int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
     if (rc != VINF_SUCCESS)
         return rc;
@@ -2341,5 +2385,5 @@
 
     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
-    pVmcb->u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;             /* Mark the VMCB-state cache as unmodified by VMM. */
+    pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;             /* Mark the VMCB-state cache as unmodified by VMM. */
 
     /* Restore host's TSC_AUX if required. */
@@ -2361,5 +2405,5 @@
 
     ASMSetFlags(pSvmTransient->uEFlags);                        /* Enable interrupts. */
-    VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pMixedCtx);
+    VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pMixedCtx);
     VMMRZCallRing3Enable(pVCpu);                                /* It is now safe to do longjmps to ring-3!!! */
 
@@ -2430,9 +2474,7 @@
 
         /* Handle the #VMEXIT. */
-        AssertMsg(SvmTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
-        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
-
-        /* -xxx- todo. */
-
+        AssertMsg(SvmTransient.u64ExitCode != SVM_EXIT_INVALID, ("%#x\n", SvmTransient.u64ExitCode));
+        HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
+        rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
         if (rc != VINF_SUCCESS)
             break;
@@ -2453,2 +2495,53 @@
 }
 
+
+/**
+ * Handles a #VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
+ *
+ * @returns VBox status code (informational status codes included).
+ * @param   pVCpu           Pointer to the VMCPU.
+ * @param   pCtx            Pointer to the guest-CPU context.
+ * @param   pSvmTransient   Pointer to the SVM transient structure.
+ */
+DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient)
+{
+    int rc = VERR_NOT_SUPPORTED;                                /* No handlers yet; never return an uninitialized rc. */
+    uint32_t u32ExitCode = (uint32_t)pSvmTransient->u64ExitCode; /* Valid EXITCODE values fit in 32 bits. */
+    NOREF(pVCpu); NOREF(pMixedCtx);                             /* Unused until the exit handlers are implemented. */
+    switch (u32ExitCode)
+    {
+        /** @todo Dispatch to the individual \#VMEXIT handlers. */
+    }
+    return rc;
+}
+
+#ifdef DEBUG
+/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
+# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
+    RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
+
+# define HMSVM_ASSERT_PREEMPT_CPUID() \
+   do \
+   { \
+        RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
+        AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
+   } while (0)
+
+# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() \
+            do { \
+                AssertPtr(pVCpu); \
+                AssertPtr(pMixedCtx); \
+                AssertPtr(pSvmTransient); \
+                Assert(ASMIntAreEnabled()); \
+                Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
+                HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
+                Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
+                Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
+                if (VMMR0IsLogFlushDisabled(pVCpu)) \
+                    HMSVM_ASSERT_PREEMPT_CPUID(); \
+                HMSVM_STOP_EXIT_DISPATCH_PROF(); \
+            } while (0)
+#else   /* Release builds */
+# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() do { } while(0)
+#endif
+
