Index: /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 68433)
+++ /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 68434)
@@ -350,5 +350,5 @@
     {
         PSVMVMCBCTRL      pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
-        PSVMVMCBSTATESAVE pVmcbNstGstState =&pVmcbNstGst->guest;
+        PSVMVMCBSTATESAVE pVmcbNstGstState = &pVmcbNstGst->guest;
         pVmcbNstGstCtrl->u16InterceptRdCRx        = pNstGstVmcbCache->u16InterceptRdCRx;
         pVmcbNstGstCtrl->u16InterceptWrCRx        = pNstGstVmcbCache->u16InterceptWrCRx;
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 68433)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 68434)
@@ -1374,4 +1374,5 @@
         AssertRC(rc);
         pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
+        Log4(("hmR0SvmLoadGuestControlRegsNested: CR3=%#RX64 to HC phys CR3=%#RHp\n", pCtx->cr3, pVmcbNstGst->guest.u64CR3));
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
     }
@@ -1795,22 +1796,31 @@
  * well and handle it accordingly.
  *
+ * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pVmcb           Pointer to the VM control block.
  * @param   pVmcbNstGst     Pointer to the nested-guest VM control block.
  */
-static void hmR0SvmMergeIntercepts(PCSVMVMCB pVmcb, PSVMVMCB pVmcbNstGst)
-{
-    pVmcbNstGst->ctrl.u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
-    pVmcbNstGst->ctrl.u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
-
-    /** @todo Figure out debugging with nested-guests, till then just intercept
-     *        all DR[0-15] accesses. */
-    pVmcbNstGst->ctrl.u16InterceptRdDRx |= 0xffff;
-    pVmcbNstGst->ctrl.u16InterceptWrDRx |= 0xffff;
-
-    pVmcbNstGst->ctrl.u32InterceptXcpt  |= pVmcb->ctrl.u32InterceptXcpt;
-    pVmcbNstGst->ctrl.u64InterceptCtrl  |= pVmcb->ctrl.u64InterceptCtrl
-                                        |  HMSVM_MANDATORY_NESTED_GUEST_CTRL_INTERCEPTS;
-
-    Assert((pVmcbNstGst->ctrl.u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS) == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
+static void hmR0SvmLoadGuestXcptInterceptsNested(PVMCPU pVCpu, PSVMVMCB pVmcb, PSVMVMCB pVmcbNstGst)
+{
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
+    {
+        hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb);
+
+        pVmcbNstGst->ctrl.u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
+        pVmcbNstGst->ctrl.u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
+
+        /** @todo Figure out debugging with nested-guests, till then just intercept
+         *        all DR[0-15] accesses. */
+        pVmcbNstGst->ctrl.u16InterceptRdDRx |= 0xffff;
+        pVmcbNstGst->ctrl.u16InterceptWrDRx |= 0xffff;
+
+        pVmcbNstGst->ctrl.u32InterceptXcpt  |= pVmcb->ctrl.u32InterceptXcpt;
+        pVmcbNstGst->ctrl.u64InterceptCtrl  |= pVmcb->ctrl.u64InterceptCtrl
+                                            |  HMSVM_MANDATORY_NESTED_GUEST_CTRL_INTERCEPTS;
+
+        Assert(   (pVmcbNstGst->ctrl.u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
+               == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
+
+        Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS));
+    }
 }
 #endif
@@ -2029,6 +2039,6 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT
 /**
- * Caches the nested-guest VMCB fields before we modify them for executing the
- * nested-guest under SVM R0.
+ * Caches the nested-guest VMCB fields before we modify them for execution using
+ * hardware-assisted SVM.
  *
  * @param   pCtx            Pointer to the guest-CPU context.
@@ -2039,6 +2049,6 @@
 {
     PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
-    PSVMVMCBSTATESAVE   pVmcbNstGstState = &pVmcbNstGst->guest;
+    PCSVMVMCBCTRL       pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
+    PCSVMVMCBSTATESAVE  pVmcbNstGstState = &pVmcbNstGst->guest;
     PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
 
@@ -2060,40 +2070,50 @@
 
 /**
- * Sets up the nested-guest for hardware-assisted SVM execution.
+ * Sets up the nested-guest VMCB for execution using hardware-assisted SVM.
  *
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pCtx            Pointer to the guest-CPU context.
  */
+static void hmR0SvmVmRunSetupVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    RT_NOREF(pVCpu);
+    PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
+
+    /*
+     * First cache the nested-guest VMCB fields we may potentially modify.
+     */
+    hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
+
+    /*
+     * The IOPM of the nested-guest can be ignored because the guest always
+     * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
+     * than the nested-guest one and swap it back on the #VMEXIT.
+     */
+    pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
+
+    /*
+     * Load the host-physical address into the MSRPM rather than the nested-guest
+     * physical address (currently we trap all MSRs in the nested-guest).
+     */
+    pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
+}
+
+
+/**
+ * Sets up the nested-guest for hardware-assisted SVM execution.
+ *
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   pCtx            Pointer to the guest-CPU context.
+ *
+ * @remarks This must be called only after the guest exceptions are up to date as
+ *          otherwise we risk overwriting the guest exceptions with the nested-guest
+ *          exceptions.
+ */
 static void hmR0SvmLoadGuestVmcbNested(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_NESTED_GUEST))
     {
-        /*
-         * Cache the nested-guest VMCB fields before we start modifying them below.
-         */
-        hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
-
-        PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-        PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
-
-        /*
-         * The IOPM of the nested-guest can be ignored because the the guest always
-         * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
-         * into the nested-guest one and swap it back on the #VMEXIT.
-         */
-        pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
-
-        /*
-         * Load the host-physical address into the MSRPM rather than the nested-guest
-         * physical address.
-         */
-        pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
-
-        /*
-         * Merge the guest exception intercepts in to the nested-guest ones.
-         */
-        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
-        hmR0SvmMergeIntercepts(pVmcb, pVmcbNstGst);
-
+        hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_NESTED_GUEST);
     }
@@ -2114,13 +2134,4 @@
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
 
-    /*
-     * Load guest intercepts first into the guest VMCB as later we may merge
-     * them into the nested-guest VMCB further below.
-     */
-    {
-        PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
-        hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb);
-    }
-
     PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     Assert(pVmcbNstGst);
@@ -2131,8 +2142,7 @@
     if (!pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0)
     {
-        /* hmR0SvmLoadGuestVmcbNested needs to be called first which caches the VMCB fields and adjusts others. */
+        /* First, we need to setup the nested-guest VMCB for hardware-assisted SVM execution. */
         hmR0SvmLoadGuestVmcbNested(pVCpu, pCtx);
 
-        hmR0SvmLoadGuestControlRegsNested(pVCpu, pVmcbNstGst, pCtx);
         hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);
         hmR0SvmLoadGuestMsrs(pVCpu, pVmcbNstGst, pCtx);
@@ -2144,5 +2154,9 @@
     }
 
+    hmR0SvmLoadGuestControlRegsNested(pVCpu, pVmcbNstGst, pCtx);
     hmR0SvmLoadGuestApicStateNested(pVCpu, pVmcbNstGst);
+
+    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    hmR0SvmLoadGuestXcptInterceptsNested(pVCpu, pVmcb, pVmcbNstGst);
 
     int rc = hmR0SvmSetupVMRunHandler(pVCpu);
@@ -5804,5 +5818,5 @@
 /**
  * Performs a \#VMEXIT when the VMRUN was emulating using hmR0SvmExecVmrun and
- * optionally then through SVM R0 execution.
+ * optionally went ahead with hardware-assisted SVM execution.
  *
  * @returns VBox status code.
@@ -5814,5 +5828,5 @@
     /*
      * Restore the modifications we did to the nested-guest VMCB in order
-     * to execute the nested-guest in SVM R0.
+     * to execute the nested-guest using hardware-assisted SVM.
      */
     PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
@@ -5878,5 +5892,5 @@
 
 /**
- * Setup execution of the nested-guest in SVM R0.
+ * Setup the nested-guest for hardware-assisted SVM execution.
  *
  * @returns VBox status code.
@@ -6001,9 +6015,4 @@
         if (fLongModeWithPaging)
             uValidEfer |= MSR_K6_EFER_LMA;
-
-        /*
-         * Set up the nested-guest for executing it using hardware-assisted SVM.
-         */
-        hmR0SvmLoadGuestVmcbNested(pVCpu, pCtx);
 
         /*
@@ -6074,4 +6083,9 @@
 
         /*
+         * Set up the nested-guest for executing it using hardware-assisted SVM.
+         */
+        hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
+
+        /*
          * VMRUN loads a subset of the guest-CPU state (see above) and nothing else. Ensure
          * hmR0SvmLoadGuestStateNested doesn't need to load anything back to the VMCB cache
@@ -6086,6 +6100,11 @@
         PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
         pNstGstVmcbCache->fVmrunEmulatedInR0 = true;
+
+        /*
+         * We flag a CR3 change to ensure loading the host-physical address of CR3 into
+         * the nested-guest VMCB in hmR0SvmLoadGuestControlRegsNested.
+         */
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_ALL_GUEST);
-        HMCPU_CF_SET(pVCpu,   HM_CHANGED_HOST_GUEST_SHARED_STATE);
+        HMCPU_CF_SET(pVCpu,   HM_CHANGED_HOST_GUEST_SHARED_STATE | HM_CHANGED_GUEST_CR3);
 
         /*
