Index: /trunk/include/VBox/vmm/gim.h
===================================================================
--- /trunk/include/VBox/vmm/gim.h	(revision 55128)
+++ /trunk/include/VBox/vmm/gim.h	(revision 55129)
@@ -175,5 +175,5 @@
 VMM_INT_DECL(int)           GIMHypercall(PVMCPU pVCpu, PCPUMCTX pCtx);
 VMM_INT_DECL(int)           GIMXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis);
-VMM_INT_DECL(bool)          GIMShouldTrapXcptUD(PVM pVM);
+VMM_INT_DECL(bool)          GIMShouldTrapXcptUD(PVMCPU pVCpu);
 VMM_INT_DECL(VBOXSTRICTRC)  GIMReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
 VMM_INT_DECL(VBOXSTRICTRC)  GIMWriteMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue);
Index: /trunk/include/VBox/vmm/hm.h
===================================================================
--- /trunk/include/VBox/vmm/hm.h	(revision 55128)
+++ /trunk/include/VBox/vmm/hm.h	(revision 55129)
@@ -144,6 +144,6 @@
 VMM_INT_DECL(int)               HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping);
 VMM_INT_DECL(bool)              HMSetSingleInstruction(PVMCPU pVCpu, bool fEnable);
-VMM_INT_DECL(void)              HMHypercallsEnable(PVM pVM);
-VMM_INT_DECL(void)              HMHypercallsDisable(PVM pVM);
+VMM_INT_DECL(void)              HMHypercallsEnable(PVMCPU pVCpu);
+VMM_INT_DECL(void)              HMHypercallsDisable(PVMCPU pVCpu);
 
 #ifndef IN_RC
Index: /trunk/include/VBox/vmm/vmm.h
===================================================================
--- /trunk/include/VBox/vmm/vmm.h	(revision 55128)
+++ /trunk/include/VBox/vmm/vmm.h	(revision 55129)
@@ -269,6 +269,6 @@
 VMM_INT_DECL(void)          VMMTrashVolatileXMMRegs(void);
 VMM_INT_DECL(int)           VMMPatchHypercall(PVM pVM, void *pvBuf, size_t cbBuf, size_t *pcbWritten);
-VMM_INT_DECL(void)          VMMHypercallsEnable(PVM pVM);
-VMM_INT_DECL(void)          VMMHypercallsDisable(PVM pVM);
+VMM_INT_DECL(void)          VMMHypercallsEnable(PVMCPU pVCpu);
+VMM_INT_DECL(void)          VMMHypercallsDisable(PVMCPU pVCpu);
 
 
Index: /trunk/src/VBox/VMM/VMMAll/GIMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/GIMAll.cpp	(revision 55128)
+++ /trunk/src/VBox/VMM/VMMAll/GIMAll.cpp	(revision 55129)
@@ -149,8 +149,9 @@
  *
  * @returns true if needed, false otherwise.
- * @param   pVM         Pointer to the VM.
- */
-VMM_INT_DECL(bool) GIMShouldTrapXcptUD(PVM pVM)
-{
+ * @param   pVCpu       Pointer to the VMCPU.
+ */
+VMM_INT_DECL(bool) GIMShouldTrapXcptUD(PVMCPU pVCpu)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (!GIMIsEnabled(pVM))
         return false;
@@ -159,5 +160,5 @@
     {
         case GIMPROVIDERID_KVM:
-            return gimKvmShouldTrapXcptUD(pVM);
+            return gimKvmShouldTrapXcptUD(pVCpu);
 
         default:
Index: /trunk/src/VBox/VMM/VMMAll/GIMAllKvm.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/GIMAllKvm.cpp	(revision 55128)
+++ /trunk/src/VBox/VMM/VMMAll/GIMAllKvm.cpp	(revision 55129)
@@ -324,8 +324,9 @@
  * For raw-mode VMs, this function will always return true. See gimR3KvmInit().
  *
- * @param   pVM         Pointer to the VM.
- */
-VMM_INT_DECL(bool) gimKvmShouldTrapXcptUD(PVM pVM)
-{
+ * @param   pVCpu       Pointer to the VMCPU.
+ */
+VMM_INT_DECL(bool) gimKvmShouldTrapXcptUD(PVMCPU pVCpu)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     return pVM->gim.s.u.Kvm.fTrapXcptUD;
 }
Index: /trunk/src/VBox/VMM/VMMAll/HMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMAll.cpp	(revision 55128)
+++ /trunk/src/VBox/VMM/VMMAll/HMAll.cpp	(revision 55129)
@@ -510,9 +510,9 @@
  * Notifies HM that paravirtualized hypercalls are now enabled.
  *
- * @param   pVM     Pointer to the VM.
- */
-VMM_INT_DECL(void) HMHypercallsEnable(PVM pVM)
-{
-    pVM->hm.s.fHypercallsEnabled = true;
+ * @param   pVCpu   Pointer to the VMCPU.
+ */
+VMM_INT_DECL(void) HMHypercallsEnable(PVMCPU pVCpu)
+{
+    pVCpu->hm.s.fHypercallsEnabled = true;
 }
 
@@ -521,9 +521,33 @@
  * Notifies HM that paravirtualized hypercalls are now disabled.
  *
- * @param   pVM     Pointer to the VM.
- */
-VMM_INT_DECL(void) HMHypercallsDisable(PVM pVM)
-{
-    pVM->hm.s.fHypercallsEnabled = false;
-}
-
+ * @param   pVCpu   Pointer to the VMCPU.
+ */
+VMM_INT_DECL(void) HMHypercallsDisable(PVMCPU pVCpu)
+{
+    pVCpu->hm.s.fHypercallsEnabled = false;
+}
+
+
+/**
+ * Notifies HM that GIM provider wants to trap #UD.
+ *
+ * @param   pVCpu   Pointer to the VMCPU.
+ */
+VMM_INT_DECL(void) HMTrapXcptUDForGIMEnable(PVMCPU pVCpu)
+{
+    pVCpu->hm.s.fGIMTrapXcptUD = true;
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
+}
+
+
+/**
+ * Notifies HM that GIM provider no longer wants to trap #UD.
+ *
+ * @param   pVCpu   Pointer to the VMCPU.
+ */
+VMM_INT_DECL(void) HMTrapXcptUDForGIMDisable(PVMCPU pVCpu)
+{
+    pVCpu->hm.s.fGIMTrapXcptUD = false;
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
+}
+
Index: /trunk/src/VBox/VMM/VMMAll/VMMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/VMMAll.cpp	(revision 55128)
+++ /trunk/src/VBox/VMM/VMMAll/VMMAll.cpp	(revision 55129)
@@ -439,12 +439,12 @@
  * Notifies VMM that paravirtualized hypercalls are now enabled.
  *
- * @param   pVM     Pointer to the VM.
- */
-VMM_INT_DECL(void) VMMHypercallsEnable(PVM pVM)
+ * @param   pVCpu   Pointer to the VMCPU.
+ */
+VMM_INT_DECL(void) VMMHypercallsEnable(PVMCPU pVCpu)
 {
     /* If there is anything to do for raw-mode, do it here. */
 #ifndef IN_RC
-    if (HMIsEnabled(pVM))
-        HMHypercallsEnable(pVM);
+    if (HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
+        HMHypercallsEnable(pVCpu);
 #endif
 }
@@ -454,13 +454,13 @@
  * Notifies VMM that paravirtualized hypercalls are now disabled.
  *
- * @param   pVM     Pointer to the VM.
- */
-VMM_INT_DECL(void) VMMHypercallsDisable(PVM pVM)
+ * @param   pVCpu   Pointer to the VMCPU.
+ */
+VMM_INT_DECL(void) VMMHypercallsDisable(PVMCPU pVCpu)
 {
     /* If there is anything to do for raw-mode, do it here. */
 #ifndef IN_RC
-    if (HMIsEnabled(pVM))
-        HMHypercallsDisable(pVM);
-#endif
-}
-
+    if (HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
+        HMHypercallsDisable(pVCpu);
+#endif
+}
+
Index: /trunk/src/VBox/VMM/VMMR0/HMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMR0.cpp	(revision 55128)
+++ /trunk/src/VBox/VMM/VMMR0/HMR0.cpp	(revision 55129)
@@ -1214,6 +1214,4 @@
     pVM->hm.s.uMaxAsid                  = g_HvmR0.uMaxAsid;
 
-    pVM->hm.s.fGIMTrapXcptUD            = GIMShouldTrapXcptUD(pVM);
-
     if (!pVM->hm.s.cMaxResumeLoops) /* allow ring-3 overrides */
     {
@@ -1229,6 +1227,7 @@
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
-        pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
-        pVCpu->hm.s.idLastCpu    = NIL_RTCPUID;
+        pVCpu->hm.s.idEnteredCpu   = NIL_RTCPUID;
+        pVCpu->hm.s.idLastCpu      = NIL_RTCPUID;
+        pVCpu->hm.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVCpu);
 
         /* We'll aways increment this the first time (host uses ASID 0). */
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 55128)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 55129)
@@ -670,5 +670,4 @@
     Assert(pVM->hm.s.svm.fSupported);
 
-    uint32_t const fGimXcptIntercepts = pVM->hm.s.fGIMTrapXcptUD ? RT_BIT(X86_XCPT_UD) : 0;
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
@@ -787,5 +786,6 @@
 
         /* Apply the exceptions intercepts needed by the GIM provider. */
-        pVmcb->ctrl.u32InterceptException |= fGimXcptIntercepts;
+        if (pVCpu->hm.s.fGIMTrapXcptUD)
+            pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_UD);
 
         /*
@@ -1634,4 +1634,27 @@
 
 /**
+ * Loads the exception intercepts required for guest execution in the VMCB.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pVmcb       Pointer to the VM control block.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+static int hmR0SvmLoadGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+{
+    int rc = VINF_SUCCESS;
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
+    {
+        if (pVCpu->hm.s.fGIMTrapXcptUD)
+            hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_UD);
+        else
+            hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_UD);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
+    }
+    return rc;
+}
+
+
+/**
  * Sets up the appropriate function to run guest code.
  *
@@ -1816,4 +1839,7 @@
     rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+    rc = hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb, pCtx);
+    AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
     rc = hmR0SvmSetupVMRunHandler(pVCpu, pCtx);
@@ -5008,6 +5034,5 @@
     else if (rc == VERR_NOT_FOUND)
     {
-        PVM pVM = pVCpu->CTX_SUFF(pVM);
-        if (pVM->hm.s.fHypercallsEnabled)
+        if (pVCpu->hm.s.fHypercallsEnabled)
         {
             rc = GIMHypercall(pVCpu, pCtx);
@@ -5227,6 +5252,5 @@
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
 
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    if (pVM->hm.s.fGIMTrapXcptUD)
+    if (pVCpu->hm.s.fGIMTrapXcptUD)
         GIMXcptUD(pVCpu, pCtx, NULL /* pDis */);
     else
Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 55128)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 55129)
@@ -2635,5 +2635,5 @@
     LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
 
-    uint32_t u32XcptBitmap = 0;
+    uint32_t u32XcptBitmap = pVCpu->hm.s.fGIMTrapXcptUD ? RT_BIT(X86_XCPT_UD) : 0;
 
     /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
@@ -3554,4 +3554,40 @@
 
 /**
+ * Loads the exception intercepts required for guest execution in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ */
+static int hmR0VmxLoadGuestXcptIntercepts(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    NOREF(pMixedCtx);
+    int rc = VINF_SUCCESS;
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
+    {
+        /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxLoadSharedCR0(). */
+        if (pVCpu->hm.s.fGIMTrapXcptUD)
+            pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_UD);
+        else
+        {
+#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
+            pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_UD);
+#endif
+        }
+
+        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
+        AssertRCReturn(rc, rc);
+
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
+        Log4(("Load[%RU32]: VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu,
+              pVCpu->hm.s.vmx.u32XcptBitmap, HMCPU_CF_VALUE(pVCpu)));
+    }
+    return rc;
+}
+
+
+/**
  * Loads the guest's RIP into the guest-state area in the VMCS.
  *
@@ -3779,4 +3815,5 @@
             pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
         }
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
 
         if (fInterceptNM)
@@ -3823,8 +3860,6 @@
         u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);          /* Always enable caching. */
 
-        /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
+        /* Write VT-x's view of the guest CR0 into the VMCS. */
         rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
-        AssertRCReturn(rc, rc);
-        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
         AssertRCReturn(rc, rc);
         Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", pVCpu->idCpu, u32GuestCR0, uSetCR0,
@@ -4220,13 +4255,15 @@
     if (   fInterceptDB
         || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
+    {
         pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
+    }
     else
     {
 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
         pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
-#endif
-    }
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
-    AssertRCReturn(rc, rc);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
+#endif
+    }
 
     /*
@@ -8295,4 +8332,7 @@
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
+    rc = hmR0VmxLoadGuestXcptIntercepts(pVCpu, pMixedCtx);
+    AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
     /*
      * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
@@ -8354,4 +8394,12 @@
 #endif
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
+    }
+
+    /* Loading CR0, debug state might have changed intercepts, update VMCS. */
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
+    {
+        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
+        AssertRC(rc);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
     }
 
@@ -10243,6 +10291,5 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
 
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    if (pVM->hm.s.fHypercallsEnabled)
+    if (pVCpu->hm.s.fHypercallsEnabled)
     {
 #if 0
@@ -11405,6 +11452,5 @@
 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
             pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
-            rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
-            AssertRCReturn(rc, rc);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
 #endif
         }
Index: /trunk/src/VBox/VMM/VMMR3/GIMHv.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/GIMHv.cpp	(revision 55128)
+++ /trunk/src/VBox/VMM/VMMR3/GIMHv.cpp	(revision 55129)
@@ -633,5 +633,6 @@
         GIMR3Mmio2Unmap(pVM, pRegion);
         Assert(!pRegion->fMapped);
-        VMMHypercallsDisable(pVM);
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
+            VMMHypercallsDisable(&pVM->aCpus[i]);
         LogRel(("GIM: HyperV: Disabled Hypercall-page\n"));
         return VINF_SUCCESS;
@@ -690,7 +691,8 @@
 
             /*
-             * Notify VMM that hypercalls are now enabled.
+             * Notify VMM that hypercalls are now enabled for all VCPUs.
              */
-            VMMHypercallsEnable(pVM);
+            for (VMCPUID i = 0; i < pVM->cCpus; i++)
+                VMMHypercallsEnable(&pVM->aCpus[i]);
 
             LogRel(("GIM: HyperV: Enabled hypercalls at %#RGp\n", GCPhysHypercallPage));
Index: /trunk/src/VBox/VMM/VMMR3/GIMKvm.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/GIMKvm.cpp	(revision 55128)
+++ /trunk/src/VBox/VMM/VMMR3/GIMKvm.cpp	(revision 55129)
@@ -143,7 +143,9 @@
 
     /*
-     * Setup #UD and hypercall behaviour.
-     */
-    VMMHypercallsEnable(pVM);
+     * Setup hypercall and #UD handling.
+     */
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
+        VMMHypercallsEnable(&pVM->aCpus[i]);
+
     if (ASMIsAmdCpu())
     {
@@ -157,4 +159,5 @@
         pKvm->uOpCodeNative = OP_VMCALL;
     }
+
     /* We always need to trap VMCALL/VMMCALL hypercall using #UDs for raw-mode VMs. */
     if (!HMIsEnabled(pVM))
Index: /trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp	(revision 55128)
+++ /trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp	(revision 55129)
@@ -610,5 +610,5 @@
             rc = EMInterpretInstructionDisasState(pVCpu, &Cpu, pRegFrame, PC, EMCODETYPE_SUPERVISOR);
         }
-        else if (GIMShouldTrapXcptUD(pVM))
+        else if (GIMShouldTrapXcptUD(pVCpu))
         {
             LogFlow(("TRPMGCTrap06Handler: -> GIMXcptUD\n"));
Index: /trunk/src/VBox/VMM/include/GIMKvmInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/GIMKvmInternal.h	(revision 55128)
+++ /trunk/src/VBox/VMM/include/GIMKvmInternal.h	(revision 55129)
@@ -261,5 +261,5 @@
 VMM_INT_DECL(VBOXSTRICTRC)      gimKvmReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
 VMM_INT_DECL(VBOXSTRICTRC)      gimKvmWriteMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue);
-VMM_INT_DECL(bool)              gimKvmShouldTrapXcptUD(PVM pVM);
+VMM_INT_DECL(bool)              gimKvmShouldTrapXcptUD(PVMCPU pVCpu);
 VMM_INT_DECL(int)               gimKvmXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis);
 
Index: /trunk/src/VBox/VMM/include/HMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/HMInternal.h	(revision 55128)
+++ /trunk/src/VBox/VMM/include/HMInternal.h	(revision 55129)
@@ -164,16 +164,17 @@
 #define HM_CHANGED_GUEST_EFER_MSR                RT_BIT(16)
 #define HM_CHANGED_GUEST_LAZY_MSRS               RT_BIT(17)     /* Shared */
+#define HM_CHANGED_GUEST_XCPT_INTERCEPTS         RT_BIT(18)
 /* VT-x specific state. */
-#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(18)
-#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(19)
-#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(20)
-#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(21)
-#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(22)
+#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(19)
+#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(20)
+#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(21)
+#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(22)
+#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(23)
 /* AMD-V specific state. */
-#define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(18)
-#define HM_CHANGED_SVM_RESERVED1                 RT_BIT(19)
-#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(20)
-#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(21)
-#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(22)
+#define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(19)
+#define HM_CHANGED_SVM_RESERVED1                 RT_BIT(20)
+#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(21)
+#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(22)
+#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(23)
 
 #define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0                \
@@ -195,4 +196,5 @@
                                                   | HM_CHANGED_GUEST_EFER_MSR           \
                                                   | HM_CHANGED_GUEST_LAZY_MSRS          \
+                                                  | HM_CHANGED_GUEST_XCPT_INTERCEPTS    \
                                                   | HM_CHANGED_VMX_GUEST_AUTO_MSRS      \
                                                   | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
@@ -201,5 +203,5 @@
                                                   | HM_CHANGED_VMX_EXIT_CTLS)
 
-#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(23)
+#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(24)
 
 /* Bits shared between host and guest. */
@@ -343,9 +345,5 @@
     /** Set when TPR patching is active. */
     bool                        fTPRPatchingActive;
-    /** Whether #UD needs to be intercepted (required by certain GIM providers). */
-    bool                        fGIMTrapXcptUD;
-    /** Whether paravirt. hypercalls are enabled. */
-    bool                        fHypercallsEnabled;
-    bool                        u8Alignment[1];
+    bool                        u8Alignment[3];
 
     /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */
@@ -584,4 +582,10 @@
     /** Whether to preload the guest-FPU state to avoid #NM VM-exit overhead. */
     bool                        fPreloadGuestFpu;
+
+    /** Whether #UD needs to be intercepted (required by certain GIM providers). */
+    bool                        fGIMTrapXcptUD;
+    /** Whether paravirt. hypercalls are enabled. */
+    bool                        fHypercallsEnabled;
+    uint8_t                     u8Alignment0[6];
 
     /** World switch exit counter. */
