Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 54074)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 54075)
@@ -148,7 +148,8 @@
  * Exception bitmap mask for real-mode guests (real-on-v86).
  *
- * We need to intercept all exceptions manually (except #PF). #NM is also
- * handled separately, see hmR0VmxLoadSharedCR0(). #PF need not be intercepted
- * even in real-mode if we have Nested Paging support.
+ * We need to intercept all exceptions manually except:
+ * - #NM, #MF are handled separately, see hmR0VmxLoadSharedCR0().
+ * - #PF need not be intercepted even in real-mode if we have Nested Paging
+ * support.
  */
 #define HMVMX_REAL_MODE_XCPT_MASK    (  RT_BIT(X86_XCPT_DE)             | RT_BIT(X86_XCPT_DB)    | RT_BIT(X86_XCPT_NMI)   \
@@ -157,5 +158,5 @@
                                       | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS)    | RT_BIT(X86_XCPT_NP)    \
                                       | RT_BIT(X86_XCPT_SS)             | RT_BIT(X86_XCPT_GP)   /* RT_BIT(X86_XCPT_PF) */ \
-                                      | RT_BIT(X86_XCPT_MF)             | RT_BIT(X86_XCPT_AC)    | RT_BIT(X86_XCPT_MC)    \
+                                     /* RT_BIT(X86_XCPT_MF) */          | RT_BIT(X86_XCPT_AC)    | RT_BIT(X86_XCPT_MC)    \
                                       | RT_BIT(X86_XCPT_XF))
 
@@ -2621,6 +2622,5 @@
 
 /**
- * Sets up the initial exception bitmap in the VMCS based on static conditions
- * (i.e. conditions that cannot ever change after starting the VM).
+ * Sets up the initial exception bitmap in the VMCS based on static conditions.
  *
  * @returns VBox status code.
@@ -3773,6 +3773,4 @@
             fInterceptMF = true;
         }
-        else
-            pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
 
         if (fInterceptNM)
@@ -8214,9 +8212,11 @@
 
 /**
- * Loads the guest state into the VMCS guest-state area. The CPU state will be
- * loaded from these fields on every successful VM-entry.
- *
- * Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
- * Sets up the VM-entry controls.
+ * Loads the guest state into the VMCS guest-state area.
+ *
+ * This will typically be done before VM-entry when the guest-CPU state and the
+ * VMCS state may potentially be out of sync.
+ *
+ * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
+ * VM-entry controls.
  * Sets up the appropriate VMX non-root function to execute guest code based on
  * the guest CPU mode.
@@ -10979,4 +10979,13 @@
         AssertRCReturn(rc2, rc2);
     }
+    else if (rc == VINF_PGM_CHANGE_MODE)
+    {
+        /*
+         * Clear the exception-mask here rather than messing with it in hmR0VmxLoadSharedCR0(), since the fRealOnV86Active
+         * state may be changed now. Re-evaluate the necessary intercepts when we return to VT-x execution via
+         * hmR0VmxLoadSharedCR0() and hmR0VmxLoadSharedDebugState(), see @bugref{7626}.
+         */
+        hmR0VmxInitXcptBitmap(pVM, pVCpu);
+    }
 
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
