Index: /trunk/include/VBox/err.h
===================================================================
--- /trunk/include/VBox/err.h	(revision 51219)
+++ /trunk/include/VBox/err.h	(revision 51220)
@@ -1941,11 +1941,11 @@
 /** Internal VMX processing error no 1. */
 #define VERR_HMVMX_IPE_1                            (-4023)
-/** Internal VMX processing error no 1. */
+/** Internal VMX processing error no 2. */
 #define VERR_HMVMX_IPE_2                            (-4024)
-/** Internal VMX processing error no 1. */
+/** Internal VMX processing error no 3. */
 #define VERR_HMVMX_IPE_3                            (-4025)
-/** Internal VMX processing error no 1. */
+/** Internal VMX processing error no 4. */
 #define VERR_HMVMX_IPE_4                            (-4026)
-/** Internal VMX processing error no 1. */
+/** Internal VMX processing error no 5. */
 #define VERR_HMVMX_IPE_5                            (-4027)
 /** VT-x features for all modes (SMX and non-SMX) disabled by the BIOS. */
@@ -1953,4 +1953,16 @@
 /** VT-x features disabled by the BIOS. */
 #define VERR_VMX_MSR_VMXON_DISABLED                 (-4029)
+/** VM-Entry Controls internal cache invalid. */
+#define VERR_VMX_ENTRY_CTLS_CACHE_INVALID           (-4030)
+/** VM-Exit Controls internal cache invalid. */
+#define VERR_VMX_EXIT_CTLS_CACHE_INVALID            (-4031)
+/** VM-Execution Pin-based Controls internal cache invalid. */
+#define VERR_VMX_PIN_EXEC_CTLS_CACHE_INVALID        (-4032)
+/** VM-Execution Primary Processor-based Controls internal cache
+ *  invalid. */
+#define VERR_VMX_PROC_EXEC_CTLS_CACHE_INVALID       (-4033)
+/** VM-Execution Secondary Processor-based Controls internal
+ *  cache invalid. */
+#define VERR_VMX_PROC_EXEC2_CTLS_CACHE_INVALID      (-4034)
 /** @} */
 
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 51219)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 51220)
@@ -1359,9 +1359,9 @@
      * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
      */
-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_EFER_MSR))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
     {
         pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
-        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_EFER_MSR);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
     }
 
@@ -4433,5 +4433,5 @@
         }
         else if (pCtx->ecx == MSR_K6_EFER)
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_EFER_MSR);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
         else if (pCtx->ecx == MSR_IA32_TSC)
             pSvmTransient->fUpdateTscOffsetting = true;
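For reference, the VMCB clean-bits idiom the EFER update above relies on, as a sketch (semantics per the AMD APM's "VMCB Clean Field"; the helper function is hypothetical, the types and constants are from the VirtualBox SVM headers):

```c
/* Every set bit in u64VmcbCleanBits marks a VMCB field group as unchanged,
 * letting the CPU use its cached copy on the next VMRUN.  Writing a field
 * therefore requires clearing that group's bit so hardware re-reads it. */
static void svmUpdateGuestEfer(PSVMVMCB pVmcb, uint64_t uGuestEfer)
{
    pVmcb->guest.u64EFER          = uGuestEfer | MSR_K6_EFER_SVME;  /* EFER.SVME must stay set while the guest runs. */
    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;     /* Invalidate the cached CR/EFER field group.    */
}
```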
Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 51219)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 51220)
@@ -1556,5 +1556,68 @@
 
 
+/**
+ * Verifies that our cached values of the VMCS controls are all
+ * consistent with what's actually present in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param pVCpu     Pointer to the VMCPU.
+ */
+static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    uint32_t u32Val;
+    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32EntryCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
+                    VERR_VMX_ENTRY_CTLS_CACHE_INVALID);
+
+    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32ExitCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
+                    VERR_VMX_EXIT_CTLS_CACHE_INVALID);
+
+    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32PinCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
+                    VERR_VMX_PIN_EXEC_CTLS_CACHE_INVALID);
+
+    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
+                    VERR_VMX_PROC_EXEC_CTLS_CACHE_INVALID);
+
+    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val, ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
+                    VERR_VMX_PROC_EXEC2_CTLS_CACHE_INVALID);
+
+    return VINF_SUCCESS;
+}
+
+
 #ifdef VBOX_STRICT
+/**
+ * Verifies that our cached host EFER value has not changed
+ * since we cached it.
+ *
+ * @param pVCpu         Pointer to the VMCPU.
+ */
+static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
+    {
+        uint64_t u64Val;
+        int rc = VMXReadVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, &u64Val);
+        AssertRC(rc);
+
+        uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
+        AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
+    }
+}
+
+
 /**
  * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
@@ -2660,4 +2723,16 @@
         return rc;
     }
+
+    /* Check if we can use the VMCS controls for swapping the EFER MSR. */
+    Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (   HMVMX_IS_64BIT_HOST_MODE()
+        && (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
+        && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1  & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
+        && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1  & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
+    {
+        pVM->hm.s.vmx.fSupportsVmcsEfer = true;
+    }
+#endif
 
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
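For context on the allowed1 tests above: each VMX control word has a capability MSR whose two halves constrain which bits may be cleared or set. A minimal sketch of the decode, assuming the IA32_VMX_EXIT_CTLS layout from the Intel SDM (the MSR constant is the iprt/x86.h identifier; the helper function is hypothetical):

```c
static bool vmxHasVmcsEferExitCtl(void)
{
    /* Low half: allowed-0 settings, i.e. bits that must be 1 in the control.
     * High half: allowed-1 settings, i.e. bits that may be 1.  A control such
     * as "load IA32_EFER on VM-exit" is usable iff its allowed-1 bit is set. */
    uint64_t const u64ExitCaps = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
    uint32_t const fAllowed1   = (uint32_t)(u64ExitCaps >> 32);
    return RT_BOOL(fAllowed1 & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR);
}
```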
@@ -3079,7 +3154,70 @@
     AssertRCReturn(rc, rc);
 
-    /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, IA32_EFER, also see
-     *        hmR0VmxSetupExitCtls() !! */
+    /*
+     * If the CPU supports the newer VMCS controls for managing EFER, use them.
+     */
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (   HMVMX_IS_64BIT_HOST_MODE()
+        && pVM->hm.s.vmx.fSupportsVmcsEfer)
+    {
+        rc = VMXWriteVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
+        AssertRCReturn(rc, rc);
+    }
+#endif
+
+    /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, also see
+     *        hmR0VmxLoadGuestExitCtls() !! */
+
     return rc;
+}
+
+
+/**
+ * Figures out if we need to swap the EFER MSR, which is
+ * particularly expensive.
+ *
+ * We check all relevant bits. For now, that's everything
+ * besides LMA/LME, as these two bits are handled by VM-entry,
+ * see hmR0VmxLoadGuestExitCtls() and
+ * hmR0VmxLoadGuestEntryCtls().
+ *
+ * @returns true if we need to load guest EFER, false otherwise.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks Requires EFER, CR4.
+ * @remarks No-long-jump zone!!!
+ */
+static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    PVM      pVM          = pVCpu->CTX_SUFF(pVM);
+    uint64_t u64HostEfer  = pVM->hm.s.vmx.u64HostEfer;
+    uint64_t u64GuestEfer = pMixedCtx->msrEFER;
+
+    /*
+     * For 64-bit guests, if the EFER.SCE bit differs, we need to swap to ensure that the
+     * guest's SYSCALL behaviour isn't broken. See @bugref{7386}.
+     */
+    if (   pVM->hm.s.fAllow64BitGuests
+        && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
+    {
+        return true;
+    }
+
+    /*
+     * If the guest uses PAE and EFER.NXE bit differs, we need to swap as it affects guest paging.
+     * 64-bit paging implies CR4.PAE as well. See Intel spec. 4.5 "IA32e Paging".
+     */
+    if (   (pMixedCtx->cr4 & X86_CR4_PAE)
+        && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
+    {
+        return true;
+    }
+
+    /** @todo Check the latest Intel spec. for any other bits,
+     *        like SMEP/SMAP? */
+    return false;
 }
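A worked example of the decision above, as standalone C with the architectural EFER bit positions spelled out (the values match the MSR_K6_EFER_* definitions in iprt/x86.h; the host/guest EFER values are hypothetical):

```c
#include <assert.h>
#include <stdint.h>

#define EFER_SCE  UINT64_C(0x0001)  /* Bit  0: SYSCALL/SYSRET enable.        */
#define EFER_LME  UINT64_C(0x0100)  /* Bit  8: long mode enable.             */
#define EFER_LMA  UINT64_C(0x0400)  /* Bit 10: long mode active (read-only). */
#define EFER_NXE  UINT64_C(0x0800)  /* Bit 11: no-execute enable.            */

int main(void)
{
    /* Hypothetical values: a 64-bit host, and a 64-bit guest that never
     * enabled SYSCALL.  LMA/LME differences are deliberately ignored by
     * hmR0VmxShouldSwapEferMsr() because the "IA-32e mode guest" VM-entry
     * control already handles them. */
    uint64_t const uHostEfer  = EFER_SCE | EFER_LME | EFER_LMA | EFER_NXE;
    uint64_t const uGuestEfer = EFER_LME | EFER_LMA | EFER_NXE;

    /* EFER.SCE differs, so the MSR must be swapped on entry/exit; otherwise
     * the guest would run with the host's SYSCALL setting. */
    assert((uGuestEfer & EFER_SCE) != (uHostEfer & EFER_SCE));
    return 0;
}
```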
 
@@ -3096,4 +3234,5 @@
  *                      before using them.
  *
+ * @remarks Requires EFER.
  * @remarks No-long-jump zone!!!
  */
@@ -3112,7 +3251,21 @@
         /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
         if (CPUMIsGuestInLongModeEx(pMixedCtx))
+        {
             val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
+            Log4(("Load: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n"));
+        }
         else
             Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
+
+        /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use them. */
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        if (   HMVMX_IS_64BIT_HOST_MODE()
+            && pVM->hm.s.vmx.fSupportsVmcsEfer
+            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+        {
+            val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
+            Log4(("Load: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n"));
+        }
+#endif
 
         /*
@@ -3123,6 +3276,5 @@
 
         /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
-         *        VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR,
-         *        VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR */
+         *        VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */
 
         if ((val & zap) != val)
@@ -3155,5 +3307,5 @@
  *                      before using them.
  *
- * @remarks requires EFER.
+ * @remarks Requires EFER.
  */
 DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
@@ -3177,10 +3329,25 @@
 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
         if (HMVMX_IS_64BIT_HOST_MODE())
+        {
             val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
+            Log4(("Load: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
+
+            /* If the newer VMCS fields for managing EFER exist, use them. */
+            if (   pVM->hm.s.vmx.fSupportsVmcsEfer
+                && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+            {
+                val |=   VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
+                       | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
+            }
+        }
         else
             Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
 #elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
         if (CPUMIsGuestInLongModeEx(pMixedCtx))
-            val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;    /* The switcher goes to long mode. */
+        {
+            /* The switcher returns to long mode; EFER is managed by the switcher. */
+            val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
+            Log4(("Load: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
+        }
         else
             Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
@@ -3192,7 +3359,5 @@
         /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
          *        VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
-         *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR,
-         *        VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR,
-         *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR. */
+         *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
 
         if (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)
@@ -4530,4 +4695,19 @@
     }
 
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
+    {
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        if (   HMVMX_IS_64BIT_HOST_MODE()
+            && pVM->hm.s.vmx.fSupportsVmcsEfer
+            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))  /* Not really needed here, but avoids a VM-write when running as a nested guest. */
+        {
+            int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
+            AssertRCReturn(rc, rc);
+            Log4(("Load: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pMixedCtx->msrEFER));
+        }
+#endif
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
+    }
+
     return VINF_SUCCESS;
 }
@@ -4587,6 +4767,8 @@
             {
                 /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. */
-                AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS),
-                          ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
+                AssertMsg(HMCPU_CF_IS_SET(pVCpu,   HM_CHANGED_HOST_CONTEXT
+                                                 | HM_CHANGED_VMX_EXIT_CTLS
+                                                 | HM_CHANGED_VMX_ENTRY_CTLS
+                                                 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
             }
             pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
@@ -4606,6 +4788,8 @@
             {
                 /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. */
-                AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS),
-                          ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
+                AssertMsg(HMCPU_CF_IS_SET(pVCpu,   HM_CHANGED_HOST_CONTEXT
+                                                 | HM_CHANGED_VMX_EXIT_CTLS
+                                                 | HM_CHANGED_VMX_ENTRY_CTLS
+                                                 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
             }
             pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
@@ -7892,4 +8076,6 @@
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
+    /* This needs to be done after hmR0VmxLoadGuestEntryCtls() and hmR0VmxLoadGuestExitCtls() as it may alter controls if we
+       determine we don't have to swap EFER after all. */
     rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadSharedMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
@@ -8003,10 +8189,4 @@
               ||  HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
               ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
-
-#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
-    uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
-    if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
-        Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
-#endif
 }
 
@@ -8282,6 +8462,14 @@
         }
     }
+
 #ifdef VBOX_STRICT
     hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
+    hmR0VmxCheckHostEferMsr(pVCpu);
+    AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
+#endif
+#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
+    uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
+    if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
+        Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
 #endif
 }
@@ -8342,4 +8530,7 @@
 #endif
     pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;          /* Use VMRESUME instead of VMLAUNCH in the next run. */
+#ifdef VBOX_STRICT
+    hmR0VmxCheckHostEferMsr(pVCpu);                                   /* Verify that VMLAUNCH/VMRESUME didn't modify host EFER. */
+#endif
     ASMSetFlags(pVmxTransient->uEflags);                              /* Enable interrupts. */
     VMMRZCallRing3Enable(pVCpu);                                      /* It is now safe to do longjmps to ring-3!!! */
@@ -8763,4 +8954,7 @@
  * @param   pVCpu   Pointer to the VMCPU.
  * @param   pCtx    Pointer to the guest-CPU state.
+ *
+ * @remarks This function assumes our cache of the VMCS controls
+ *          is valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
  */
 static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
@@ -8964,12 +9158,14 @@
         if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
         {
+            Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
             AssertRCBreak(rc);
             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
                               VMX_IGS_EFER_MSR_RESERVED);               /* Bits 63:12, bit 9, bits 7:1 MBZ. */
-            HMVMX_CHECK_BREAK((u64Val & MSR_K6_EFER_LMA) == (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
+            HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
                               VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
             HMVMX_CHECK_BREAK(   fUnrestrictedGuest
-                              || (u64Val & MSR_K6_EFER_LMA) == (u32GuestCR0 & X86_CR0_PG), VMX_IGS_EFER_LMA_PG_MISMATCH);
+                              || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u32GuestCR0 & X86_CR0_PG),
+                              VMX_IGS_EFER_LMA_PG_MISMATCH);
         }
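Why the RT_BOOL() normalization in this check matters: EFER.LMA is bit 10 while the IA-32e-mode-guest VM-entry control is bit 9, so the raw masked values can never compare equal when both flags are set. A standalone illustration (RT_BOOL is reproduced from iprt/cdefs.h):

```c
#include <assert.h>
#include <stdint.h>

#define RT_BOOL(a) ( !!(a) )   /* IPRT: normalize any non-zero value to 1. */

int main(void)
{
    uint64_t const fEfer     = UINT64_C(0x400);  /* EFER.LMA set (bit 10).         */
    uint32_t const fEntryCtl = UINT32_C(0x200);  /* IA-32e mode guest set (bit 9). */

    /* The old comparison reports a mismatch even though both flags are set: */
    assert((fEfer & UINT64_C(0x400)) != (fEntryCtl & UINT32_C(0x200)));
    /* Folding each side to 0/1 first gives the intended result: */
    assert(RT_BOOL(fEfer & UINT64_C(0x400)) == RT_BOOL(fEntryCtl & UINT32_C(0x200)));
    return 0;
}
```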
 
@@ -9958,4 +10154,7 @@
     AssertRCReturn(rc, rc);
 
+    rc = hmR0VmxCheckVmcsCtls(pVCpu);
+    AssertRCReturn(rc, rc);
+
     uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
     NOREF(uInvalidReason);
@@ -10163,4 +10362,13 @@
         else if (pMixedCtx->ecx == MSR_IA32_TSC)        /* Windows 7 does this during bootup. See @bugref{6398}. */
             pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
+        else if (pMixedCtx->ecx == MSR_K6_EFER)
+        {
+            /*
+             * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,
+             * even if it is -not- touching bits that cause paging mode changes (LMA/LME), because we
+             * also care about the other bits, SCE and NXE. See @bugref{7368}.
+             */
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS);
+        }
 
         /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
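Taken together, a guest WRMSR to EFER now triggers the following re-load sequence on the next VM-entry (a flow sketch using only names from this file):

```c
/* 1. The WRMSR exit handler marks EFER and both control words dirty: */
HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS);
/* 2. hmR0VmxLoadGuestEntryCtls() re-runs hmR0VmxShouldSwapEferMsr() and
 *    sets/clears VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR accordingly.
 * 3. hmR0VmxLoadGuestExitCtls() does the same for the SAVE_GUEST/LOAD_HOST
 *    EFER exit controls.
 * 4. hmR0VmxLoadGuestMsrs() writes VMX_VMCS64_GUEST_EFER_FULL and clears
 *    HM_CHANGED_GUEST_EFER_MSR. */
```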
Index: /trunk/src/VBox/VMM/VMMR3/HM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 51219)
+++ /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 51220)
@@ -352,10 +352,11 @@
      * Misc initialisation.
      */
-    //pVM->hm.s.vmx.fSupported = false;
-    //pVM->hm.s.svm.fSupported = false;
-    //pVM->hm.s.vmx.fEnabled   = false;
-    //pVM->hm.s.svm.fEnabled   = false;
-    //pVM->hm.s.fNestedPaging  = false;
-
+#if 0
+    pVM->hm.s.vmx.fSupported = false;
+    pVM->hm.s.svm.fSupported = false;
+    pVM->hm.s.vmx.fEnabled   = false;
+    pVM->hm.s.svm.fEnabled   = false;
+    pVM->hm.s.fNestedPaging  = false;
+#endif
 
     /*
@@ -1260,4 +1261,5 @@
     }
 
+    LogRel(("HM: Supports VMCS EFER fields       = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEfer));
     LogRel(("HM: VMX enabled!\n"));
     pVM->hm.s.vmx.fEnabled = true;
Index: /trunk/src/VBox/VMM/include/HMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/HMInternal.h	(revision 51219)
+++ /trunk/src/VBox/VMM/include/HMInternal.h	(revision 51220)
@@ -148,17 +148,18 @@
 #define HM_CHANGED_GUEST_SYSENTER_EIP_MSR        RT_BIT(14)
 #define HM_CHANGED_GUEST_SYSENTER_ESP_MSR        RT_BIT(15)
-#define HM_CHANGED_GUEST_LAZY_MSRS               RT_BIT(16)     /* Shared */
+#define HM_CHANGED_GUEST_EFER_MSR                RT_BIT(16)
+#define HM_CHANGED_GUEST_LAZY_MSRS               RT_BIT(17)     /* Shared */
 /* VT-x specific state. */
-#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(17)
-#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(18)
-#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(19)
-#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(20)
-#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(21)
+#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(18)
+#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(19)
+#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(20)
+#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(21)
+#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(22)
 /* AMD-V specific state. */
-#define HM_CHANGED_SVM_GUEST_EFER_MSR            RT_BIT(17)
 #define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(18)
 #define HM_CHANGED_SVM_RESERVED1                 RT_BIT(19)
 #define HM_CHANGED_SVM_RESERVED2                 RT_BIT(20)
 #define HM_CHANGED_SVM_RESERVED3                 RT_BIT(21)
+#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(22)
 
 #define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0                \
@@ -178,4 +179,5 @@
                                                   | HM_CHANGED_GUEST_SYSENTER_EIP_MSR   \
                                                   | HM_CHANGED_GUEST_SYSENTER_ESP_MSR   \
+                                                  | HM_CHANGED_GUEST_EFER_MSR           \
                                                   | HM_CHANGED_GUEST_LAZY_MSRS          \
                                                   | HM_CHANGED_VMX_GUEST_AUTO_MSRS      \
@@ -185,5 +187,5 @@
                                                   | HM_CHANGED_VMX_EXIT_CTLS)
 
-#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(22)
+#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(23)
 
 /* Bits shared between host and guest. */
@@ -414,4 +416,7 @@
         /** Host EFER value (set by ring-0 VMX init) */
         uint64_t                    u64HostEfer;
+        /** Whether the CPU supports VMCS fields for swapping EFER. */
+        bool                        fSupportsVmcsEfer;
+        bool                        afAlignment1[7];
 
         /** VMX MSR values */
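Two footnotes on the HMInternal.h layout. The VT-x and AMD-V specific ranges deliberately share bit positions 18-22, since a VM runs on exactly one of the two engines; that overlap is why HM_CHANGED_HOST_CONTEXT has to sit above both ranges, moving from bit 22 to bit 23. Likewise, the new bool is padded to 8 bytes (afAlignment1[7]) to keep the 64-bit members that follow naturally aligned. A hypothetical compile-time guard against renumbering slips (AssertCompile is IPRT's static assertion; this check is not in the sources):

```c
#include <iprt/assert.h>

/* HM_CHANGED_HOST_CONTEXT (bit 23) must not collide with any bit in the
 * combined guest mask (bits 0..22); a renumbering mistake would trip this. */
AssertCompile(!(HM_CHANGED_ALL_GUEST & HM_CHANGED_HOST_CONTEXT));
```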
