Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 61358)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 61359)
@@ -1458,5 +1458,4 @@
 
 
-#if HC_ARCH_BITS == 64
 /**
  * Saves a set of host MSRs to allow read/write passthru access to the guest and
@@ -1474,10 +1473,16 @@
      * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
      */
-    if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
-    {
-        pVCpu->hm.s.vmx.u64HostLStarMsr        = ASMRdMsr(MSR_K8_LSTAR);
-        pVCpu->hm.s.vmx.u64HostStarMsr         = ASMRdMsr(MSR_K6_STAR);
-        pVCpu->hm.s.vmx.u64HostSFMaskMsr       = ASMRdMsr(MSR_K8_SF_MASK);
-        pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+    Assert(!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST));
+    if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
+    {
+#if HC_ARCH_BITS == 64
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        {
+            pVCpu->hm.s.vmx.u64HostLStarMsr        = ASMRdMsr(MSR_K8_LSTAR);
+            pVCpu->hm.s.vmx.u64HostStarMsr         = ASMRdMsr(MSR_K6_STAR);
+            pVCpu->hm.s.vmx.u64HostSFMaskMsr       = ASMRdMsr(MSR_K8_SF_MASK);
+            pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+        }
+#endif
         pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
     }
@@ -1496,12 +1501,17 @@
 {
     NOREF(pVCpu);
-    switch (uMsr)
-    {
-        case MSR_K8_LSTAR:
-        case MSR_K6_STAR:
-        case MSR_K8_SF_MASK:
-        case MSR_K8_KERNEL_GS_BASE:
-            return true;
-    }
+#if HC_ARCH_BITS == 64
+    if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+    {
+        switch (uMsr)
+        {
+            case MSR_K8_LSTAR:
+            case MSR_K6_STAR:
+            case MSR_K8_SF_MASK:
+            case MSR_K8_KERNEL_GS_BASE:
+                return true;
+        }
+    }
+#endif
     return false;
 }
@@ -1526,8 +1536,13 @@
     {
         Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
-        pMixedCtx->msrLSTAR        = ASMRdMsr(MSR_K8_LSTAR);
-        pMixedCtx->msrSTAR         = ASMRdMsr(MSR_K6_STAR);
-        pMixedCtx->msrSFMASK       = ASMRdMsr(MSR_K8_SF_MASK);
-        pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+#if HC_ARCH_BITS == 64
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        {
+            pMixedCtx->msrLSTAR        = ASMRdMsr(MSR_K8_LSTAR);
+            pMixedCtx->msrSTAR         = ASMRdMsr(MSR_K6_STAR);
+            pMixedCtx->msrSFMASK       = ASMRdMsr(MSR_K8_SF_MASK);
+            pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+        }
+#endif
     }
 }
@@ -1565,16 +1580,14 @@
     if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
     {
-        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
-        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
-        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
-        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
+#if HC_ARCH_BITS == 64
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        {
+            VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
+            VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
+            VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
+            VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
+        }
+#endif
         pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
-    }
-    else
-    {
-        ASMWrMsr(MSR_K8_LSTAR,          pMixedCtx->msrLSTAR);
-        ASMWrMsr(MSR_K6_STAR,           pMixedCtx->msrSTAR);
-        ASMWrMsr(MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK);
-        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
     }
 
@@ -1601,12 +1614,16 @@
     {
         Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
-        ASMWrMsr(MSR_K8_LSTAR,          pVCpu->hm.s.vmx.u64HostLStarMsr);
-        ASMWrMsr(MSR_K6_STAR,           pVCpu->hm.s.vmx.u64HostStarMsr);
-        ASMWrMsr(MSR_K8_SF_MASK,        pVCpu->hm.s.vmx.u64HostSFMaskMsr);
-        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
+#if HC_ARCH_BITS == 64
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        {
+            ASMWrMsr(MSR_K8_LSTAR,          pVCpu->hm.s.vmx.u64HostLStarMsr);
+            ASMWrMsr(MSR_K6_STAR,           pVCpu->hm.s.vmx.u64HostStarMsr);
+            ASMWrMsr(MSR_K8_SF_MASK,        pVCpu->hm.s.vmx.u64HostSFMaskMsr);
+            ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
+        }
+#endif
     }
     pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
 }
-#endif  /* HC_ARCH_BITS == 64 */
 
 
@@ -2431,4 +2448,5 @@
         }
 #endif
+        /* Though MSR_IA32_PERF_GLOBAL_CTRL is saved/restored lazily, we want to intercept reads/writes to it for now. */
     }
 
@@ -3095,14 +3113,14 @@
     AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
 
-    int rc = VINF_SUCCESS;
-#if HC_ARCH_BITS == 64
-    if (pVM->hm.s.fAllow64BitGuests)
-        hmR0VmxLazySaveHostMsrs(pVCpu);
-#endif
+    /*
+     * Save MSRs that we restore lazily (due to preemption or transition to ring-3)
+     * rather than swapping them on every VM-entry.
+     */
+    hmR0VmxLazySaveHostMsrs(pVCpu);
 
     /*
      * Host Sysenter MSRs.
      */
-    rc  = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS,       ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
+    int rc  = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS,   ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
 #if HC_ARCH_BITS == 32
     rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP,        ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
@@ -6169,27 +6187,17 @@
 static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
-#if HC_ARCH_BITS == 64
-    if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
-    {
-        /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
-        VMMRZCallRing3Disable(pVCpu);
-        HM_DISABLE_PREEMPT();
-
-        /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
-        if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
-        {
-            hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
-            HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
-        }
-
-        HM_RESTORE_PREEMPT();
-        VMMRZCallRing3Enable(pVCpu);
-    }
-    else
+    /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
+    VMMRZCallRing3Disable(pVCpu);
+    HM_DISABLE_PREEMPT();
+
+    /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
+    {
+        hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
-#else
-    NOREF(pMixedCtx);
-    HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
-#endif
+    }
+
+    HM_RESTORE_PREEMPT();
+    VMMRZCallRing3Enable(pVCpu);
 
     return VINF_SUCCESS;
@@ -7047,8 +7055,6 @@
 #endif
 
-#if HC_ARCH_BITS == 64
     /* Restore the lazy host MSRs as we're leaving VT-x context. */
-    if (   pVM->hm.s.fAllow64BitGuests
-        && pVCpu->hm.s.vmx.fLazyMsrs)
+    if (pVCpu->hm.s.vmx.fLazyMsrs)
     {
         /* We shouldn't reload the guest MSRs without saving it first. */
@@ -7062,5 +7068,4 @@
         Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
     }
-#endif
 
     /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
@@ -7288,10 +7293,9 @@
             VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
         pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
-
+#endif
         /* Restore the lazy host MSRs as we're leaving VT-x context. */
-        if (   pVCpu->hm.s.vmx.fLazyMsrs
-            && pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        if (pVCpu->hm.s.vmx.fLazyMsrs)
             hmR0VmxLazyRestoreHostMsrs(pVCpu);
-#endif
+
         /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
         pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
@@ -8182,17 +8186,18 @@
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
-    if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
-        return VINF_SUCCESS;
-
-    int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
-    rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
-    rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
-    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
+    int rc = VINF_SUCCESS;
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
+    {
+        rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
+        AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+        rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
+        AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+        rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
+        AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
+    }
     return rc;
 }
@@ -8367,8 +8372,5 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
     {
-#if HC_ARCH_BITS == 64
-        if (pVM->hm.s.fAllow64BitGuests)
-            hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
-#endif
+        hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
     }
@@ -11884,12 +11886,16 @@
             HMVMX_RETURN_UNEXPECTED_EXIT();
         }
-# if HC_ARCH_BITS == 64
-        if (   pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
-            && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
-        {
-            AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
-            HMVMX_RETURN_UNEXPECTED_EXIT();
-        }
-# endif
+        if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
+        {
+            VMXMSREXITREAD  enmRead;
+            VMXMSREXITWRITE enmWrite;
+            int rc2 = hmR0VmxGetMsrPermission(pVCpu, pMixedCtx->ecx, &enmRead, &enmWrite);
+            AssertRCReturn(rc2, rc2);
+            if (enmRead == VMXMSREXIT_PASSTHRU_READ)
+            {
+                AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
+                HMVMX_RETURN_UNEXPECTED_EXIT();
+            }
+        }
     }
 #endif
@@ -11974,8 +11980,6 @@
                     if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
                         HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
-#if HC_ARCH_BITS == 64
                     else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
                         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
-#endif
                     break;
                 }
@@ -12012,11 +12016,16 @@
                     }
 
-#if HC_ARCH_BITS == 64
                     if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
                     {
-                        AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
-                        HMVMX_RETURN_UNEXPECTED_EXIT();
+                        VMXMSREXITREAD  enmRead;
+                        VMXMSREXITWRITE enmWrite;
+                        int rc2 = hmR0VmxGetMsrPermission(pVCpu, pMixedCtx->ecx, &enmRead, &enmWrite);
+                        AssertRCReturn(rc2, rc2);
+                        if (enmWrite == VMXMSREXIT_PASSTHRU_WRITE)
+                        {
+                            AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
+                            HMVMX_RETURN_UNEXPECTED_EXIT();
+                        }
                     }
-#endif
                     break;
                 }
