Index: /trunk/src/VBox/VMM/CPUM.cpp
===================================================================
--- /trunk/src/VBox/VMM/CPUM.cpp	(revision 30860)
+++ /trunk/src/VBox/VMM/CPUM.cpp	(revision 30861)
@@ -842,5 +842,5 @@
     if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008))
     {
-        /* Only expose the virtual and physical address sizes to the guest. (EAX completely) */
+        /* Only expose the virtual and physical address sizes to the guest. */
         pCPUM->aGuestCpuIdExt[8].eax &= UINT32_C(0x0000ffff);
         pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0;  /* reserved */
@@ -2283,4 +2283,8 @@
         return VERR_INTERNAL_ERROR_2;
     }
+
+    /* Notify PGM of the NXE states in case they've changed. */
+    for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+        PGMNotifyNxeChanged(&pVM->aCpus[iCpu], !!(pVM->aCpus[iCpu].cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE));
     return VINF_SUCCESS;
 }
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 30860)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 30861)
@@ -24,4 +24,6 @@
 #include <VBox/patm.h>
 #include <VBox/dbgf.h>
+#include <VBox/pdm.h>
+#include <VBox/pgm.h>
 #include <VBox/mm.h>
 #include "CPUMInternal.h"
@@ -730,69 +732,112 @@
 
 
-VMMDECL(uint64_t)  CPUMGetGuestMsr(PVMCPU pVCpu, unsigned idMsr)
-{
-    uint64_t u64 = 0;
-    uint8_t  u8Multiplier = 4;
-
+/**
+ * Query an MSR.
+ *
+ * The caller is responsible for checking privilege if the call is the result
+ * of a RDMSR instruction.  We'll do the rest.
+ *
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is
+ *          expected to take the appropriate actions. @a *puValue is set to 0.
+ * @param   pVCpu               The virtual CPU to operate on.
+ * @param   idMsr               The MSR.
+ * @param   puValue             Where to return the value.
+ *
+ * @remarks This will always return the right values, even when we're in the
+ *          recompiler.
+ */
+VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
+{
+    /*
+     * If we don't indicate MSR support in the CPUID feature bits, indicate
+     * that a #GP(0) should be raised.
+     */
+    if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
+    {
+        *puValue = 0;
+        return VERR_CPUM_RAISE_GP_0;
+    }
+
+    int rc = VINF_SUCCESS;
+    uint8_t const u8Multiplier = 4;
     switch (idMsr)
     {
         case MSR_IA32_TSC:
-            u64 = TMCpuTickGet(pVCpu);
+            *puValue = TMCpuTickGet(pVCpu);
+            break;
+
+        case MSR_IA32_APICBASE:
+            rc = PDMApicGetBase(pVCpu->CTX_SUFF(pVM), puValue);
+            if (RT_SUCCESS(rc))
+                rc = VINF_SUCCESS;
+            else
+            {
+                *puValue = 0;
+                rc = VERR_CPUM_RAISE_GP_0;
+            }
             break;
 
         case MSR_IA32_CR_PAT:
-            u64 = pVCpu->cpum.s.Guest.msrPAT;
+            *puValue = pVCpu->cpum.s.Guest.msrPAT;
             break;
 
         case MSR_IA32_SYSENTER_CS:
-            u64 = pVCpu->cpum.s.Guest.SysEnter.cs;
+            *puValue = pVCpu->cpum.s.Guest.SysEnter.cs;
             break;
 
         case MSR_IA32_SYSENTER_EIP:
-            u64 = pVCpu->cpum.s.Guest.SysEnter.eip;
+            *puValue = pVCpu->cpum.s.Guest.SysEnter.eip;
             break;
 
         case MSR_IA32_SYSENTER_ESP:
-            u64 = pVCpu->cpum.s.Guest.SysEnter.esp;
+            *puValue = pVCpu->cpum.s.Guest.SysEnter.esp;
             break;
 
         case MSR_K6_EFER:
-            u64 = pVCpu->cpum.s.Guest.msrEFER;
+            *puValue = pVCpu->cpum.s.Guest.msrEFER;
             break;
 
         case MSR_K8_SF_MASK:
-            u64 = pVCpu->cpum.s.Guest.msrSFMASK;
+            *puValue = pVCpu->cpum.s.Guest.msrSFMASK;
             break;
 
         case MSR_K6_STAR:
-            u64 = pVCpu->cpum.s.Guest.msrSTAR;
+            *puValue = pVCpu->cpum.s.Guest.msrSTAR;
             break;
 
         case MSR_K8_LSTAR:
-            u64 = pVCpu->cpum.s.Guest.msrLSTAR;
+            *puValue = pVCpu->cpum.s.Guest.msrLSTAR;
             break;
 
         case MSR_K8_CSTAR:
-            u64 = pVCpu->cpum.s.Guest.msrCSTAR;
+            *puValue = pVCpu->cpum.s.Guest.msrCSTAR;
+            break;
+
+        case MSR_K8_FS_BASE:
+            *puValue = pVCpu->cpum.s.Guest.fsHid.u64Base;
+            break;
+
+        case MSR_K8_GS_BASE:
+            *puValue = pVCpu->cpum.s.Guest.gsHid.u64Base;
             break;
 
         case MSR_K8_KERNEL_GS_BASE:
-            u64 = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
+            *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
             break;
 
         case MSR_K8_TSC_AUX:
-            u64 = pVCpu->cpum.s.GuestMsr.msr.tscAux;
+            *puValue = pVCpu->cpum.s.GuestMsr.msr.tscAux;
             break;
 
         case MSR_IA32_PERF_STATUS:
-            /** @todo: could really be not exactly correct, maybe use host's values */
-            /* Keep consistent with helper_rdmsr() in REM */
-            u64 =     (1000ULL                      /* TSC increment by tick */)
-                    | ((uint64_t)u8Multiplier << 24 /* CPU multiplier (aka bus ratio) min */       )
-                    | ((uint64_t)u8Multiplier << 40 /* CPU multiplier (aka bus ratio) max */       );
-            break;
-
-        case  MSR_IA32_FSB_CLOCK_STS:
-            /**
+            /** @todo This may not be exactly correct; maybe use the host's values instead. */
+            *puValue = UINT64_C(1000)                 /* TSC increment by tick */
+                     | ((uint64_t)u8Multiplier << 24) /* CPU multiplier (aka bus ratio) min */
+                     | ((uint64_t)u8Multiplier << 40) /* CPU multiplier (aka bus ratio) max */;
+            break;
+
+        case MSR_IA32_FSB_CLOCK_STS:
+            /*
              * Encoded as:
              * 0 - 266
@@ -802,16 +847,16 @@
              * 5 - return 100
              */
-            u64 = (2 << 4);
+            *puValue = (2 << 4);
             break;
 
         case MSR_IA32_PLATFORM_INFO:
-            u64 =     ((u8Multiplier)<<8              /* Flex ratio max */)
-                    | ((uint64_t)u8Multiplier << 40   /* Flex ratio min */ );
+            *puValue = (u8Multiplier << 8)            /* Flex ratio max */
+                     | ((uint64_t)u8Multiplier << 40) /* Flex ratio min */;
             break;
 
         case MSR_IA32_THERM_STATUS:
             /* CPU temperature reltive to TCC, to actually activate, CPUID leaf 6 EAX[0] must be set */
-            u64 = (1 << 31) /* validity bit */ |
-                  (20 << 16) /* degrees till TCC */;
+            *puValue = ( 1 << 31) /* validity bit */
+                     | (20 << 16) /* degrees till TCC */;
             break;
 
@@ -819,36 +864,212 @@
 #if 0
             /* Needs to be tested more before enabling. */
-            u64 = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
+            *puValue = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
 #else
-            u64 = 0;
+            *puValue = 0;
 #endif
             break;
 
-        /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
+#if 0 /*def IN_RING0 */
+        case MSR_IA32_PLATFORM_ID:
+        case MSR_IA32_BIOS_SIGN_ID:
+            if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
+            {
+                /* Available since the P6 family. VT-x implies that this feature is present. */
+                if (idMsr == MSR_IA32_PLATFORM_ID)
+                    *puValue = ASMRdMsr(MSR_IA32_PLATFORM_ID);
+                else if (idMsr == MSR_IA32_BIOS_SIGN_ID)
+                    *puValue = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
+                break;
+            }
+            /* no break */
+#endif
+
         default:
-            AssertFailed();
+            /* In X2APIC specification this range is reserved for APIC control. */
+            if (    idMsr >= MSR_IA32_APIC_START
+                &&  idMsr <  MSR_IA32_APIC_END)
+            {
+                rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue);
+                if (RT_SUCCESS(rc))
+                    rc = VINF_SUCCESS;
+                else
+                {
+                    *puValue = 0;
+                    rc = VERR_CPUM_RAISE_GP_0;
+                }
+            }
+            else
+            {
+                *puValue = 0;
+                rc = VERR_CPUM_RAISE_GP_0;
+            }
             break;
     }
-    return u64;
-}
-
-VMMDECL(void) CPUMSetGuestMsr(PVMCPU pVCpu, unsigned idMsr, uint64_t valMsr)
-{
-    /* On purpose only a limited number of MSRs; use the emulation function to update the others. */
+
+    return rc;
+}
+
+
+/**
+ * Sets the MSR.
+ *
+ * The caller is responsible for checking privilege if the call is the result
+ * of a WRMSR instruction.  We'll do the rest.
+ *
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
+ *          appropriate actions.
+ *
+ * @param   pVCpu       The virtual CPU to operate on.
+ * @param   idMsr       The MSR id.
+ * @param   uValue      The value to set.
+ *
+ * @remarks Everyone changing MSR values, including the recompiler, shall do it
+ *          by calling this method.  This makes sure we have current values and
+ *          that we trigger all the right actions when something changes.
+ */
+VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
+{
+    /*
+     * If we don't indicate MSR support in the CPUID feature bits, indicate
+     * that a #GP(0) should be raised.
+     */
+    if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
+        return VERR_CPUM_RAISE_GP_0;
+
+    int rc = VINF_SUCCESS;
     switch (idMsr)
     {
+        case MSR_IA32_MISC_ENABLE:
+            pVCpu->cpum.s.GuestMsr.msr.miscEnable = uValue;
+            break;
+
+        case MSR_IA32_TSC:
+            TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue);
+            break;
+
+        case MSR_IA32_APICBASE:
+            rc = PDMApicSetBase(pVCpu->CTX_SUFF(pVM), uValue);
+            if (rc != VINF_SUCCESS)
+                rc = VERR_CPUM_RAISE_GP_0;
+            break;
+
+        case MSR_IA32_CR_PAT:
+            pVCpu->cpum.s.Guest.msrPAT      = uValue;
+            break;
+
+        case MSR_IA32_SYSENTER_CS:
+            pVCpu->cpum.s.Guest.SysEnter.cs = uValue & 0xffff; /* 16 bits selector */
+            break;
+
+        case MSR_IA32_SYSENTER_EIP:
+            pVCpu->cpum.s.Guest.SysEnter.eip = uValue;
+            break;
+
+        case MSR_IA32_SYSENTER_ESP:
+            pVCpu->cpum.s.Guest.SysEnter.esp = uValue;
+            break;
+
+        case MSR_K6_EFER:
+        {
+            PVM             pVM          = pVCpu->CTX_SUFF(pVM);
+            uint64_t const  uOldEFER     = pVCpu->cpum.s.Guest.msrEFER;
+            uint32_t const  fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
+                                         ? pVM->cpum.s.aGuestCpuIdExt[1].edx
+                                         : 0;
+            uint64_t        fMask        = 0;
+
+            /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
+            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_NX)
+                fMask |= MSR_K6_EFER_NXE;
+            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
+                fMask |= MSR_K6_EFER_LME;
+            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_SEP)
+                fMask |= MSR_K6_EFER_SCE;
+            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
+                fMask |= MSR_K6_EFER_FFXSR;
+
+            /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if
+               paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
+            if (    (uOldEFER & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME)
+                &&  (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG))
+            {
+                Log(("Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
+                return VERR_CPUM_RAISE_GP_0;
+            }
+
+            /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
+            AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
+                      ("Unexpected value %RX64\n", uValue));
+            pVCpu->cpum.s.Guest.msrEFER = (uOldEFER & ~fMask) | (uValue & fMask);
+
+            /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
+               if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
+            if (   (uValue                      & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
+                != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
+            {
+                /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
+                HWACCMFlushTLB(pVCpu);
+
+                /* Notify PGM about NXE changes. */
+                if (   (uValue        & MSR_K6_EFER_NXE)
+                    != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
+                    PGMNotifyNxeChanged(pVCpu, !!(uValue & MSR_K6_EFER_NXE));
+            }
+            break;
+        }
+
+        case MSR_K8_SF_MASK:
+            pVCpu->cpum.s.Guest.msrSFMASK       = uValue;
+            break;
+
+        case MSR_K6_STAR:
+            pVCpu->cpum.s.Guest.msrSTAR         = uValue;
+            break;
+
+        case MSR_K8_LSTAR:
+            pVCpu->cpum.s.Guest.msrLSTAR        = uValue;
+            break;
+
+        case MSR_K8_CSTAR:
+            pVCpu->cpum.s.Guest.msrCSTAR        = uValue;
+            break;
+
+        case MSR_K8_FS_BASE:
+            pVCpu->cpum.s.Guest.fsHid.u64Base   = uValue;
+            break;
+
+        case MSR_K8_GS_BASE:
+            pVCpu->cpum.s.Guest.gsHid.u64Base   = uValue;
+            break;
+
+        case MSR_K8_KERNEL_GS_BASE:
+            pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
+            break;
+
         case MSR_K8_TSC_AUX:
-            pVCpu->cpum.s.GuestMsr.msr.tscAux = valMsr;
-            break;
-
-        case MSR_IA32_MISC_ENABLE:
-            pVCpu->cpum.s.GuestMsr.msr.miscEnable = valMsr;
+            pVCpu->cpum.s.GuestMsr.msr.tscAux   = uValue;
             break;
 
         default:
-            AssertFailed();
+            /* In X2APIC specification this range is reserved for APIC control. */
+            if (    idMsr >= MSR_IA32_APIC_START
+                &&  idMsr <  MSR_IA32_APIC_END)
+            {
+                rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue);
+                if (rc != VINF_SUCCESS)
+                    rc = VERR_CPUM_RAISE_GP_0;
+            }
+            else
+            {
+                /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
+                /** @todo rc = VERR_CPUM_RAISE_GP_0 */
+                Log(("CPUMSetGuestMsr: Unknown MSR %#x attempted set to %#llx\n", idMsr, uValue));
+            }
             break;
     }
-}
+    return rc;
+}
+
 
 VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
Index: /trunk/src/VBox/VMM/VMMAll/EMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/EMAll.cpp	(revision 30860)
+++ /trunk/src/VBox/VMM/VMMAll/EMAll.cpp	(revision 30861)
@@ -2496,5 +2496,6 @@
     pCtx->rdx = (uTicks >> 32ULL);
     /* Low dword of the TSC_AUX msr only. */
-    pCtx->rcx = (uint32_t)CPUMGetGuestMsr(pVCpu, MSR_K8_TSC_AUX);
+    CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx);
+    pCtx->rcx &= UINT32_C(0xffffffff);
 
     return VINF_SUCCESS;
@@ -2737,128 +2738,24 @@
  * @param   pVCpu       The VMCPU handle.
  * @param   pRegFrame   The register frame.
- *
  */
 VMMDECL(int) EMInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
 {
-    uint32_t u32Dummy, u32Features, cpl;
-    uint64_t val;
-    CPUMCTX *pCtx;
-    int      rc = VINF_SUCCESS;
-
     /** @todo According to the Intel manuals, there's a REX version of RDMSR that is slightly different.
      *  That version clears the high dwords of both RDX & RAX */
-    pCtx = CPUMQueryGuestCtxPtr(pVCpu);
 
     /* Get the current privilege level. */
-    cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
-    if (cpl != 0)
+    if (CPUMGetGuestCPL(pVCpu, pRegFrame) != 0)
         return VERR_EM_INTERPRETER; /* supervisor only */
 
-    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
-    if (!(u32Features & X86_CPUID_FEATURE_EDX_MSR))
-        return VERR_EM_INTERPRETER; /* not supported */
-
-    switch (pRegFrame->ecx)
-    {
-    case MSR_IA32_TSC:
-        val = TMCpuTickGet(pVCpu);
-        break;
-
-    case MSR_IA32_APICBASE:
-        rc = PDMApicGetBase(pVM, &val);
-        AssertRC(rc);
-        break;
-
-    case MSR_IA32_CR_PAT:
-        val = pCtx->msrPAT;
-        break;
-
-    case MSR_IA32_SYSENTER_CS:
-        val = pCtx->SysEnter.cs;
-        break;
-
-    case MSR_IA32_SYSENTER_EIP:
-        val = pCtx->SysEnter.eip;
-        break;
-
-    case MSR_IA32_SYSENTER_ESP:
-        val = pCtx->SysEnter.esp;
-        break;
-
-    case MSR_K6_EFER:
-        val = pCtx->msrEFER;
-        break;
-
-    case MSR_K8_SF_MASK:
-        val = pCtx->msrSFMASK;
-        break;
-
-    case MSR_K6_STAR:
-        val = pCtx->msrSTAR;
-        break;
-
-    case MSR_K8_LSTAR:
-        val = pCtx->msrLSTAR;
-        break;
-
-    case MSR_K8_CSTAR:
-        val = pCtx->msrCSTAR;
-        break;
-
-    case MSR_K8_FS_BASE:
-        val = pCtx->fsHid.u64Base;
-        break;
-
-    case MSR_K8_GS_BASE:
-        val = pCtx->gsHid.u64Base;
-        break;
-
-    case MSR_K8_KERNEL_GS_BASE:
-        val = pCtx->msrKERNELGSBASE;
-        break;
-
-    case MSR_K8_TSC_AUX:
-        val = CPUMGetGuestMsr(pVCpu, MSR_K8_TSC_AUX);
-        break;
-
-    case MSR_IA32_PERF_STATUS:
-    case MSR_IA32_PLATFORM_INFO:
-    case MSR_IA32_MISC_ENABLE:
-    case MSR_IA32_FSB_CLOCK_STS:
-    case MSR_IA32_THERM_STATUS:
-        val = CPUMGetGuestMsr(pVCpu, pRegFrame->ecx);
-        break;
-
-#if 0 /*def IN_RING0 */
-    case MSR_IA32_PLATFORM_ID:
-    case MSR_IA32_BIOS_SIGN_ID:
-        if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
-        {
-            /* Available since the P6 family. VT-x implies that this feature is present. */
-            if (pRegFrame->ecx == MSR_IA32_PLATFORM_ID)
-                val = ASMRdMsr(MSR_IA32_PLATFORM_ID);
-            else
-            if (pRegFrame->ecx == MSR_IA32_BIOS_SIGN_ID)
-                val = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
-            break;
-        }
-        /* no break */
-#endif
-    default:
-        /* In X2APIC specification this range is reserved for APIC control. */
-        if (    pRegFrame->ecx >= MSR_IA32_APIC_START
-            &&  pRegFrame->ecx <  MSR_IA32_APIC_END)
-            rc = PDMApicReadMSR(pVM, pVCpu->idCpu, pRegFrame->ecx, &val);
-        else
-            /* We should actually trigger a #GP here, but don't as that will cause more trouble. */
-            val = 0;
-        break;
-    }
-    LogFlow(("EMInterpretRdmsr %s (%x) -> val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, val));
-    if (rc == VINF_SUCCESS)
-    {
-        pRegFrame->rax = (uint32_t) val;
-        pRegFrame->rdx = (uint32_t)(val >> 32);
-    }
+    uint64_t uValue;
+    int rc = CPUMQueryGuestMsr(pVCpu, pRegFrame->ecx, &uValue);
+    if (RT_UNLIKELY(rc != VINF_SUCCESS))
+    {
+        Assert(rc == VERR_CPUM_RAISE_GP_0);
+        return VERR_EM_INTERPRETER;
+    }
+    pRegFrame->rax = (uint32_t) uValue;
+    pRegFrame->rdx = (uint32_t)(uValue >> 32);
+    LogFlow(("EMInterpretRdmsr %s (%x) -> %RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, uValue));
     return rc;
 }
@@ -2870,5 +2767,6 @@
 static int emInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
 {
-    /* Note: the Intel manual claims there's a REX version of RDMSR that's slightly different, so we play safe by completely disassembling the instruction. */
+    /* Note: The Intel manual claims there's a REX version of RDMSR that's slightly
+             different, so we play safe by completely disassembling the instruction. */
     Assert(!(pDis->prefix & PREFIX_REX));
     return EMInterpretRdmsr(pVM, pVCpu, pRegFrame);
@@ -2886,129 +2784,17 @@
 VMMDECL(int) EMInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
 {
-    uint32_t u32Dummy, u32Features, cpl;
-    uint64_t val;
-    CPUMCTX *pCtx;
-
-    /* Note: works the same in 32 and 64 bits modes. */
-    pCtx = CPUMQueryGuestCtxPtr(pVCpu);
-
-    /* Get the current privilege level. */
-    cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
-    if (cpl != 0)
-        return VERR_EM_INTERPRETER; /* supervisor only */
-
-    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
-    if (!(u32Features & X86_CPUID_FEATURE_EDX_MSR))
-        return VERR_EM_INTERPRETER; /* not supported */
-
-    val = RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx);
-    LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, val));
-    switch (pRegFrame->ecx)
-    {
-    case MSR_IA32_TSC:
-        TMCpuTickSet(pVM, pVCpu, val);
-        break;
-
-    case MSR_IA32_APICBASE:
-    {
-        int rc = PDMApicSetBase(pVM, val);
-        AssertRC(rc);
-        break;
-    }
-
-    case MSR_IA32_CR_PAT:
-        pCtx->msrPAT = val;
-        break;
-
-    case MSR_IA32_SYSENTER_CS:
-        pCtx->SysEnter.cs = val & 0xffff; /* 16 bits selector */
-        break;
-
-    case MSR_IA32_SYSENTER_EIP:
-        pCtx->SysEnter.eip = val;
-        break;
-
-    case MSR_IA32_SYSENTER_ESP:
-        pCtx->SysEnter.esp = val;
-        break;
-
-    case MSR_K6_EFER:
-    {
-        uint64_t uMask = 0;
-        uint64_t oldval = pCtx->msrEFER;
-
-        /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
-        CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
-        if (u32Features & X86_CPUID_AMD_FEATURE_EDX_NX)
-            uMask |= MSR_K6_EFER_NXE;
-        if (u32Features & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
-            uMask |= MSR_K6_EFER_LME;
-        if (u32Features & X86_CPUID_AMD_FEATURE_EDX_SEP)
-            uMask |= MSR_K6_EFER_SCE;
-        if (u32Features & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
-            uMask |= MSR_K6_EFER_FFXSR;
-
-        /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
-        if (    ((pCtx->msrEFER & MSR_K6_EFER_LME) != (val & uMask & MSR_K6_EFER_LME))
-            &&  (pCtx->cr0 & X86_CR0_PG))
-        {
-            AssertMsgFailed(("Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
-            return VERR_EM_INTERPRETER; /* @todo generate #GP(0) */
-        }
-
-        /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
-        AssertMsg(!(val & ~(MSR_K6_EFER_NXE|MSR_K6_EFER_LME|MSR_K6_EFER_LMA /* ignored anyway */ |MSR_K6_EFER_SCE|MSR_K6_EFER_FFXSR)), ("Unexpected value %RX64\n", val));
-        pCtx->msrEFER = (pCtx->msrEFER & ~uMask) | (val & uMask);
-
-        /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
-        if ((oldval & (MSR_K6_EFER_NXE|MSR_K6_EFER_LME|MSR_K6_EFER_LMA)) != (pCtx->msrEFER & (MSR_K6_EFER_NXE|MSR_K6_EFER_LME|MSR_K6_EFER_LMA)))
-            HWACCMFlushTLB(pVCpu);
-
-        break;
-    }
-
-    case MSR_K8_SF_MASK:
-        pCtx->msrSFMASK = val;
-        break;
-
-    case MSR_K6_STAR:
-        pCtx->msrSTAR = val;
-        break;
-
-    case MSR_K8_LSTAR:
-        pCtx->msrLSTAR = val;
-        break;
-
-    case MSR_K8_CSTAR:
-        pCtx->msrCSTAR = val;
-        break;
-
-    case MSR_K8_FS_BASE:
-        pCtx->fsHid.u64Base = val;
-        break;
-
-    case MSR_K8_GS_BASE:
-        pCtx->gsHid.u64Base = val;
-        break;
-
-    case MSR_K8_KERNEL_GS_BASE:
-        pCtx->msrKERNELGSBASE = val;
-        break;
-
-    case MSR_K8_TSC_AUX:
-    case MSR_IA32_MISC_ENABLE:
-        CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, val);
-        break;
-
-    default:
-        /* In X2APIC specification this range is reserved for APIC control. */
-        if (    pRegFrame->ecx >= MSR_IA32_APIC_START
-            &&  pRegFrame->ecx <  MSR_IA32_APIC_END)
-            return PDMApicWriteMSR(pVM, pVCpu->idCpu, pRegFrame->ecx, val);
-
-        /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
-        break;
-    }
-    return VINF_SUCCESS;
+    /* Check the current privilege level, this instruction is supervisor only. */
+    if (CPUMGetGuestCPL(pVCpu, pRegFrame) != 0)
+        return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */
+
+    int rc = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx));
+    if (rc != VINF_SUCCESS)
+    {
+        Assert(rc == VERR_CPUM_RAISE_GP_0);
+        return VERR_EM_INTERPRETER;
+    }
+    LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx,
+             RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)));
+    return rc;
 }
 
Index: /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 30860)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 30861)
@@ -938,5 +938,5 @@
         if (HWACCMIsNestedPagingActive(pVM) || !CPUMIsGuestPagingEnabled(pVCpu))
         {
-            /* AMD-V nested paging or real/protected mode without paging */
+            /* AMD-V nested paging or real/protected mode without paging. */
             GCPdPt  = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
             enmKind = PGMPOOLKIND_PAE_PD_PHYS;
@@ -981,5 +981,7 @@
 
 # if defined(IN_RC)
-        /* In 32 bits PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
+        /*
+         * In 32 bits PAE mode we *must* invalidate the TLB when changing a
+         * PDPT entry; the CPU fetches them only during cr3 load, so any
          * non-present PDPT will continue to cause page faults.
          */
@@ -2099,4 +2101,17 @@
         default:                return "unknown mode value";
     }
+}
+
+
+
+/**
+ * Notification from CPUM that the EFER.NXE bit has changed.
+ *
+ * @param   pVCpu       The virtual CPU for which EFER changed.
+ * @param   fNxe        The new NXE state.
+ */
+VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
+{
+    /* later */
 }
 
