Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 76199)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 76200)
@@ -1299,59 +1299,45 @@
 /** @name Guest Register Getters.
  * @{ */
-VMMDECL(void)       CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR);
-VMMDECL(RTGCPTR)    CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit);
-VMMDECL(RTSEL)      CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden);
-VMMDECL(RTSEL)      CPUMGetGuestLDTR(PVMCPU pVCpu);
-VMMDECL(RTSEL)      CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
-VMMDECL(uint64_t)   CPUMGetGuestCR0(PVMCPU pVCpu);
-VMMDECL(uint64_t)   CPUMGetGuestCR2(PVMCPU pVCpu);
-VMMDECL(uint64_t)   CPUMGetGuestCR3(PVMCPU pVCpu);
-VMMDECL(uint64_t)   CPUMGetGuestCR4(PVMCPU pVCpu);
-VMMDECL(uint64_t)   CPUMGetGuestCR8(PVMCPU pVCpu);
-VMMDECL(int)        CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue);
-VMMDECL(uint32_t)   CPUMGetGuestEFlags(PVMCPU pVCpu);
-VMMDECL(uint32_t)   CPUMGetGuestEIP(PVMCPU pVCpu);
-VMMDECL(uint64_t)   CPUMGetGuestRIP(PVMCPU pVCpu);
-VMMDECL(uint32_t)   CPUMGetGuestEAX(PVMCPU pVCpu);
-VMMDECL(uint32_t)   CPUMGetGuestEBX(PVMCPU pVCpu);
-VMMDECL(uint32_t)   CPUMGetGuestECX(PVMCPU pVCpu);
-VMMDECL(uint32_t)   CPUMGetGuestEDX(PVMCPU pVCpu);
-VMMDECL(uint32_t)   CPUMGetGuestESI(PVMCPU pVCpu);
-VMMDECL(uint32_t)   CPUMGetGuestEDI(PVMCPU pVCpu);
-VMMDECL(uint32_t)   CPUMGetGuestESP(PVMCPU pVCpu);
-VMMDECL(uint32_t)   CPUMGetGuestEBP(PVMCPU pVCpu);
-VMMDECL(RTSEL)      CPUMGetGuestCS(PVMCPU pVCpu);
-VMMDECL(RTSEL)      CPUMGetGuestDS(PVMCPU pVCpu);
-VMMDECL(RTSEL)      CPUMGetGuestES(PVMCPU pVCpu);
-VMMDECL(RTSEL)      CPUMGetGuestFS(PVMCPU pVCpu);
-VMMDECL(RTSEL)      CPUMGetGuestGS(PVMCPU pVCpu);
-VMMDECL(RTSEL)      CPUMGetGuestSS(PVMCPU pVCpu);
-VMMDECL(uint64_t)   CPUMGetGuestFlatPC(PVMCPU pVCpu);
-VMMDECL(uint64_t)   CPUMGetGuestFlatSP(PVMCPU pVCpu);
-VMMDECL(uint64_t)   CPUMGetGuestDR0(PVMCPU pVCpu);
-VMMDECL(uint64_t)   CPUMGetGuestDR1(PVMCPU pVCpu);
-VMMDECL(uint64_t)   CPUMGetGuestDR2(PVMCPU pVCpu);
-VMMDECL(uint64_t)   CPUMGetGuestDR3(PVMCPU pVCpu);
-VMMDECL(uint64_t)   CPUMGetGuestDR6(PVMCPU pVCpu);
-VMMDECL(uint64_t)   CPUMGetGuestDR7(PVMCPU pVCpu);
-VMMDECL(int)        CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue);
-VMMDECL(void)       CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t iSubLeaf,
-                                      uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx);
-VMMDECL(uint64_t)   CPUMGetGuestEFER(PVMCPU pVCpu);
+VMMDECL(void)           CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR);
+VMMDECL(RTGCPTR)        CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit);
+VMMDECL(RTSEL)          CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden);
+VMMDECL(RTSEL)          CPUMGetGuestLDTR(PVMCPU pVCpu);
+VMMDECL(RTSEL)          CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
+VMMDECL(uint64_t)       CPUMGetGuestCR0(PVMCPU pVCpu);
+VMMDECL(uint64_t)       CPUMGetGuestCR2(PVMCPU pVCpu);
+VMMDECL(uint64_t)       CPUMGetGuestCR3(PVMCPU pVCpu);
+VMMDECL(uint64_t)       CPUMGetGuestCR4(PVMCPU pVCpu);
+VMMDECL(uint64_t)       CPUMGetGuestCR8(PVMCPU pVCpu);
+VMMDECL(int)            CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue);
+VMMDECL(uint32_t)       CPUMGetGuestEFlags(PVMCPU pVCpu);
+VMMDECL(uint32_t)       CPUMGetGuestEIP(PVMCPU pVCpu);
+VMMDECL(uint64_t)       CPUMGetGuestRIP(PVMCPU pVCpu);
+VMMDECL(uint32_t)       CPUMGetGuestEAX(PVMCPU pVCpu);
+VMMDECL(uint32_t)       CPUMGetGuestEBX(PVMCPU pVCpu);
+VMMDECL(uint32_t)       CPUMGetGuestECX(PVMCPU pVCpu);
+VMMDECL(uint32_t)       CPUMGetGuestEDX(PVMCPU pVCpu);
+VMMDECL(uint32_t)       CPUMGetGuestESI(PVMCPU pVCpu);
+VMMDECL(uint32_t)       CPUMGetGuestEDI(PVMCPU pVCpu);
+VMMDECL(uint32_t)       CPUMGetGuestESP(PVMCPU pVCpu);
+VMMDECL(uint32_t)       CPUMGetGuestEBP(PVMCPU pVCpu);
+VMMDECL(RTSEL)          CPUMGetGuestCS(PVMCPU pVCpu);
+VMMDECL(RTSEL)          CPUMGetGuestDS(PVMCPU pVCpu);
+VMMDECL(RTSEL)          CPUMGetGuestES(PVMCPU pVCpu);
+VMMDECL(RTSEL)          CPUMGetGuestFS(PVMCPU pVCpu);
+VMMDECL(RTSEL)          CPUMGetGuestGS(PVMCPU pVCpu);
+VMMDECL(RTSEL)          CPUMGetGuestSS(PVMCPU pVCpu);
+VMMDECL(uint64_t)       CPUMGetGuestFlatPC(PVMCPU pVCpu);
+VMMDECL(uint64_t)       CPUMGetGuestFlatSP(PVMCPU pVCpu);
+VMMDECL(uint64_t)       CPUMGetGuestDR0(PVMCPU pVCpu);
+VMMDECL(uint64_t)       CPUMGetGuestDR1(PVMCPU pVCpu);
+VMMDECL(uint64_t)       CPUMGetGuestDR2(PVMCPU pVCpu);
+VMMDECL(uint64_t)       CPUMGetGuestDR3(PVMCPU pVCpu);
+VMMDECL(uint64_t)       CPUMGetGuestDR6(PVMCPU pVCpu);
+VMMDECL(uint64_t)       CPUMGetGuestDR7(PVMCPU pVCpu);
+VMMDECL(int)            CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue);
+VMMDECL(void)           CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t iSubLeaf,
+                                          uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx);
+VMMDECL(uint64_t)       CPUMGetGuestEFER(PVMCPU pVCpu);
 VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32MtrrCap(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32FeatureControl(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxBasic(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxPinbasedCtls(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxProcbasedCtls(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxProcbasedCtls2(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxExitCtls(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxEntryCtls(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxMisc(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxCr0Fixed0(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxCr0Fixed1(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxCr4Fixed0(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxCr4Fixed1(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxVmcsEnum(PVMCPU pVCpu);
-VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxVmFunc(PVMCPU pVCpu);
 VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32SmmMonitorCtl(PVMCPU pVCpu);
 VMMDECL(VBOXSTRICTRC)   CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue);
@@ -1363,44 +1349,44 @@
 /** @name Guest Register Setters.
  * @{ */
-VMMDECL(int)        CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
-VMMDECL(int)        CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
-VMMDECL(int)        CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr);
-VMMDECL(int)        CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr);
-VMMDECL(int)        CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0);
-VMMDECL(int)        CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2);
-VMMDECL(int)        CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3);
-VMMDECL(int)        CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4);
-VMMDECL(int)        CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0);
-VMMDECL(int)        CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1);
-VMMDECL(int)        CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2);
-VMMDECL(int)        CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3);
-VMMDECL(int)        CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6);
-VMMDECL(int)        CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7);
-VMMDECL(int)        CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value);
-VMM_INT_DECL(int)   CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue);
-VMMDECL(int)        CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags);
-VMMDECL(int)        CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip);
-VMMDECL(int)        CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax);
-VMMDECL(int)        CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx);
-VMMDECL(int)        CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx);
-VMMDECL(int)        CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx);
-VMMDECL(int)        CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi);
-VMMDECL(int)        CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi);
-VMMDECL(int)        CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp);
-VMMDECL(int)        CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp);
-VMMDECL(int)        CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs);
-VMMDECL(int)        CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds);
-VMMDECL(int)        CPUMSetGuestES(PVMCPU pVCpu, uint16_t es);
-VMMDECL(int)        CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs);
-VMMDECL(int)        CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs);
-VMMDECL(int)        CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss);
-VMMDECL(void)       CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val);
-VMMR3_INT_DECL(void) CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
-VMMR3_INT_DECL(void) CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
-VMMR3_INT_DECL(bool) CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
-VMMDECL(bool)       CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible);
-VMMDECL(void)       CPUMSetGuestCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
-VMM_INT_DECL(void)  CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu);
-VMM_INT_DECL(void)  CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg);
+VMMDECL(int)           CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
+VMMDECL(int)           CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
+VMMDECL(int)           CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr);
+VMMDECL(int)           CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr);
+VMMDECL(int)           CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0);
+VMMDECL(int)           CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2);
+VMMDECL(int)           CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3);
+VMMDECL(int)           CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4);
+VMMDECL(int)           CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0);
+VMMDECL(int)           CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1);
+VMMDECL(int)           CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2);
+VMMDECL(int)           CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3);
+VMMDECL(int)           CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6);
+VMMDECL(int)           CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7);
+VMMDECL(int)           CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value);
+VMM_INT_DECL(int)      CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue);
+VMMDECL(int)           CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags);
+VMMDECL(int)           CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip);
+VMMDECL(int)           CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax);
+VMMDECL(int)           CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx);
+VMMDECL(int)           CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx);
+VMMDECL(int)           CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx);
+VMMDECL(int)           CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi);
+VMMDECL(int)           CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi);
+VMMDECL(int)           CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp);
+VMMDECL(int)           CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp);
+VMMDECL(int)           CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs);
+VMMDECL(int)           CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds);
+VMMDECL(int)           CPUMSetGuestES(PVMCPU pVCpu, uint16_t es);
+VMMDECL(int)           CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs);
+VMMDECL(int)           CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs);
+VMMDECL(int)           CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss);
+VMMDECL(void)          CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val);
+VMMR3_INT_DECL(void)   CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
+VMMR3_INT_DECL(void)   CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
+VMMR3_INT_DECL(bool)   CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
+VMMDECL(bool)          CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible);
+VMMDECL(void)          CPUMSetGuestCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
+VMM_INT_DECL(void)     CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu);
+VMM_INT_DECL(void)     CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg);
 VMM_INT_DECL(void)     CPUMSetGuestTscAux(PVMCPU pVCpu, uint64_t uValue);
 VMM_INT_DECL(uint64_t) CPUMGetGuestTscAux(PVMCPU pVCpu);
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 76199)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 76200)
@@ -236,27 +236,9 @@
 
 
-/**
- * Get fixed IA32_FEATURE_CONTROL value for NEM and cpumMsrRd_Ia32FeatureControl.
- *
- * @returns Fixed IA32_FEATURE_CONTROL value.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32FeatureControl(PVMCPU pVCpu)
-{
-    /* Always report the MSR lock bit as set, in order to prevent guests from modifiying this MSR. */
-    uint64_t fFeatCtl = MSR_IA32_FEATURE_CONTROL_LOCK;
-
-    /* Report VMX features. */
-    if (pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fVmx)
-        fFeatCtl |= MSR_IA32_FEATURE_CONTROL_VMXON;
-
-    return fFeatCtl;
-}
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32FeatureControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32FeatureControl(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64FeatCtrl;
     return VINF_SUCCESS;
 }
@@ -1334,67 +1316,10 @@
 
 
-/**
- * Gets IA32_VMX_BASIC for IEM and cpumMsrRd_Ia32VmxBasic.
- *
- * @returns IA32_VMX_BASIC value.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxBasic(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        uVmxMsr = RT_BF_MAKE(VMX_BF_BASIC_VMCS_ID,         VMX_V_VMCS_REVISION_ID        )
-                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_SIZE,       VMX_V_VMCS_SIZE               )
-                | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH,  !pGuestFeatures->fLongMode    )
-                | RT_BF_MAKE(VMX_BF_BASIC_DUAL_MON,        0                             )
-                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_MEM_TYPE,   VMX_BASIC_MEM_TYPE_WB         )
-                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_INS_OUTS,   pGuestFeatures->fVmxInsOutInfo)
-                | RT_BF_MAKE(VMX_BF_BASIC_TRUE_CTLS,       0                             );
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxBasic(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxBasic(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Gets IA32_VMX_PINBASED_CTLS for IEM and cpumMsrRd_Ia32VmxPinbasedCtls.
- *
- * @returns IA32_VMX_PINBASED_CTLS value.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxPinbasedCtls(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        uint32_t const fFeatures = (pGuestFeatures->fVmxExtIntExit   << VMX_BF_PIN_CTLS_EXT_INT_EXIT_SHIFT )
-                                 | (pGuestFeatures->fVmxNmiExit      << VMX_BF_PIN_CTLS_NMI_EXIT_SHIFT     )
-                                 | (pGuestFeatures->fVmxVirtNmi      << VMX_BF_PIN_CTLS_VIRT_NMI_SHIFT     )
-                                 | (pGuestFeatures->fVmxPreemptTimer << VMX_BF_PIN_CTLS_PREEMPT_TIMER_SHIFT)
-                                 | (pGuestFeatures->fVmxPostedInt    << VMX_BF_PIN_CTLS_POSTED_INT_SHIFT   );
-        /* Set the default1 class bits. See Intel spec. A.3.1 "Pin-Based VM-Execution Controls". */
-        uint32_t const fAllowed0 = VMX_PIN_CTLS_DEFAULT1;
-        uint32_t const fAllowed1 = fFeatures | VMX_PIN_CTLS_DEFAULT1;
-        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n",
-                                                         fAllowed0, fAllowed1, fFeatures));
-        uVmxMsr = RT_MAKE_U64(fAllowed0, fAllowed1);
-        LogRel(("fVmxExtIntExit=%u fFeatures=%#RX32 uVmxMsr=%#RX64\n", !!pGuestFeatures->fVmxExtIntExit, fFeatures, uVmxMsr));
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Basic;
+    return VINF_SUCCESS;
 }
 
@@ -1404,54 +1329,7 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Gets IA32_VMX_PROCBASED_CTLS for IEM and cpumMsrRd_Ia32VmxProcbasedCtls.
- *
- * @returns IA32_VMX_PROCBASED_CTLS value.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxProcbasedCtls(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        uint32_t const fFeatures = (pGuestFeatures->fVmxIntWindowExit     << VMX_BF_PROC_CTLS_INT_WINDOW_EXIT_SHIFT   )
-                                 | (pGuestFeatures->fVmxTscOffsetting     << VMX_BF_PROC_CTLS_USE_TSC_OFFSETTING_SHIFT)
-                                 | (pGuestFeatures->fVmxHltExit           << VMX_BF_PROC_CTLS_HLT_EXIT_SHIFT          )
-                                 | (pGuestFeatures->fVmxInvlpgExit        << VMX_BF_PROC_CTLS_INVLPG_EXIT_SHIFT       )
-                                 | (pGuestFeatures->fVmxMwaitExit         << VMX_BF_PROC_CTLS_MWAIT_EXIT_SHIFT        )
-                                 | (pGuestFeatures->fVmxRdpmcExit         << VMX_BF_PROC_CTLS_RDPMC_EXIT_SHIFT        )
-                                 | (pGuestFeatures->fVmxRdtscExit         << VMX_BF_PROC_CTLS_RDTSC_EXIT_SHIFT        )
-                                 | (pGuestFeatures->fVmxCr3LoadExit       << VMX_BF_PROC_CTLS_CR3_LOAD_EXIT_SHIFT     )
-                                 | (pGuestFeatures->fVmxCr3StoreExit      << VMX_BF_PROC_CTLS_CR3_STORE_EXIT_SHIFT    )
-                                 | (pGuestFeatures->fVmxCr8LoadExit       << VMX_BF_PROC_CTLS_CR8_LOAD_EXIT_SHIFT     )
-                                 | (pGuestFeatures->fVmxCr8StoreExit      << VMX_BF_PROC_CTLS_CR8_STORE_EXIT_SHIFT    )
-                                 | (pGuestFeatures->fVmxUseTprShadow      << VMX_BF_PROC_CTLS_USE_TPR_SHADOW_SHIFT    )
-                                 | (pGuestFeatures->fVmxNmiWindowExit     << VMX_BF_PROC_CTLS_NMI_WINDOW_EXIT_SHIFT   )
-                                 | (pGuestFeatures->fVmxMovDRxExit        << VMX_BF_PROC_CTLS_MOV_DR_EXIT_SHIFT       )
-                                 | (pGuestFeatures->fVmxUncondIoExit      << VMX_BF_PROC_CTLS_UNCOND_IO_EXIT_SHIFT    )
-                                 | (pGuestFeatures->fVmxUseIoBitmaps      << VMX_BF_PROC_CTLS_USE_IO_BITMAPS_SHIFT    )
-                                 | (pGuestFeatures->fVmxMonitorTrapFlag   << VMX_BF_PROC_CTLS_MONITOR_TRAP_FLAG_SHIFT )
-                                 | (pGuestFeatures->fVmxUseMsrBitmaps     << VMX_BF_PROC_CTLS_USE_MSR_BITMAPS_SHIFT   )
-                                 | (pGuestFeatures->fVmxMonitorExit       << VMX_BF_PROC_CTLS_MONITOR_EXIT_SHIFT      )
-                                 | (pGuestFeatures->fVmxPauseExit         << VMX_BF_PROC_CTLS_PAUSE_EXIT_SHIFT        )
-                                 | (pGuestFeatures->fVmxSecondaryExecCtls << VMX_BF_PROC_CTLS_USE_SECONDARY_CTLS_SHIFT);
-        /* Set the default1 class bits. See Intel spec. A.3.2 "Primary Processor-Based VM-Execution Controls". */
-        uint32_t const fAllowed0 = VMX_PROC_CTLS_DEFAULT1;
-        uint32_t const fAllowed1 = fFeatures | VMX_PROC_CTLS_DEFAULT1;
-        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
-                                                         fAllowed1, fFeatures));
-        uVmxMsr = RT_MAKE_U64(fAllowed0, fAllowed1);
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.PinCtls.u;
+    return VINF_SUCCESS;
+}
 
 /** @callback_method_impl{FNCPUMRDMSR} */
@@ -1459,39 +1337,6 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Gets IA32_VMX_EXIT_CTLS for IEM and cpumMsrRd_Ia32VmxProcbasedCtls.
- *
- * @returns IA32_VMX_EXIT_CTLS value.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxExitCtls(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        uint32_t const fFeatures = (pGuestFeatures->fVmxExitSaveDebugCtls << VMX_BF_EXIT_CTLS_SAVE_DEBUG_SHIFT          )
-                                 | (pGuestFeatures->fVmxHostAddrSpaceSize << VMX_BF_EXIT_CTLS_HOST_ADDR_SPACE_SIZE_SHIFT)
-                                 | (pGuestFeatures->fVmxExitAckExtInt     << VMX_BF_EXIT_CTLS_ACK_EXT_INT_SHIFT         )
-                                 | (pGuestFeatures->fVmxExitSavePatMsr    << VMX_BF_EXIT_CTLS_SAVE_PAT_MSR_SHIFT        )
-                                 | (pGuestFeatures->fVmxExitLoadPatMsr    << VMX_BF_EXIT_CTLS_LOAD_PAT_MSR_SHIFT        )
-                                 | (pGuestFeatures->fVmxExitSaveEferMsr   << VMX_BF_EXIT_CTLS_SAVE_EFER_MSR_SHIFT       )
-                                 | (pGuestFeatures->fVmxExitLoadEferMsr   << VMX_BF_EXIT_CTLS_LOAD_EFER_MSR_SHIFT       )
-                                 | (pGuestFeatures->fVmxSavePreemptTimer  << VMX_BF_EXIT_CTLS_SAVE_PREEMPT_TIMER_SHIFT  );
-        /* Set the default1 class bits. See Intel spec. A.4 "VM-exit Controls". */
-        uint32_t const fAllowed0 = VMX_EXIT_CTLS_DEFAULT1;
-        uint32_t const fAllowed1 = fFeatures | VMX_EXIT_CTLS_DEFAULT1;
-        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
-                                                         fAllowed1, fFeatures));
-        uVmxMsr = RT_MAKE_U64(fAllowed0, fAllowed1);
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.ProcCtls.u;
+    return VINF_SUCCESS;
 }
 
@@ -1501,35 +1346,6 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxExitCtls(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Gets IA32_VMX_ENTRY_CTLS for IEM and cpumMsrRd_Ia32VmxEntryCtls.
- *
- * @returns IA32_VMX_ENTRY_CTLS value.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxEntryCtls(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        uint32_t const fFeatures = (pGuestFeatures->fVmxEntryLoadDebugCtls << VMX_BF_ENTRY_CTLS_LOAD_DEBUG_SHIFT      )
-                                 | (pGuestFeatures->fVmxIa32eModeGuest     << VMX_BF_ENTRY_CTLS_IA32E_MODE_GUEST_SHIFT)
-                                 | (pGuestFeatures->fVmxEntryLoadEferMsr   << VMX_BF_ENTRY_CTLS_LOAD_EFER_MSR_SHIFT   )
-                                 | (pGuestFeatures->fVmxEntryLoadPatMsr    << VMX_BF_ENTRY_CTLS_LOAD_PAT_MSR_SHIFT    );
-        /* Set the default1 class bits. See Intel spec. A.5 "VM-entry Controls". */
-        uint32_t const fAllowed0 = VMX_ENTRY_CTLS_DEFAULT1;
-        uint32_t const fAllowed1 = fFeatures | VMX_ENTRY_CTLS_DEFAULT1;
-        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed0=%#RX32 fFeatures=%#RX32\n", fAllowed0,
-                                                         fAllowed1, fFeatures));
-        uVmxMsr = RT_MAKE_U64(fAllowed0, fAllowed1);
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.ExitCtls.u;
+    return VINF_SUCCESS;
 }
 
@@ -1539,42 +1355,8 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Gets IA32_VMX_MISC for IEM and cpumMsrRd_Ia32VmxMisc.
- *
- * @returns IA32_VMX_MISC MSR.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxMisc(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        uint64_t uHostMsr;
-        int rc = HMVmxGetHostMsr(pVCpu->CTX_SUFF(pVM), MSR_IA32_VMX_MISC, &uHostMsr);
-        AssertMsgRC(rc, ("HMVmxGetHostMsr failed. rc=%Rrc\n", rc)); RT_NOREF_PV(rc);
-        uint8_t const cMaxMsrs       = RT_MIN(RT_BF_GET(uHostMsr, VMX_BF_MISC_MAX_MSRS), VMX_V_AUTOMSR_COUNT_MAX);
-        uint8_t const fActivityState = RT_BF_GET(uHostMsr, VMX_BF_MISC_ACTIVITY_STATES) & VMX_V_GUEST_ACTIVITY_STATE_MASK;
-        uVmxMsr = RT_BF_MAKE(VMX_BF_MISC_PREEMPT_TIMER_TSC,      VMX_V_PREEMPT_TIMER_SHIFT             )
-                | RT_BF_MAKE(VMX_BF_MISC_EXIT_SAVE_EFER_LMA,     pGuestFeatures->fVmxExitSaveEferLma   )
-                | RT_BF_MAKE(VMX_BF_MISC_ACTIVITY_STATES,        fActivityState                        )
-                | RT_BF_MAKE(VMX_BF_MISC_INTEL_PT,               pGuestFeatures->fVmxIntelPt           )
-                | RT_BF_MAKE(VMX_BF_MISC_SMM_READ_SMBASE_MSR,    0                                     )
-                | RT_BF_MAKE(VMX_BF_MISC_CR3_TARGET,             VMX_V_CR3_TARGET_COUNT                )
-                | RT_BF_MAKE(VMX_BF_MISC_MAX_MSRS,               cMaxMsrs                              )
-                | RT_BF_MAKE(VMX_BF_MISC_VMXOFF_BLOCK_SMI,       0                                     )
-                | RT_BF_MAKE(VMX_BF_MISC_VMWRITE_ALL,            pGuestFeatures->fVmxVmwriteAll        )
-                | RT_BF_MAKE(VMX_BF_MISC_ENTRY_INJECT_SOFT_INT,  pGuestFeatures->fVmxEntryInjectSoftInt)
-                | RT_BF_MAKE(VMX_BF_MISC_MSEG_ID,                VMX_V_MSEG_REV_ID                     );
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.EntryCtls.u;
+    return VINF_SUCCESS;
+}
+
 
 
@@ -1583,24 +1365,6 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxMisc(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Gets IA32_VMX_CR0_FIXED0 for IEM and cpumMsrRd_Ia32VmxMisc.
- *
- * @returns IA32_VMX_CR0_FIXED0 value.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxCr0Fixed0(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    if (pGuestFeatures->fVmx)
-    {
-        uint64_t const uVmxMsr = pGuestFeatures->fVmxUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
-        return uVmxMsr;
-    }
-    return 0;
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Misc;
+    return VINF_SUCCESS;
 }
 
@@ -1610,28 +1374,6 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Gets IA32_VMX_CR0_FIXED1 for IEM and cpumMsrRd_Ia32VmxMisc.
- *
- * @returns IA32_VMX_CR0_FIXED1 MSR.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxCr0Fixed1(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        int rc = HMVmxGetHostMsr(pVCpu->CTX_SUFF(pVM), MSR_IA32_VMX_CR0_FIXED1, &uVmxMsr);
-        AssertMsgRC(rc, ("HMVmxGetHostMsr failed. rc=%Rrc\n", rc)); RT_NOREF_PV(rc);
-        uVmxMsr |= VMX_V_CR0_FIXED0;   /* Make sure the CR0 MB1 bits are not clear. */
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Cr0Fixed0;
+    return VINF_SUCCESS;
 }
 
@@ -1641,21 +1383,6 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    Assert(idMsr == MSR_IA32_VMX_CR0_FIXED1);
-    *puValue = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Gets IA32_VMX_CR4_FIXED0 for IEM and cpumMsrRd_Ia32VmxCr4Fixed0.
- *
- * @returns IA32_VMX_CR4_FIXED0 value.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxCr4Fixed0(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t const uVmxMsr = pGuestFeatures->fVmx ? VMX_V_CR4_FIXED0 : 0;
-    return uVmxMsr;
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Cr0Fixed1;
+    return VINF_SUCCESS;
 }
 
@@ -1665,28 +1392,6 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Gets IA32_VMX_CR4_FIXED1 for IEM and cpumMsrRd_Ia32VmxCr4Fixed1.
- *
- * @returns IA32_VMX_CR4_FIXED1 MSR.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxCr4Fixed1(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-    {
-        int rc = HMVmxGetHostMsr(pVCpu->CTX_SUFF(pVM), MSR_IA32_VMX_CR4_FIXED1, &uVmxMsr);
-        AssertMsgRC(rc, ("HMVmxGetHostMsr failed. rc=%Rrc\n", rc)); RT_NOREF_PV(rc);
-        uVmxMsr |= VMX_V_CR4_FIXED0;   /* Make sure the CR4 MB1 bits are not clear. */
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Cr4Fixed0;
+    return VINF_SUCCESS;
 }
 
@@ -1696,25 +1401,6 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    Assert(idMsr == MSR_IA32_VMX_CR4_FIXED1);
-    *puValue = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Gets IA32_VMX_VMCS_ENUM for IEM and cpumMsrRd_Ia32VmxVmcsEnum.
- *
- * @returns IA32_VMX_VMCS_ENUM value.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxVmcsEnum(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (pGuestFeatures->fVmx)
-        uVmxMsr = VMX_V_VMCS_MAX_INDEX << VMX_BF_VMCS_ENUM_HIGHEST_IDX_SHIFT;
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Cr4Fixed1;
+    return VINF_SUCCESS;
 }
 
@@ -1724,50 +1410,6 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxVmcsEnum(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Gets MSR_IA32_VMX_PROCBASED_CTLS2 for IEM and cpumMsrRd_Ia32VmxProcBasedCtls2.
- *
- * @returns MSR_IA32_VMX_PROCBASED_CTLS2 value.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxProcbasedCtls2(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (   pGuestFeatures->fVmx
-        && pGuestFeatures->fVmxSecondaryExecCtls)
-    {
-        uint32_t const fFeatures = (pGuestFeatures->fVmxVirtApicAccess    << VMX_BF_PROC_CTLS2_VIRT_APIC_ACCESS_SHIFT  )
-                                 | (pGuestFeatures->fVmxEpt               << VMX_BF_PROC_CTLS2_EPT_SHIFT               )
-                                 | (pGuestFeatures->fVmxDescTableExit     << VMX_BF_PROC_CTLS2_DESC_TABLE_EXIT_SHIFT   )
-                                 | (pGuestFeatures->fVmxRdtscp            << VMX_BF_PROC_CTLS2_RDTSCP_SHIFT            )
-                                 | (pGuestFeatures->fVmxVirtX2ApicMode    << VMX_BF_PROC_CTLS2_VIRT_X2APIC_MODE_SHIFT  )
-                                 | (pGuestFeatures->fVmxVpid              << VMX_BF_PROC_CTLS2_VPID_SHIFT              )
-                                 | (pGuestFeatures->fVmxWbinvdExit        << VMX_BF_PROC_CTLS2_WBINVD_EXIT_SHIFT       )
-                                 | (pGuestFeatures->fVmxUnrestrictedGuest << VMX_BF_PROC_CTLS2_UNRESTRICTED_GUEST_SHIFT)
-                                 | (pGuestFeatures->fVmxApicRegVirt       << VMX_BF_PROC_CTLS2_APIC_REG_VIRT_SHIFT     )
-                                 | (pGuestFeatures->fVmxVirtIntDelivery   << VMX_BF_PROC_CTLS2_VIRT_INT_DELIVERY_SHIFT )
-                                 | (pGuestFeatures->fVmxPauseLoopExit     << VMX_BF_PROC_CTLS2_PAUSE_LOOP_EXIT_SHIFT   )
-                                 | (pGuestFeatures->fVmxRdrandExit        << VMX_BF_PROC_CTLS2_RDRAND_EXIT_SHIFT       )
-                                 | (pGuestFeatures->fVmxInvpcid           << VMX_BF_PROC_CTLS2_INVPCID_SHIFT           )
-                                 | (pGuestFeatures->fVmxVmFunc            << VMX_BF_PROC_CTLS2_VMFUNC_SHIFT            )
-                                 | (pGuestFeatures->fVmxVmcsShadowing     << VMX_BF_PROC_CTLS2_VMCS_SHADOWING_SHIFT    )
-                                 | (pGuestFeatures->fVmxRdseedExit        << VMX_BF_PROC_CTLS2_RDSEED_EXIT_SHIFT       )
-                                 | (pGuestFeatures->fVmxPml               << VMX_BF_PROC_CTLS2_PML_SHIFT               )
-                                 | (pGuestFeatures->fVmxEptXcptVe         << VMX_BF_PROC_CTLS2_EPT_VE_SHIFT            )
-                                 | (pGuestFeatures->fVmxXsavesXrstors     << VMX_BF_PROC_CTLS2_XSAVES_XRSTORS_SHIFT    )
-                                 | (pGuestFeatures->fVmxUseTscScaling     << VMX_BF_PROC_CTLS2_TSC_SCALING_SHIFT       );
-        /* No default1 class bits. A.3.3 "Secondary Processor-Based VM-Execution Controls". */
-        uint32_t const fAllowed0 = 0;
-        uint32_t const fAllowed1 = fFeatures;
-        uVmxMsr = RT_MAKE_U64(fAllowed0, fAllowed1);
-    }
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmcsEnum;
+    return VINF_SUCCESS;
 }
 
@@ -1777,5 +1419,5 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.ProcCtls2.u;
     return VINF_SUCCESS;
 }
@@ -1827,28 +1469,9 @@
 
 
-/**
- * Gets IA32_VMX_VMFUNC for IEM and cpumMsrRd_Ia32VmxVmFunc.
- *
- * @returns IA32_VMX_VMFUNC value.
- * @param   pVCpu           The cross context per CPU structure.
- */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxVmFunc(PVMCPU pVCpu)
-{
-    PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
-    uint64_t uVmxMsr;
-    if (   pGuestFeatures->fVmx
-        && pGuestFeatures->fVmxVmFunc)
-        uVmxMsr = RT_BF_MAKE(VMX_BF_VMFUNC_EPTP_SWITCHING, 1);
-    else
-        uVmxMsr = 0;
-    return uVmxMsr;
-}
-
-
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxVmFunc(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    *puValue = CPUMGetGuestIa32VmxVmFunc(pVCpu);
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
     return VINF_SUCCESS;
 }
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 76199)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 76200)
@@ -5621,5 +5621,5 @@
             if (IEM_VMX_IS_ROOT_MODE(pVCpu))
             {
-                uint32_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
+                uint32_t const uCr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
                 if ((uNewCrX & uCr0Fixed0) != uCr0Fixed0)
                 {
@@ -5628,5 +5628,5 @@
                 }
 
-                uint32_t const uCr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
+                uint32_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
                 if (uNewCrX & ~uCr0Fixed1)
                 {
@@ -5855,5 +5855,5 @@
             if (IEM_VMX_IS_ROOT_MODE(pVCpu))
             {
-                uint32_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
+                uint32_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
                 if ((uNewCrX & uCr4Fixed0) != uCr4Fixed0)
                 {
@@ -5862,5 +5862,5 @@
                 }
 
-                uint32_t const uCr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
+                uint32_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
                 if (uNewCrX & ~uCr4Fixed1)
                 {
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h	(revision 76199)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h	(revision 76200)
@@ -559,5 +559,5 @@
         case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
         {
-            uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
+            uint64_t const uVmFuncMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64VmFunc;
             return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
         }
@@ -1175,5 +1175,5 @@
 DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PVMCPU pVCpu, uint32_t uMsrCount)
 {
-    uint64_t const u64VmxMiscMsr      = CPUMGetGuestIa32VmxMisc(pVCpu);
+    uint64_t const u64VmxMiscMsr      = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
     uint32_t const cMaxSupportedMsrs  = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
     Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
@@ -2072,5 +2072,5 @@
     {
         /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 MB1 bits are not modified. */
-        uint64_t const uCr0Fixed0  = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
+        uint64_t const uCr0Fixed0  = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
         uint64_t const fCr0IgnMask = UINT64_C(0xffffffff1ff8ffc0) | X86_CR0_ET | X86_CR0_CD | X86_CR0_NW | uCr0Fixed0;
         uint64_t const uHostCr0    = pVmcs->u64HostCr0.u;
@@ -2083,5 +2083,5 @@
     {
         /* CR4 MB1 bits are not modified. */
-        uint64_t const fCr4IgnMask = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
+        uint64_t const fCr4IgnMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
         uint64_t const uHostCr4    = pVmcs->u64HostCr4.u;
         uint64_t const uGuestCr4   = pVCpu->cpum.GstCtx.cr4;
@@ -5046,5 +5046,5 @@
     {
         /* CR0 MB1 bits. */
-        uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
+        uint64_t u64Cr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
         Assert(!(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
         if (fUnrestrictedGuest)
@@ -5054,5 +5054,5 @@
 
         /* CR0 MBZ bits. */
-        uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
+        uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
         if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
@@ -5068,10 +5068,10 @@
     {
         /* CR4 MB1 bits. */
-        uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
+        uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
         if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
 
         /* CR4 MBZ bits. */
-        uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
+        uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
         if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
@@ -5682,5 +5682,5 @@
      * Activity state.
      */
-    uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
+    uint64_t const u64GuestVmxMiscMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
     uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
     if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
@@ -6035,10 +6035,10 @@
     {
         /* CR0 MB1 bits. */
-        uint64_t const u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
+        uint64_t const u64Cr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
         if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
 
         /* CR0 MBZ bits. */
-        uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
+        uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
         if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
@@ -6048,10 +6048,10 @@
     {
         /* CR4 MB1 bits. */
-        uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
+        uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
         if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
 
         /* CR4 MBZ bits. */
-        uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
+        uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
         if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
@@ -6225,6 +6225,5 @@
 
     /* VM-entry controls. */
-    VMXCTLSMSR EntryCtls;
-    EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
+    VMXCTLSMSR const EntryCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.EntryCtls;
     if (~pVmcs->u32EntryCtls & EntryCtls.n.allowed0)
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
@@ -6328,6 +6327,5 @@
 
     /* VM-exit controls. */
-    VMXCTLSMSR ExitCtls;
-    ExitCtls.u = CPUMGetGuestIa32VmxExitCtls(pVCpu);
+    VMXCTLSMSR const ExitCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ExitCtls;
     if (~pVmcs->u32ExitCtls & ExitCtls.n.allowed0)
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
@@ -6383,6 +6381,5 @@
     /* Pin-based VM-execution controls. */
     {
-        VMXCTLSMSR PinCtls;
-        PinCtls.u = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
+        VMXCTLSMSR const PinCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.PinCtls;
         if (~pVmcs->u32PinCtls & PinCtls.n.allowed0)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
@@ -6394,6 +6391,5 @@
     /* Processor-based VM-execution controls. */
     {
-        VMXCTLSMSR ProcCtls;
-        ProcCtls.u = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
+        VMXCTLSMSR const ProcCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls;
         if (~pVmcs->u32ProcCtls & ProcCtls.n.allowed0)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
@@ -6406,6 +6402,5 @@
     if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
     {
-        VMXCTLSMSR ProcCtls2;
-        ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
+        VMXCTLSMSR const ProcCtls2 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls2;
         if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.allowed0)
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
@@ -8159,5 +8154,5 @@
         {
             /* CR0 MB1 bits. */
-            uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
+            uint64_t const uCr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
             if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
             {
@@ -8168,5 +8163,5 @@
 
             /* CR0 MBZ bits. */
-            uint64_t const uCr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
+            uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
             if (pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1)
             {
@@ -8180,5 +8175,5 @@
         {
             /* CR4 MB1 bits. */
-            uint64_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
+            uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
             if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) != uCr4Fixed0)
             {
@@ -8189,5 +8184,5 @@
 
             /* CR4 MBZ bits. */
-            uint64_t const uCr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
+            uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
             if (pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1)
             {
@@ -8199,6 +8194,7 @@
 
         /* Feature control MSR's LOCK and VMXON bits. */
-        uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
-        if (!(uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)))
+        uint64_t const uMsrFeatCtl = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64FeatCtrl;
+        if ((uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
+                        != (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
         {
             Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
Index: /trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp	(revision 76199)
+++ /trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp	(revision 76200)
@@ -2115,6 +2115,6 @@
         {
             Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
-            if (paValues[iReg].Reg64 != CPUMGetGuestIa32FeatureControl(pVCpu))
-                Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32FeatureControl(pVCpu), paValues[iReg].Reg64));
+            if (paValues[iReg].Reg64 != pCtx->hwvirt.vmx.Msrs.u64FeatCtrl)
+                Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, pCtx->hwvirt.vmx.Msrs.u64FeatCtrl, paValues[iReg].Reg64));
             iReg++;
         }
Index: /trunk/src/VBox/VMM/VMMR3/CPUM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 76199)
+++ /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 76200)
@@ -1236,4 +1236,203 @@
 
 /**
+ * Initializes the guest VMX MSRs from guest-CPU features.
+ *
+ * @param   pVM     The cross context VM structure.
+ */
+static void cpumR3InitGuestVmxMsrs(PVM pVM)
+{
+    PVMCPU         pVCpu0    = &pVM->aCpus[0];
+    PCCPUMFEATURES pFeatures = &pVM->cpum.s.GuestFeatures;
+    PVMXMSRS       pVmxMsrs  = &pVCpu0->cpum.s.Guest.hwvirt.vmx.Msrs;
+
+    Assert(pFeatures->fVmx);
+    RT_ZERO(*pVmxMsrs);
+
+    /* Feature control. */
+    pVmxMsrs->u64FeatCtrl = MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON;
+
+    /* Basic information. */
+    {
+        uint64_t const u64Basic = RT_BF_MAKE(VMX_BF_BASIC_VMCS_ID,         VMX_V_VMCS_REVISION_ID   )
+                                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_SIZE,       VMX_V_VMCS_SIZE          )
+                                | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH,  !pFeatures->fLongMode    )
+                                | RT_BF_MAKE(VMX_BF_BASIC_DUAL_MON,        0                        )
+                                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_MEM_TYPE,   VMX_BASIC_MEM_TYPE_WB    )
+                                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_INS_OUTS,   pFeatures->fVmxInsOutInfo)
+                                | RT_BF_MAKE(VMX_BF_BASIC_TRUE_CTLS,       0                        );
+        pVmxMsrs->u64Basic = u64Basic;
+    }
+
+    /* Pin-based VM-execution controls. */
+    {
+        uint32_t const fFeatures = (pFeatures->fVmxExtIntExit   << VMX_BF_PIN_CTLS_EXT_INT_EXIT_SHIFT )
+                                 | (pFeatures->fVmxNmiExit      << VMX_BF_PIN_CTLS_NMI_EXIT_SHIFT     )
+                                 | (pFeatures->fVmxVirtNmi      << VMX_BF_PIN_CTLS_VIRT_NMI_SHIFT     )
+                                 | (pFeatures->fVmxPreemptTimer << VMX_BF_PIN_CTLS_PREEMPT_TIMER_SHIFT)
+                                 | (pFeatures->fVmxPostedInt    << VMX_BF_PIN_CTLS_POSTED_INT_SHIFT   );
+        uint32_t const fAllowed0 = VMX_PIN_CTLS_DEFAULT1;
+        uint32_t const fAllowed1 = fFeatures | VMX_PIN_CTLS_DEFAULT1;
+        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n",
+                                                         fAllowed0, fAllowed1, fFeatures));
+        pVmxMsrs->PinCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
+    }
+
+    /* Processor-based VM-execution controls. */
+    {
+        uint32_t const fFeatures = (pFeatures->fVmxIntWindowExit     << VMX_BF_PROC_CTLS_INT_WINDOW_EXIT_SHIFT   )
+                                 | (pFeatures->fVmxTscOffsetting     << VMX_BF_PROC_CTLS_USE_TSC_OFFSETTING_SHIFT)
+                                 | (pFeatures->fVmxHltExit           << VMX_BF_PROC_CTLS_HLT_EXIT_SHIFT          )
+                                 | (pFeatures->fVmxInvlpgExit        << VMX_BF_PROC_CTLS_INVLPG_EXIT_SHIFT       )
+                                 | (pFeatures->fVmxMwaitExit         << VMX_BF_PROC_CTLS_MWAIT_EXIT_SHIFT        )
+                                 | (pFeatures->fVmxRdpmcExit         << VMX_BF_PROC_CTLS_RDPMC_EXIT_SHIFT        )
+                                 | (pFeatures->fVmxRdtscExit         << VMX_BF_PROC_CTLS_RDTSC_EXIT_SHIFT        )
+                                 | (pFeatures->fVmxCr3LoadExit       << VMX_BF_PROC_CTLS_CR3_LOAD_EXIT_SHIFT     )
+                                 | (pFeatures->fVmxCr3StoreExit      << VMX_BF_PROC_CTLS_CR3_STORE_EXIT_SHIFT    )
+                                 | (pFeatures->fVmxCr8LoadExit       << VMX_BF_PROC_CTLS_CR8_LOAD_EXIT_SHIFT     )
+                                 | (pFeatures->fVmxCr8StoreExit      << VMX_BF_PROC_CTLS_CR8_STORE_EXIT_SHIFT    )
+                                 | (pFeatures->fVmxUseTprShadow      << VMX_BF_PROC_CTLS_USE_TPR_SHADOW_SHIFT    )
+                                 | (pFeatures->fVmxNmiWindowExit     << VMX_BF_PROC_CTLS_NMI_WINDOW_EXIT_SHIFT   )
+                                 | (pFeatures->fVmxMovDRxExit        << VMX_BF_PROC_CTLS_MOV_DR_EXIT_SHIFT       )
+                                 | (pFeatures->fVmxUncondIoExit      << VMX_BF_PROC_CTLS_UNCOND_IO_EXIT_SHIFT    )
+                                 | (pFeatures->fVmxUseIoBitmaps      << VMX_BF_PROC_CTLS_USE_IO_BITMAPS_SHIFT    )
+                                 | (pFeatures->fVmxMonitorTrapFlag   << VMX_BF_PROC_CTLS_MONITOR_TRAP_FLAG_SHIFT )
+                                 | (pFeatures->fVmxUseMsrBitmaps     << VMX_BF_PROC_CTLS_USE_MSR_BITMAPS_SHIFT   )
+                                 | (pFeatures->fVmxMonitorExit       << VMX_BF_PROC_CTLS_MONITOR_EXIT_SHIFT      )
+                                 | (pFeatures->fVmxPauseExit         << VMX_BF_PROC_CTLS_PAUSE_EXIT_SHIFT        )
+                                 | (pFeatures->fVmxSecondaryExecCtls << VMX_BF_PROC_CTLS_USE_SECONDARY_CTLS_SHIFT);
+        uint32_t const fAllowed0 = VMX_PROC_CTLS_DEFAULT1;
+        uint32_t const fAllowed1 = fFeatures | VMX_PROC_CTLS_DEFAULT1;
+        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
+                                                         fAllowed1, fFeatures));
+        pVmxMsrs->ProcCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
+    }
+
+    /* Secondary processor-based VM-execution controls. */
+    if (pFeatures->fVmxSecondaryExecCtls)
+    {
+        uint32_t const fFeatures = (pFeatures->fVmxVirtApicAccess    << VMX_BF_PROC_CTLS2_VIRT_APIC_ACCESS_SHIFT  )
+                                 | (pFeatures->fVmxEpt               << VMX_BF_PROC_CTLS2_EPT_SHIFT               )
+                                 | (pFeatures->fVmxDescTableExit     << VMX_BF_PROC_CTLS2_DESC_TABLE_EXIT_SHIFT   )
+                                 | (pFeatures->fVmxRdtscp            << VMX_BF_PROC_CTLS2_RDTSCP_SHIFT            )
+                                 | (pFeatures->fVmxVirtX2ApicMode    << VMX_BF_PROC_CTLS2_VIRT_X2APIC_MODE_SHIFT  )
+                                 | (pFeatures->fVmxVpid              << VMX_BF_PROC_CTLS2_VPID_SHIFT              )
+                                 | (pFeatures->fVmxWbinvdExit        << VMX_BF_PROC_CTLS2_WBINVD_EXIT_SHIFT       )
+                                 | (pFeatures->fVmxUnrestrictedGuest << VMX_BF_PROC_CTLS2_UNRESTRICTED_GUEST_SHIFT)
+                                 | (pFeatures->fVmxApicRegVirt       << VMX_BF_PROC_CTLS2_APIC_REG_VIRT_SHIFT     )
+                                 | (pFeatures->fVmxVirtIntDelivery   << VMX_BF_PROC_CTLS2_VIRT_INT_DELIVERY_SHIFT )
+                                 | (pFeatures->fVmxPauseLoopExit     << VMX_BF_PROC_CTLS2_PAUSE_LOOP_EXIT_SHIFT   )
+                                 | (pFeatures->fVmxRdrandExit        << VMX_BF_PROC_CTLS2_RDRAND_EXIT_SHIFT       )
+                                 | (pFeatures->fVmxInvpcid           << VMX_BF_PROC_CTLS2_INVPCID_SHIFT           )
+                                 | (pFeatures->fVmxVmFunc            << VMX_BF_PROC_CTLS2_VMFUNC_SHIFT            )
+                                 | (pFeatures->fVmxVmcsShadowing     << VMX_BF_PROC_CTLS2_VMCS_SHADOWING_SHIFT    )
+                                 | (pFeatures->fVmxRdseedExit        << VMX_BF_PROC_CTLS2_RDSEED_EXIT_SHIFT       )
+                                 | (pFeatures->fVmxPml               << VMX_BF_PROC_CTLS2_PML_SHIFT               )
+                                 | (pFeatures->fVmxEptXcptVe         << VMX_BF_PROC_CTLS2_EPT_VE_SHIFT            )
+                                 | (pFeatures->fVmxXsavesXrstors     << VMX_BF_PROC_CTLS2_XSAVES_XRSTORS_SHIFT    )
+                                 | (pFeatures->fVmxUseTscScaling     << VMX_BF_PROC_CTLS2_TSC_SCALING_SHIFT       );
+        uint32_t const fAllowed0 = 0;
+        uint32_t const fAllowed1 = fFeatures;
+        pVmxMsrs->ProcCtls2.u = RT_MAKE_U64(fAllowed0, fAllowed1);
+    }
+
+    /* VM-exit controls. */
+    {
+        uint32_t const fFeatures = (pFeatures->fVmxExitSaveDebugCtls << VMX_BF_EXIT_CTLS_SAVE_DEBUG_SHIFT          )
+                                 | (pFeatures->fVmxHostAddrSpaceSize << VMX_BF_EXIT_CTLS_HOST_ADDR_SPACE_SIZE_SHIFT)
+                                 | (pFeatures->fVmxExitAckExtInt     << VMX_BF_EXIT_CTLS_ACK_EXT_INT_SHIFT         )
+                                 | (pFeatures->fVmxExitSavePatMsr    << VMX_BF_EXIT_CTLS_SAVE_PAT_MSR_SHIFT        )
+                                 | (pFeatures->fVmxExitLoadPatMsr    << VMX_BF_EXIT_CTLS_LOAD_PAT_MSR_SHIFT        )
+                                 | (pFeatures->fVmxExitSaveEferMsr   << VMX_BF_EXIT_CTLS_SAVE_EFER_MSR_SHIFT       )
+                                 | (pFeatures->fVmxExitLoadEferMsr   << VMX_BF_EXIT_CTLS_LOAD_EFER_MSR_SHIFT       )
+                                 | (pFeatures->fVmxSavePreemptTimer  << VMX_BF_EXIT_CTLS_SAVE_PREEMPT_TIMER_SHIFT  );
+        /* Set the default1 class bits. See Intel spec. A.4 "VM-exit Controls". */
+        uint32_t const fAllowed0 = VMX_EXIT_CTLS_DEFAULT1;
+        uint32_t const fAllowed1 = fFeatures | VMX_EXIT_CTLS_DEFAULT1;
+        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
+                                                         fAllowed1, fFeatures));
+        pVmxMsrs->ExitCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
+    }
+
+    /* VM-entry controls. */
+    {
+        uint32_t const fFeatures = (pFeatures->fVmxEntryLoadDebugCtls << VMX_BF_ENTRY_CTLS_LOAD_DEBUG_SHIFT      )
+                                 | (pFeatures->fVmxIa32eModeGuest     << VMX_BF_ENTRY_CTLS_IA32E_MODE_GUEST_SHIFT)
+                                 | (pFeatures->fVmxEntryLoadEferMsr   << VMX_BF_ENTRY_CTLS_LOAD_EFER_MSR_SHIFT   )
+                                 | (pFeatures->fVmxEntryLoadPatMsr    << VMX_BF_ENTRY_CTLS_LOAD_PAT_MSR_SHIFT    );
+        uint32_t const fAllowed0 = VMX_ENTRY_CTLS_DEFAULT1;
+        uint32_t const fAllowed1 = fFeatures | VMX_ENTRY_CTLS_DEFAULT1;
+        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
+                                                         fAllowed1, fFeatures));
+        pVmxMsrs->EntryCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
+    }
+
+    /* Miscellaneous data. */
+    {
+        uint64_t uHostMsr = 0;
+        HMVmxGetHostMsr(pVM, MSR_IA32_VMX_MISC, &uHostMsr);
+        uint8_t const cMaxMsrs       = RT_MIN(RT_BF_GET(uHostMsr, VMX_BF_MISC_MAX_MSRS), VMX_V_AUTOMSR_COUNT_MAX);
+        uint8_t const fActivityState = RT_BF_GET(uHostMsr, VMX_BF_MISC_ACTIVITY_STATES) & VMX_V_GUEST_ACTIVITY_STATE_MASK;
+        pVmxMsrs->u64Misc = RT_BF_MAKE(VMX_BF_MISC_PREEMPT_TIMER_TSC,      VMX_V_PREEMPT_TIMER_SHIFT        )
+                          | RT_BF_MAKE(VMX_BF_MISC_EXIT_SAVE_EFER_LMA,     pFeatures->fVmxExitSaveEferLma   )
+                          | RT_BF_MAKE(VMX_BF_MISC_ACTIVITY_STATES,        fActivityState                   )
+                          | RT_BF_MAKE(VMX_BF_MISC_INTEL_PT,               pFeatures->fVmxIntelPt           )
+                          | RT_BF_MAKE(VMX_BF_MISC_SMM_READ_SMBASE_MSR,    0                                )
+                          | RT_BF_MAKE(VMX_BF_MISC_CR3_TARGET,             VMX_V_CR3_TARGET_COUNT           )
+                          | RT_BF_MAKE(VMX_BF_MISC_MAX_MSRS,               cMaxMsrs                         )
+                          | RT_BF_MAKE(VMX_BF_MISC_VMXOFF_BLOCK_SMI,       0                                )
+                          | RT_BF_MAKE(VMX_BF_MISC_VMWRITE_ALL,            pFeatures->fVmxVmwriteAll        )
+                          | RT_BF_MAKE(VMX_BF_MISC_ENTRY_INJECT_SOFT_INT,  pFeatures->fVmxEntryInjectSoftInt)
+                          | RT_BF_MAKE(VMX_BF_MISC_MSEG_ID,                VMX_V_MSEG_REV_ID                );
+    }
+
+    /* CR0 Fixed-0. */
+    pVmxMsrs->u64Cr0Fixed0 = pFeatures->fVmxUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
+
+    /* CR0 Fixed-1. */
+    {
+        uint64_t uHostMsr = 0;
+        HMVmxGetHostMsr(pVM, MSR_IA32_VMX_CR0_FIXED1, &uHostMsr);
+        pVmxMsrs->u64Cr0Fixed1 = uHostMsr | VMX_V_CR0_FIXED0;   /* Make sure the CR0 MB1 bits are not clear. */
+    }
+
+    /* CR4 Fixed-0. */
+    pVmxMsrs->u64Cr4Fixed0 = VMX_V_CR4_FIXED0;
+
+    /* CR4 Fixed-1. */
+    {
+        uint64_t uHostMsr = 0;
+        HMVmxGetHostMsr(pVM, MSR_IA32_VMX_CR4_FIXED1, &uHostMsr);
+        pVmxMsrs->u64Cr4Fixed1 = uHostMsr | VMX_V_CR4_FIXED0;   /* Make sure the CR4 MB1 bits are not clear. */
+    }
+
+    /* VMCS Enumeration. */
+    pVmxMsrs->u64VmcsEnum = VMX_V_VMCS_MAX_INDEX << VMX_BF_VMCS_ENUM_HIGHEST_IDX_SHIFT;
+
+    /* VM Functions. */
+    if (pFeatures->fVmxVmFunc)
+        pVmxMsrs->u64VmFunc = RT_BF_MAKE(VMX_BF_VMFUNC_EPTP_SWITCHING, 1);
+
+    /*
+     * We don't support the following MSRs yet:
+     *   - True Pin-based VM-execution controls.
+     *   - True Processor-based VM-execution controls.
+     *   - True VM-entry VM-execution controls.
+     *   - True VM-exit VM-execution controls.
+     *   - EPT/VPID capabilities.
+     */
+
+    /*
+     * Copy the MSRs values initialized in VCPU 0 to all other VCPUs.
+     */
+    for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
+    {
+        PVMCPU pVCpu = &pVM->aCpus[idCpu];
+        Assert(pVCpu);
+        memcpy(&pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs, pVmxMsrs, sizeof(*pVmxMsrs));
+    }
+}
+
+
+/**
  * Explode VMX features from the provided MSRs.
  *
@@ -1245,4 +1444,5 @@
     Assert(pVmxMsrs);
     Assert(pFeatures);
+    Assert(pFeatures->fVmx);
 
     /* Basic information. */
@@ -1313,13 +1513,4 @@
     }
 
-    /* VM-entry controls. */
-    {
-        uint32_t const fEntryCtls = pVmxMsrs->EntryCtls.n.allowed1;
-        pFeatures->fVmxEntryLoadDebugCtls    = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG);
-        pFeatures->fVmxIa32eModeGuest        = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
-        pFeatures->fVmxEntryLoadEferMsr      = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR);
-        pFeatures->fVmxEntryLoadPatMsr       = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR);
-    }
-
     /* VM-exit controls. */
     {
@@ -1333,4 +1524,13 @@
         pFeatures->fVmxExitLoadEferMsr       = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR);
         pFeatures->fVmxSavePreemptTimer      = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
+    }
+
+    /* VM-entry controls. */
+    {
+        uint32_t const fEntryCtls = pVmxMsrs->EntryCtls.n.allowed1;
+        pFeatures->fVmxEntryLoadDebugCtls    = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG);
+        pFeatures->fVmxIa32eModeGuest        = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
+        pFeatures->fVmxEntryLoadEferMsr      = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR);
+        pFeatures->fVmxEntryLoadPatMsr       = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR);
     }
 
@@ -1536,4 +1736,9 @@
         Assert(!pGuestFeat->fVmxUseTscScaling);
     }
+
+    /*
+     * Finally initialize the VMX guest MSRs after merging the guest features.
+     */
+    cpumR3InitGuestVmxMsrs(pVM);
 }
 
