Index: /trunk/Config.kmk
===================================================================
--- /trunk/Config.kmk	(revision 49892)
+++ /trunk/Config.kmk	(revision 49893)
@@ -3768,5 +3768,5 @@
 TEMPLATE_VBOXR3STATIC                  = VBox Static Ring 3 EXE
 TEMPLATE_VBOXR3STATIC_EXTENDS          = VBOXR3EXE
-TEMPLATE_VBOXR3STATIC_DEFS             = IN_RT_STATIC IN_RT_R3 $(TEMPLATE_VBOXR3EXE_DEFS)
+TEMPLATE_VBOXR3STATIC_DEFS             = IN_RT_STATIC IN_RT_R3 IN_SUP_STATIC $(TEMPLATE_VBOXR3EXE_DEFS)
 ifeq ($(KBUILD_TARGET),win)
  TEMPLATE_VBOXR3STATIC_CFLAGS          = $(filter-out -MD$(VBOX_VCC_CRT_TYPE), $(TEMPLATE_VBOXR3EXE_CFLAGS)) -MT$(VBOX_VCC_CRT_TYPE)
Index: /trunk/include/VBox/cdefs.h
===================================================================
--- /trunk/include/VBox/cdefs.h	(revision 49892)
+++ /trunk/include/VBox/cdefs.h	(revision 49893)
@@ -204,7 +204,15 @@
  */
 #ifdef IN_SUP_R3
-# define SUPR3DECL(type)    DECLEXPORT(type) VBOXCALL
-#else
-# define SUPR3DECL(type)    DECLIMPORT(type) VBOXCALL
+# ifdef IN_SUP_STATIC
+#  define SUPR3DECL(type)   DECLHIDDEN(type) VBOXCALL
+# else
+#  define SUPR3DECL(type)   DECLEXPORT(type) VBOXCALL
+# endif
+#else
+# ifdef IN_SUP_STATIC
+#  define SUPR3DECL(type)   DECLHIDDEN(type) VBOXCALL
+# else
+#  define SUPR3DECL(type)   DECLIMPORT(type) VBOXCALL
+# endif
 #endif
 
Index: /trunk/include/VBox/err.h
===================================================================
--- /trunk/include/VBox/err.h	(revision 49892)
+++ /trunk/include/VBox/err.h	(revision 49893)
@@ -611,4 +611,14 @@
  * parts of the CS register. */
 #define VERR_CPUM_HIDDEN_CS_LOAD_ERROR          (-1752)
+/** Couldn't find the end of CPUID sub-leaves. */
+#define VERR_CPUM_TOO_MANY_CPUID_SUBLEAVES      (-1753)
+/** CPUM internal processing error \#1. */
+#define VERR_CPUM_IPE_1                         (-1754)
+/** CPUM internal processing error \#2. */
+#define VERR_CPUM_IPE_2                         (-1755)
+/** The specified CPU cannot be found in the CPU database. */
+#define VERR_CPUM_DB_CPU_NOT_FOUND              (-1756)
+/** Invalid CPUMCPU offset in MSR range. */
+#define VERR_CPUM_MSR_BAD_CPUMCPU_OFFSET        (-1757)
 /** @} */
 
Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 49892)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 49893)
@@ -4,5 +4,5 @@
 
 /*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ -78,9 +78,254 @@
     CPUMCPUVENDOR_AMD,
     CPUMCPUVENDOR_VIA,
+    CPUMCPUVENDOR_CYRIX,
     CPUMCPUVENDOR_UNKNOWN,
-    CPUMCPUVENDOR_SYNTHETIC,
     /** 32bit hackishness. */
     CPUMCPUVENDOR_32BIT_HACK = 0x7fffffff
 } CPUMCPUVENDOR;
+
+
+/**
+ * X86 and AMD64 CPU microarchitectures and in processor generations.
+ *
+ * @remarks The separation here is sometimes a little bit too finely grained,
+ *          and the differences are more like processor generation than micro
+ *          arch.  This can be useful, so we'll provide functions for getting at
+ *          more coarse grained info.
+ */
+typedef enum CPUMMICROARCH
+{
+    kCpumMicroarch_Invalid = 0,
+
+    kCpumMicroarch_Intel_First,
+
+    kCpumMicroarch_Intel_8086 = kCpumMicroarch_Intel_First,
+    kCpumMicroarch_Intel_80186,
+    kCpumMicroarch_Intel_80286,
+    kCpumMicroarch_Intel_80386,
+    kCpumMicroarch_Intel_80486,
+    kCpumMicroarch_Intel_P5,
+
+    kCpumMicroarch_Intel_P6_Core_Atom_First,
+    kCpumMicroarch_Intel_P6 = kCpumMicroarch_Intel_P6_Core_Atom_First,
+    kCpumMicroarch_Intel_P6_II,
+    kCpumMicroarch_Intel_P6_III,
+
+    kCpumMicroarch_Intel_P6_M_Banias,
+    kCpumMicroarch_Intel_P6_M_Dothan,
+    kCpumMicroarch_Intel_Core_Yonah,        /**< Core, also known as Enhanced Pentium M. */
+
+    kCpumMicroarch_Intel_Core2_Merom,
+    kCpumMicroarch_Intel_Core2_Penryn,
+
+    kCpumMicroarch_Intel_Core7_First,
+    kCpumMicroarch_Intel_Core7_Nehalem = kCpumMicroarch_Intel_Core7_First,
+    kCpumMicroarch_Intel_Core7_Westmere,
+    kCpumMicroarch_Intel_Core7_SandyBridge,
+    kCpumMicroarch_Intel_Core7_IvyBridge,
+    kCpumMicroarch_Intel_Core7_Haswell,
+    kCpumMicroarch_Intel_Core7_Broadwell,
+    kCpumMicroarch_Intel_Core7_Skylake,
+    kCpumMicroarch_Intel_Core7_Cannonlake,
+    kCpumMicroarch_Intel_Core7_End,
+
+    kCpumMicroarch_Intel_Atom_First,
+    kCpumMicroarch_Intel_Atom_Bonnell = kCpumMicroarch_Intel_Atom_First,
+    kCpumMicroarch_Intel_Atom_Lincroft,     /**< Second generation bonnell (44nm). */
+    kCpumMicroarch_Intel_Atom_Saltwell,     /**< 32nm shrink of Bonnell. */
+    kCpumMicroarch_Intel_Atom_Silvermont,   /**< 22nm */
+    kCpumMicroarch_Intel_Atom_Airmount,     /**< 14nm */
+    kCpumMicroarch_Intel_Atom_Goldmont,     /**< 14nm */
+    kCpumMicroarch_Intel_Atom_Unknown,
+    kCpumMicroarch_Intel_Atom_End,
+
+    kCpumMicroarch_Intel_P6_Core_Atom_End,
+
+    kCpumMicroarch_Intel_NB_First,
+    kCpumMicroarch_Intel_NB_Willamette = kCpumMicroarch_Intel_NB_First, /**< 180nm */
+    kCpumMicroarch_Intel_NB_Northwood,      /**< 130nm */
+    kCpumMicroarch_Intel_NB_Prescott,       /**< 90nm */
+    kCpumMicroarch_Intel_NB_Prescott2M,     /**< 90nm */
+    kCpumMicroarch_Intel_NB_CedarMill,      /**< 65nm */
+    kCpumMicroarch_Intel_NB_Gallatin,       /**< 90nm Xeon, Pentium 4 Extreme Edition ("Emergency Edition"). */
+    kCpumMicroarch_Intel_NB_Unknown,
+    kCpumMicroarch_Intel_NB_End,
+
+    kCpumMicroarch_Intel_Unknown,
+    kCpumMicroarch_Intel_End,
+
+    kCpumMicroarch_AMD_First,
+    kCpumMicroarch_AMD_Am286 = kCpumMicroarch_AMD_First,
+    kCpumMicroarch_AMD_Am386,
+    kCpumMicroarch_AMD_Am486,
+    kCpumMicroarch_AMD_Am486Enh,            /**< Covers Am5x86 as well. */
+    kCpumMicroarch_AMD_K5,
+    kCpumMicroarch_AMD_K6,
+
+    kCpumMicroarch_AMD_K7_First,
+    kCpumMicroarch_AMD_K7_Palomino = kCpumMicroarch_AMD_K7_First,
+    kCpumMicroarch_AMD_K7_Spitfire,
+    kCpumMicroarch_AMD_K7_Thunderbird,
+    kCpumMicroarch_AMD_K7_Morgan,
+    kCpumMicroarch_AMD_K7_Thoroughbred,
+    kCpumMicroarch_AMD_K7_Barton,
+    kCpumMicroarch_AMD_K7_Unknown,
+    kCpumMicroarch_AMD_K7_End,
+
+    kCpumMicroarch_AMD_K8_First,
+    kCpumMicroarch_AMD_K8_130nm = kCpumMicroarch_AMD_K8_First, /**< 130nm Clawhammer, Sledgehammer, Newcastle, Paris, Odessa, Dublin */
+    kCpumMicroarch_AMD_K8_90nm,             /**< 90nm shrink */
+    kCpumMicroarch_AMD_K8_90nm_DualCore,    /**< 90nm with two cores. */
+    kCpumMicroarch_AMD_K8_90nm_AMDV,        /**< 90nm with AMD-V (usually) and two cores (usually). */
+    kCpumMicroarch_AMD_K8_65nm,             /**< 65nm shrink. */
+    kCpumMicroarch_AMD_K8_End,
+
+    kCpumMicroarch_AMD_K10,
+    kCpumMicroarch_AMD_K10_Lion,
+    kCpumMicroarch_AMD_K10_Llano,
+    kCpumMicroarch_AMD_Bobcat,
+    kCpumMicroarch_AMD_Jaguar,
+
+    kCpumMicroarch_AMD_15h_First,
+    kCpumMicroarch_AMD_15h_Bulldozer = kCpumMicroarch_AMD_15h_First,
+    kCpumMicroarch_AMD_15h_Piledriver,
+    kCpumMicroarch_AMD_15h_Steamroller,     /**< Yet to be released, might have different family.  */
+    kCpumMicroarch_AMD_15h_Excavator,       /**< Yet to be released, might have different family.  */
+    kCpumMicroarch_AMD_15h_Unknown,
+    kCpumMicroarch_AMD_15h_End,
+
+    kCpumMicroarch_AMD_16h_First,
+    kCpumMicroarch_AMD_16h_End,
+
+    kCpumMicroarch_AMD_Unknown,
+    kCpumMicroarch_AMD_End,
+
+    kCpumMicroarch_VIA_First,
+    kCpumMicroarch_Centaur_C6 = kCpumMicroarch_VIA_First,
+    kCpumMicroarch_Centaur_C2,
+    kCpumMicroarch_Centaur_C3,
+    kCpumMicroarch_VIA_C3_M2,
+    kCpumMicroarch_VIA_C3_C5A,          /**< 180nm Samuel - Cyrix III, C3, 1GigaPro. */
+    kCpumMicroarch_VIA_C3_C5B,          /**< 150nm Samuel 2 - Cyrix III, C3, 1GigaPro, Eden ESP, XP 2000+. */
+    kCpumMicroarch_VIA_C3_C5C,          /**< 130nm Ezra - C3, Eden ESP. */
+    kCpumMicroarch_VIA_C3_C5N,          /**< 130nm Ezra-T - C3. */
+    kCpumMicroarch_VIA_C3_C5XL,         /**< 130nm Nehemiah - C3, Eden ESP, Eden-N. */
+    kCpumMicroarch_VIA_C3_C5P,          /**< 130nm Nehemiah+ - C3. */
+    kCpumMicroarch_VIA_C7_C5J,          /**< 90nm Esther - C7, C7-D, C7-M, Eden, Eden ULV. */
+    kCpumMicroarch_VIA_Isaiah,
+    kCpumMicroarch_VIA_Unknown,
+    kCpumMicroarch_VIA_End,
+
+    kCpumMicroarch_Cyrix_First,
+    kCpumMicroarch_Cyrix_5x86 = kCpumMicroarch_Cyrix_First,
+    kCpumMicroarch_Cyrix_M1,
+    kCpumMicroarch_Cyrix_MediaGX,
+    kCpumMicroarch_Cyrix_MediaGXm,
+    kCpumMicroarch_Cyrix_M2,
+    kCpumMicroarch_Cyrix_Unknown,
+    kCpumMicroarch_Cyrix_End,
+
+    kCpumMicroarch_Unknown,
+
+    kCpumMicroarch_32BitHack = 0x7fffffff
+} CPUMMICROARCH;
+
+
+/** Predicate macro for catching netburst CPUs. */
+#define CPUMMICROARCH_IS_INTEL_NETBURST(a_enmMicroarch) \
+    ((a_enmMicroarch) >= kCpumMicroarch_Intel_NB_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_NB_End)
+
+/** Predicate macro for catching Core7 CPUs. */
+#define CPUMMICROARCH_IS_INTEL_CORE7(a_enmMicroarch) \
+    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core7_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core7_End)
+
+/** Predicate macro for catching AMD Family 8H CPUs (aka K8).    */
+#define CPUMMICROARCH_IS_AMD_FAM_8H(a_enmMicroarch) \
+    ((a_enmMicroarch) >= kCpumMicroarch_AMD_K8_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_K8_End)
+
+/** Predicate macro for catching AMD Family 10H CPUs (aka K10).    */
+#define CPUMMICROARCH_IS_AMD_FAM_10H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10)
+
+/** Predicate macro for catching AMD Family 11H CPUs (aka Lion).    */
+#define CPUMMICROARCH_IS_AMD_FAM_11H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Lion)
+
+/** Predicate macro for catching AMD Family 12H CPUs (aka Llano).    */
+#define CPUMMICROARCH_IS_AMD_FAM_12H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Llano)
+
+/** Predicate macro for catching AMD Family 14H CPUs (aka Bobcat).    */
+#define CPUMMICROARCH_IS_AMD_FAM_14H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_Bobcat)
+
+/** Predicate macro for catching AMD Family 15H CPUs (Bulldozer and its
+ * descendants). */
+#define CPUMMICROARCH_IS_AMD_FAM_15H(a_enmMicroarch) \
+    ((a_enmMicroarch) >= kCpumMicroarch_AMD_15h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_15h_End)
+
+/** Predicate macro for catching AMD Family 16H CPUs. */
+#define CPUMMICROARCH_IS_AMD_FAM_16H(a_enmMicroarch) \
+    ((a_enmMicroarch) >= kCpumMicroarch_AMD_16h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_16h_End)
+
+
+
+/**
+ * CPUID leaf.
+ */
+typedef struct CPUMCPUIDLEAF
+{
+    /** The leaf number. */
+    uint32_t    uLeaf;
+    /** The sub-leaf number. */
+    uint32_t    uSubLeaf;
+    /** Sub-leaf mask.  This is 0 when sub-leaves aren't used. */
+    uint32_t    fSubLeafMask;
+
+    /** The EAX value. */
+    uint32_t    uEax;
+    /** The EBX value. */
+    uint32_t    uEbx;
+    /** The ECX value. */
+    uint32_t    uEcx;
+    /** The EDX value. */
+    uint32_t    uEdx;
+
+    /** Flags. */
+    uint32_t    fFlags;
+} CPUMCPUIDLEAF;
+/** Pointer to a CPUID leaf. */
+typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF;
+/** Pointer to a const CPUID leaf. */
+typedef CPUMCPUIDLEAF const *PCCPUMCPUIDLEAF;
+
+/** @name CPUMCPUIDLEAF::fFlags
+ * @{ */
+/** Indicates that ECX (the sub-leaf indicator) doesn't change when
+ * requesting the final leaf and all undefined leaves that follow it.
+ * Observed for 0x0000000b on Intel. */
+#define CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED RT_BIT_32(0)
+/** @} */
+
+/**
+ * Method used to deal with unknown CPUID leaves.
+ */
+typedef enum CPUMUKNOWNCPUID
+{
+    /** Invalid zero value. */
+    CPUMUKNOWNCPUID_INVALID = 0,
+    /** Use given default values (DefCpuId). */
+    CPUMUKNOWNCPUID_DEFAULTS,
+    /** Return the last standard leaf.
+     * Intel Sandy Bridge has been observed doing this. */
+    CPUMUKNOWNCPUID_LAST_STD_LEAF,
+    /** Return the last standard leaf, with ecx observed.
+     * Intel Sandy Bridge has been observed doing this. */
+    CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
+    /** The register values are passed thru unmodified. */
+    CPUMUKNOWNCPUID_PASSTHRU,
+    /** End of valid value. */
+    CPUMUKNOWNCPUID_END,
+    /** Ensure 32-bit type. */
+    CPUMUKNOWNCPUID_32BIT_HACK = 0x7fffffff
+} CPUMUKNOWNCPUID;
+/** Pointer to unknown CPUID leaf method. */
+typedef CPUMUKNOWNCPUID *PCPUMUKNOWNCPUID;
+
 
 
@@ -173,4 +418,6 @@
 VMM_INT_DECL(void)  CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu);
 VMM_INT_DECL(void)  CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg);
+VMMR0_INT_DECL(void)        CPUMR0SetGuestTscAux(PVMCPU pVCpu, uint64_t uValue);
+VMMR0_INT_DECL(uint64_t)    CPUMR0GetGuestTscAux(PVMCPU pVCpu);
 /** @} */
 
@@ -443,5 +690,5 @@
 VMMR3DECL(int)          CPUMR3Term(PVM pVM);
 VMMR3DECL(void)         CPUMR3Reset(PVM pVM);
-VMMR3DECL(void)         CPUMR3ResetCpu(PVMCPU pVCpu);
+VMMR3DECL(void)         CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
 VMMDECL(bool)           CPUMR3IsStateRestorePending(PVM pVM);
 VMMR3DECL(void)         CPUMR3SetHWVirtEx(PVM pVM, bool fHWVirtExEnabled);
@@ -451,4 +698,13 @@
 VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdCentaurRCPtr(PVM pVM);
 VMMR3DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdDefRCPtr(PVM pVM);
+
+VMMR3DECL(CPUMMICROARCH)    CPUMR3CpuIdDetermineMicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
+                                                            uint8_t bModel, uint8_t bStepping);
+VMMR3DECL(const char *)     CPUMR3MicroarchName(CPUMMICROARCH enmMicroarch);
+VMMR3DECL(int)              CPUMR3CpuIdCollectLeaves(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
+VMMR3DECL(int)              CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
+VMMR3DECL(const char *)     CPUMR3CpuIdUnknownLeafMethodName(CPUMUKNOWNCPUID enmUnknownMethod);
+VMMR3DECL(CPUMCPUVENDOR)    CPUMR3CpuIdDetectVendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
+VMMR3DECL(const char *)     CPUMR3CpuVendorName(CPUMCPUVENDOR enmVendor);
 
 /** @} */
Index: /trunk/include/VBox/vmm/mm.h
===================================================================
--- /trunk/include/VBox/vmm/mm.h	(revision 49892)
+++ /trunk/include/VBox/vmm/mm.h	(revision 49893)
@@ -59,4 +59,6 @@
 
     MM_TAG_CPUM_CTX,
+    MM_TAG_CPUM_CPUID,
+    MM_TAG_CPUM_MSRS,
 
     MM_TAG_DBGF,
@@ -206,4 +208,5 @@
 
 VMMDECL(int)        MMHyperAlloc(PVM pVM, size_t cb, uint32_t uAlignment, MMTAG enmTag, void **ppv);
+VMMDECL(int)        MMHyperDupMem(PVM pVM, const void *pvSrc, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv);
 VMMDECL(int)        MMHyperFree(PVM pVM, void *pv);
 VMMDECL(void)       MMHyperHeapCheck(PVM pVM);
Index: /trunk/include/iprt/x86.h
===================================================================
--- /trunk/include/iprt/x86.h	(revision 49892)
+++ /trunk/include/iprt/x86.h	(revision 49893)
@@ -1037,23 +1037,23 @@
 #define MSR_IA32_MISC_ENABLE                   0x1A0
 /** Enable fast-strings feature (for REP MOVS and REP STORS). */
-#define MSR_IA32_MISC_ENABLE_FAST_STRINGS      RT_BIT(0)
+#define MSR_IA32_MISC_ENABLE_FAST_STRINGS      RT_BIT_64(0)
 /** Automatic Thermal Control Circuit Enable (R/W). */
-#define MSR_IA32_MISC_ENABLE_TCC               RT_BIT(3)
+#define MSR_IA32_MISC_ENABLE_TCC               RT_BIT_64(3)
 /** Performance Monitoring Available (R). */
-#define MSR_IA32_MISC_ENABLE_PERF_MON          RT_BIT(7)
+#define MSR_IA32_MISC_ENABLE_PERF_MON          RT_BIT_64(7)
 /** Branch Trace Storage Unavailable (R/O). */
-#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL       RT_BIT(11)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL       RT_BIT_64(11)
 /** Precise Event Based Sampling (PEBS) Unavailable (R/O). */
-#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL      RT_BIT(12)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL      RT_BIT_64(12)
 /** Enhanced Intel SpeedStep Technology Enable (R/W). */
-#define MSR_IA32_MISC_ENABLE_SST_ENABLE        RT_BIT(16)
+#define MSR_IA32_MISC_ENABLE_SST_ENABLE        RT_BIT_64(16)
 /** If MONITOR/MWAIT is supported (R/W). */
-#define MSR_IA32_MISC_ENABLE_MONITOR           RT_BIT(18)
+#define MSR_IA32_MISC_ENABLE_MONITOR           RT_BIT_64(18)
 /** Limit CPUID Maxval to 3 leafs (R/W). */
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID       RT_BIT(22)
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID       RT_BIT_64(22)
 /** When set to 1, xTPR messages are disabled (R/W). */
-#define MSR_IA32_MISC_ENABLE_XTPR_MSG_DISABLE  RT_BIT(23)
+#define MSR_IA32_MISC_ENABLE_XTPR_MSG_DISABLE  RT_BIT_64(23)
 /** When set to 1, the Execute Disable Bit feature (XD Bit) is disabled (R/W). */
-#define MSR_IA32_MISC_ENABLE_XD_DISABLE        RT_BIT(34)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE        RT_BIT_64(34)
 
 /** Trace/Profile Resource Control (R/W) */
Index: /trunk/src/VBox/VMM/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/Makefile.kmk	(revision 49892)
+++ /trunk/src/VBox/VMM/Makefile.kmk	(revision 49893)
@@ -77,4 +77,8 @@
 endif
 
+ifdef VBOX_WITH_NEW_MSR_CODE
+ VMM_COMMON_DEFS += VBOX_WITH_NEW_MSR_CODE
+endif
+
 
 #
@@ -118,4 +122,6 @@
 	VMMR3/CFGM.cpp \
 	VMMR3/CPUM.cpp \
+	VMMR3/CPUMR3CpuId.cpp \
+	VMMR3/CPUMR3Db.cpp \
 	VMMR3/CPUMDbg.cpp \
 	VMMR3/DBGF.cpp \
@@ -187,4 +193,5 @@
        ,) \
 	VMMAll/CPUMAllRegs.cpp \
+	VMMAll/CPUMAllMsrs.cpp \
 	VMMAll/CPUMStack.cpp \
 	VMMAll/DBGFAll.cpp \
@@ -438,4 +445,5 @@
  	VMMRZ/VMMRZ.cpp \
  	VMMAll/CPUMAllRegs.cpp \
+	VMMAll/CPUMAllMsrs.cpp \
  	VMMAll/DBGFAll.cpp \
 	VMMAll/IEMAll.cpp \
@@ -540,4 +548,5 @@
  	VMMRZ/VMMRZ.cpp \
  	VMMAll/CPUMAllRegs.cpp \
+	VMMAll/CPUMAllMsrs.cpp \
  	VMMAll/CPUMStack.cpp \
  	VMMAll/DBGFAll.cpp \
@@ -610,7 +619,9 @@
  LIBRARIES += SSMStandalone
  SSMStandalone_TEMPLATE = VBOXR3EXE
- SSMStandalone_DEFS     = IN_VMM_R3 IN_VMM_STATIC SSM_STANDALONE
+ SSMStandalone_DEFS     = IN_VMM_R3 IN_VMM_STATIC SSM_STANDALONE CPUM_DB_STANDALONE
  SSMStandalone_INCS     = include
- SSMStandalone_SOURCES  = VMMR3/SSM.cpp
+ SSMStandalone_SOURCES  = \
+ 	VMMR3/SSM.cpp \
+ 	VMMR3/CPUMR3Db.cpp
 endif # !VBOX_ONLY_EXTPACKS
 
@@ -704,4 +715,9 @@
 endif # bird wants good stacks
 
+
+# Alias the CPU database entries.
+$(foreach base,$(notdir $(basename $(wildcard $(PATH_SUB_CURRENT)/VMMR3/cpus/*.h))), $(eval $(base).o $(base).obj: CPUMR3Db.o))
+
+
 include $(FILE_KBUILD_SUB_FOOTER)
 
@@ -732,2 +748,3 @@
 LegacyandAMD64.o LegacyandAMD64.obj:           32BitToAMD64.o PAEToAMD64.o
 AMD64andLegacy.o AMD64andLegacy.obj:           AMD64To32Bit.o AMD64ToPAE.o
+
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 49893)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 49893)
@@ -0,0 +1,4896 @@
+/* $Id$ */
+/** @file
+ * CPUM - CPU MSR Registers.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+/*******************************************************************************
+*   Header Files                                                               *
+*******************************************************************************/
+#define LOG_GROUP LOG_GROUP_CPUM
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/hm.h>
+#include <VBox/vmm/tm.h>
+#include "CPUMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/err.h>
+
+
+/*******************************************************************************
+*   Defined Constants And Macros                                               *
+*******************************************************************************/
+/**
+ * Validates the CPUMMSRRANGE::offCpumCpu value and declares a local variable
+ * pointing to it.
+ *
+ * ASSUMES sizeof(a_Type) is a power of two and that the member is aligned
+ * correctly.
+ */
+#define CPUM_MSR_ASSERT_CPUMCPU_OFFSET_RETURN(a_pVCpu, a_pRange, a_Type, a_VarName) \
+    AssertMsgReturn(   (a_pRange)->offCpumCpu >= 8 \
+                    && (a_pRange)->offCpumCpu < sizeof(CPUMCPU) \
+                    && !((a_pRange)->offCpumCpu & (RT_MIN(sizeof(a_Type), 8) - 1)) \
+                    , ("offCpumCpu=%#x %s\n", (a_pRange)->offCpumCpu, (a_pRange)->szName), \
+                    VERR_CPUM_MSR_BAD_CPUMCPU_OFFSET); \
+    a_Type *a_VarName = (a_Type *)((uintptr_t)&(a_pVCpu)->cpum.s + (a_pRange)->offCpumCpu)
+
+
+/*******************************************************************************
+*   Structures and Typedefs                                                    *
+*******************************************************************************/
+
+/**
+ * Implements reading one or more MSRs.
+ *
+ * @returns VBox status code.
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_CPUM_RAISE_GP_0 on failure (invalid MSR).
+ *
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   idMsr       The MSR we're reading.
+ * @param   pRange      The MSR range descriptor.
+ * @param   puValue     Where to return the value.
+ */
+typedef DECLCALLBACK(int) FNCPUMRDMSR(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
+/** Pointer to a RDMSR worker for a specific MSR or range of MSRs.  */
+typedef FNCPUMRDMSR *PFNCPUMRDMSR;
+
+
+/**
+ * Implements writing one or more MSRs.
+ *
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_CPUM_RAISE_GP_0 on failure.
+ *
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   idMsr       The MSR we're writing.
+ * @param   pRange      The MSR range descriptor.
+ * @param   uValue      The value to set.
+ */
+typedef DECLCALLBACK(int) FNCPUMWRMSR(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue);
+/** Pointer to a WRMSR worker for a specific MSR or range of MSRs.  */
+typedef FNCPUMWRMSR *PFNCPUMWRMSR;
+
+
+
+/*
+ * Generic functions.
+ * Generic functions.
+ * Generic functions.
+ */
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_FixedValue(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IgnoreWrite(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    Log(("CPUM: Ignoring WRMSR %#x (%s), %#llx\n", idMsr, pRange->szName, uValue));
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_WriteOnly(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    return VERR_CPUM_RAISE_GP_0;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_ReadOnly(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    Assert(pRange->fWrGpMask == UINT64_MAX);
+    return VERR_CPUM_RAISE_GP_0;
+}
+
+
+
+
+/*
+ * IA32
+ * IA32
+ * IA32
+ */
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32P5McAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0; /** @todo implement machine check injection. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32P5McAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement machine check injection. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32P5McType(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0; /** @todo implement machine check injection. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32P5McType(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement machine check injection. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32TimestampCounter(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = TMCpuTickGet(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32TimestampCounter(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue);
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32ApicBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    if (   !pVM->cpum.s.GuestFeatures.fApic
+        && !pVM->cpum.s.GuestFeatures.fX2Apic)
+    {
+        Log(("CPUM: %s, apic not present -> GP\n", pRange->szName));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+
+    *puValue = pVCpu->cpum.s.Guest.msrApicBase;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32ApicBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    int rc = PDMApicSetBase(pVCpu, uValue);
+    if (rc != VINF_SUCCESS)
+        rc = VERR_CPUM_RAISE_GP_0; /* Invalid APIC base -> raise #GP(0) on the guest. */
+    return rc; /* Was 'return VINF_SUCCESS', which silently discarded the error above. */
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32FeatureControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 1; /* Locked, no VT-X, no SYSENTER micromanagement. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32FeatureControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    return VERR_CPUM_RAISE_GP_0;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32BiosUpdateTrigger(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Fake bios update trigger better.  The value is the address to an
+     *        update package, I think.  We should probably GP if it's invalid. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32SmmMonitorCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo SMM. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32SmmMonitorCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo SMM. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32PmcN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo check CPUID leaf 0ah. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32PmcN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo check CPUID leaf 0ah. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32MonitorFilterLineSize(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo return 0x1000 if we try emulate mwait 100% correctly. */
+    *puValue = 0x40; /** @todo Change to CPU cache line size. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32MonitorFilterLineSize(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo should remember writes, though it's supposedly something only a BIOS
+     * would write so, it's not extremely important. */
+    return VINF_SUCCESS;
+}
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32MPerf(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Read MPERF: Adjust against previously written MPERF value.  Is TSC
+     *        what we want? */
+    *puValue = TMCpuTickGet(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32MPerf(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Write MPERF: Calc adjustment. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32APerf(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Read APERF: Adjust against previously written MPERF value.  Is TSC
+     *        what we want? */
+    *puValue = TMCpuTickGet(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32APerf(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Write APERF: Calc adjustment. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32MtrrCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* This is currently a bit weird. :-) */
+    uint8_t const   cVariableRangeRegs              = 0;
+    bool const      fSystemManagementRangeRegisters = false;
+    bool const      fFixedRangeRegisters            = false;
+    bool const      fWriteCombiningType             = false;
+    *puValue = cVariableRangeRegs
+             | (fFixedRangeRegisters            ? RT_BIT_64(8)  : 0)
+             | (fWriteCombiningType             ? RT_BIT_64(10) : 0)
+             | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32MtrrPhysBaseN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Implement variable MTRR storage. */
+    Assert(pRange->uInitOrReadValue == (idMsr - 0x200) / 2);
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * Validates an IA32_MTRR_PHYSBASEn write: the memory type field must be
+ * below 7 and no bits above the guest's maximum physical address width may
+ * be set; #GP(0) otherwise.  The value itself is not stored yet. */
+static DECLCALLBACK(int) cpumMsrWr_Ia32MtrrPhysBaseN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /*
+     * Validate the value.
+     */
+    Assert(pRange->uInitOrReadValue == (idMsr - 0x200) / 2);
+
+    /* Memory types 7 and up are reserved. */
+    if ((uValue & 0xff) >= 7)
+    {
+        Log(("CPUM: Invalid type set writing MTRR PhysBase MSR %#x: %#llx (%#llx)\n", idMsr, uValue, uValue & 0xff));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+
+    /* Mask of physical address bits the guest CPU does not implement. */
+    uint64_t fInvPhysMask = ~(RT_BIT_64(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U);
+    if (fInvPhysMask & uValue)
+    {
+        Log(("CPUM: Invalid physical address bits set writing MTRR PhysBase MSR %#x: %#llx (%#llx)\n",
+             idMsr, uValue, uValue & fInvPhysMask));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+
+    /*
+     * Store it.
+     */
+    /** @todo Implement variable MTRR storage. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * Variable MTRR mask: reads as zero until backing storage exists. */
+static DECLCALLBACK(int) cpumMsrRd_Ia32MtrrPhysMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Implement variable MTRR storage. */
+    Assert(pRange->uInitOrReadValue == (idMsr - 0x200) / 2);
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * Validates an IA32_MTRR_PHYSMASKn write: no bits above the guest's maximum
+ * physical address width may be set (#GP(0) otherwise); not stored yet. */
+static DECLCALLBACK(int) cpumMsrWr_Ia32MtrrPhysMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /*
+     * Validate the value.
+     */
+    Assert(pRange->uInitOrReadValue == (idMsr - 0x200) / 2);
+
+    /* Mask of physical address bits the guest CPU does not implement. */
+    uint64_t fInvPhysMask = ~(RT_BIT_64(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U);
+    if (fInvPhysMask & uValue)
+    {
+        Log(("CPUM: Invalid physical address bits set writing MTRR PhysMask MSR %#x: %#llx (%#llx)\n",
+             idMsr, uValue, uValue & fInvPhysMask));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+
+    /*
+     * Store it.
+     */
+    /** @todo Implement variable MTRR storage. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * Reads a fixed MTRR from the CPUMCPU backing field located via the
+ * range's offset (CPUM_MSR_ASSERT_CPUMCPU_OFFSET_RETURN resolves it). */
+static DECLCALLBACK(int) cpumMsrRd_Ia32MtrrFixed(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    CPUM_MSR_ASSERT_CPUMCPU_OFFSET_RETURN(pVCpu, pRange, uint64_t, puFixedMtrr);
+    *puValue = *puFixedMtrr;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * Writes a fixed MTRR after checking that each of the eight 8-bit memory
+ * type fields holds a value below 7 (7+ are reserved => #GP(0)). */
+static DECLCALLBACK(int) cpumMsrWr_Ia32MtrrFixed(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    CPUM_MSR_ASSERT_CPUMCPU_OFFSET_RETURN(pVCpu, pRange, uint64_t, puFixedMtrr);
+    for (uint32_t cShift = 0; cShift < 63; cShift += 8)
+    {
+        uint8_t uType = (uint8_t)(uValue >> cShift);
+        if (uType >= 7)
+        {
+            /* Cast uType to uint64_t to match %#llx; default argument
+               promotion only widens a uint8_t to int, which is the wrong
+               size for the ll length modifier. */
+            Log(("CPUM: Invalid MTRR type at %u:%u in fixed range (%#x/%s): %#llx (%#llx)\n",
+                 cShift + 7, cShift, idMsr, pRange->szName, uValue, (uint64_t)uType));
+            return VERR_CPUM_RAISE_GP_0;
+        }
+    }
+    *puFixedMtrr = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * Returns the stored IA32_MTRR_DEF_TYPE value. */
+static DECLCALLBACK(int) cpumMsrRd_Ia32MtrrDefType(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * Stores IA32_MTRR_DEF_TYPE; a reserved default memory type (7+) => #GP(0). */
+static DECLCALLBACK(int) cpumMsrWr_Ia32MtrrDefType(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    if ((uValue & 0xff) >= 7)
+    {
+        /* Format fixed: a %s for pRange->szName was missing, so the string
+           pointer was consumed by the first %#llx and the last argument was
+           unmatched. */
+        Log(("CPUM: Invalid MTRR default type value (%s): %#llx (%#llx)\n", pRange->szName, uValue, uValue & 0xff));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+
+    pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * Returns the guest IA32_PAT value. */
+static DECLCALLBACK(int) cpumMsrRd_Ia32Pat(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pVCpu->cpum.s.Guest.msrPAT;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * Stores IA32_PAT verbatim.  NOTE(review): no validation of the PAT entry
+ * type fields is done here - confirm whether reserved types should #GP. */
+static DECLCALLBACK(int) cpumMsrWr_Ia32Pat(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    pVCpu->cpum.s.Guest.msrPAT = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * Returns the guest IA32_SYSENTER_CS value. */
+static DECLCALLBACK(int) cpumMsrRd_Ia32SysEnterCs(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pVCpu->cpum.s.Guest.SysEnter.cs;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * Stores IA32_SYSENTER_CS without masking. */
+static DECLCALLBACK(int) cpumMsrWr_Ia32SysEnterCs(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Note! We used to mask this by 0xffff, but turns out real HW doesn't and
+             there are generally 32-bit working bits backing this register. */
+    pVCpu->cpum.s.Guest.SysEnter.cs = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * Returns the guest IA32_SYSENTER_ESP value. */
+static DECLCALLBACK(int) cpumMsrRd_Ia32SysEnterEsp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pVCpu->cpum.s.Guest.SysEnter.esp;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * Stores IA32_SYSENTER_ESP; a non-canonical address raises #GP(0). */
+static DECLCALLBACK(int) cpumMsrWr_Ia32SysEnterEsp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Reject non-canonical addresses up front. */
+    if (!X86_IS_CANONICAL(uValue))
+    {
+        Log(("CPUM: IA32_SYSENTER_ESP not canonical! %#llx\n", uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    pVCpu->cpum.s.Guest.SysEnter.esp = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * Returns the guest IA32_SYSENTER_EIP value. */
+static DECLCALLBACK(int) cpumMsrRd_Ia32SysEnterEip(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pVCpu->cpum.s.Guest.SysEnter.eip;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * Stores IA32_SYSENTER_EIP; a non-canonical address raises #GP(0). */
+static DECLCALLBACK(int) cpumMsrWr_Ia32SysEnterEip(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Reject non-canonical addresses up front. */
+    if (!X86_IS_CANONICAL(uValue))
+    {
+        Log(("CPUM: IA32_SYSENTER_EIP not canonical! %#llx\n", uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    pVCpu->cpum.s.Guest.SysEnter.eip = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/*
+ * NOTE(review): The IA32_MCG_*, IA32_DEBUGCTL and IA32_SMRR_* handlers below
+ * are all stubs: reads return zero and writes are ignored, except the SMRR
+ * writes which raise #GP(0) (presumably because SMRR is only writable from
+ * SMM on real hardware - confirm when implementing SMM).
+ */
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32McgCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+#if 0 /** @todo implement machine checks. */
+    *puValue = pRange->uInitOrReadValue & (RT_BIT_64(8) | 0);
+#else
+    *puValue = 0;
+#endif
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32McgStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement machine checks. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32McgStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement machine checks. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32McgCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement machine checks. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32McgCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement machine checks. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32DebugCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement IA32_DEBUGCTL. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32DebugCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement IA32_DEBUGCTL. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32SmrrPhysBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement intel SMM. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32SmrrPhysBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement intel SMM. */
+    /* Writes raise #GP(0), unlike the other stubs in this section. */
+    return VERR_CPUM_RAISE_GP_0;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32SmrrPhysMask(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement intel SMM. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32SmrrPhysMask(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement intel SMM. */
+    /* Writes raise #GP(0), unlike the other stubs in this section. */
+    return VERR_CPUM_RAISE_GP_0;
+}
+
+
+/*
+ * NOTE(review): DCA and IA32_PERFEVTSELn handlers below are read-as-zero /
+ * ignore-write stubs.
+ */
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32PlatformDcaCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement intel direct cache access (DCA)?? */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32PlatformDcaCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement intel direct cache access (DCA)?? */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32CpuDcaCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement intel direct cache access (DCA)?? */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32Dca0Cap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement intel direct cache access (DCA)?? */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32Dca0Cap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement intel direct cache access (DCA)?? */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32PerfEvtSelN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement IA32_PERFEVTSEL0+. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32PerfEvtSelN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement IA32_PERFEVTSEL0+. */
+    return VINF_SUCCESS;
+}
+
+
+/*
+ * NOTE(review): The performance counter, PEBS and clock modulation handlers
+ * below are all stubs (reads return zero, writes are ignored), except
+ * IA32_PERF_STATUS which returns the range's configured initial value.
+ */
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32PerfStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement IA32_PERFSTATUS. */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32PerfCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement IA32_PERFCTL. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32PerfCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement IA32_PERFCTL. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32FixedCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement IA32_FIXED_CTRn (fixed performance counters). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32FixedCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement IA32_FIXED_CTRn (fixed performance counters). */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32PerfCapabilities(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement performance counters. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32PerfCapabilities(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement performance counters. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32FixedCtrCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement performance counters. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32FixedCtrCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement performance counters. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32PerfGlobalStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement performance counters. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32PerfGlobalStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement performance counters. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32PerfGlobalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement performance counters. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32PerfGlobalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement performance counters. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32PerfGlobalOvfCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement performance counters. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32PerfGlobalOvfCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement performance counters. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32PebsEnable(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement performance counters. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32PebsEnable(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement performance counters. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32ClockModulation(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement IA32_CLOCK_MODULATION. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32ClockModulation(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement IA32_CLOCK_MODULATION. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32ThermInterrupt(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement IA32_THERM_INTERRUPT. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32ThermInterrupt(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement IA32_THERM_INTERRUPT. (Todo text fixed; it previously
+     * named IA32_THERM_STATUS, swapped with the handler below.) */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32ThermStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement IA32_THERM_STATUS. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Ia32ThermStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement IA32_THERM_STATUS. (Todo text fixed; it previously
+     * named IA32_THERM_INTERRUPT, swapped with the handler above.) */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * IA32_THERM2_CTL stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_Ia32Therm2Ctl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement IA32_THERM2_CTL. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * IA32_THERM2_CTL stub: writes are ignored. */
+static DECLCALLBACK(int) cpumMsrWr_Ia32Therm2Ctl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement IA32_THERM2_CTL. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * Returns the stored IA32_MISC_ENABLE value. */
+static DECLCALLBACK(int) cpumMsrRd_Ia32MiscEnable(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * Stores IA32_MISC_ENABLE, forcing the PEBS/BTS-unavailable bits to stay set. */
+static DECLCALLBACK(int) cpumMsrWr_Ia32MiscEnable(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+#ifdef LOG_ENABLED
+    /* uOld only exists for the Log statement below; Log() compiles to nothing
+       when LOG_ENABLED is not defined, so this does not break release builds. */
+    uint64_t const uOld = pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
+#endif
+
+    /* Unsupported bits are generally ignored and stripped by the MSR range
+       entry that got us here. So, we just need to preserve fixed bits. */
+    pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = uValue
+                                           | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL
+                                           | MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
+
+    Log(("CPUM: IA32_MISC_ENABLE; old=%#llx written=%#llx => %#llx\n",
+         uOld, uValue,  pVCpu->cpum.s.GuestMsrs.msr.MiscEnable));
+
+    /** @todo Wire IA32_MISC_ENABLE bit 22 to our NT 4 CPUID trick. */
+    /** @todo Wire up MSR_IA32_MISC_ENABLE_XD_DISABLE. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * IA32_MCi_{CTL,STATUS,ADDR,MISC}: the low two bits of the MSR number select
+ * the register within a bank (0=CTL, 1=STATUS, 2=ADDR, 3=MISC).  CTL and
+ * STATUS read as zero; ADDR and MISC raise #GP(0). */
+static DECLCALLBACK(int) cpumMsrRd_Ia32McCtlStatusAddrMiscN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Implement machine check exception injection. */
+    switch (idMsr & 3)
+    {
+        case 0:
+        case 1:
+            *puValue = 0;
+            break;
+
+        /* The ADDR and MISC registers aren't accessible since the
+           corresponding STATUS bits are zero. */
+        case 2:
+            Log(("CPUM: Reading IA32_MCi_ADDR %#x -> #GP\n", idMsr));
+            return VERR_CPUM_RAISE_GP_0;
+        case 3:
+            Log(("CPUM: Reading IA32_MCi_MISC %#x -> #GP\n", idMsr));
+            return VERR_CPUM_RAISE_GP_0;
+    }
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * IA32_MCi_{CTL,STATUS,ADDR,MISC} writes: CTL writes are ignored; STATUS,
+ * ADDR and MISC accept only zero, anything else raises #GP(0). */
+static DECLCALLBACK(int) cpumMsrWr_Ia32McCtlStatusAddrMiscN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    switch (idMsr & 3)
+    {
+        case 0:
+            /* Ignore writes to the CTL register. */
+            break;
+
+        case 1:
+            /* According to specs, the STATUS register can only be written to
+               with the value 0.  VBoxCpuReport thinks different for a
+               Pentium M Dothan, but implementing according to specs now. */
+            if (uValue != 0)
+            {
+                Log(("CPUM: Writing non-zero value (%#llx) to IA32_MCi_STATUS %#x -> #GP\n", uValue, idMsr));
+                return VERR_CPUM_RAISE_GP_0;
+            }
+            break;
+
+        /* Specs states that ADDR and MISC can be cleared by writing zeros.
+           Writing 1s will GP.  Need to figure out how this relates to the
+           ADDRV and MISCV status flags.  If writing is independent of those
+           bits, we need to know whether the CPU really implements them since
+           that is exposed by writing 0 to them.
+           Implementing the solution with the fewer GPs for now. */
+        case 2:
+            if (uValue != 0)
+            {
+                Log(("CPUM: Writing non-zero value (%#llx) to IA32_MCi_ADDR %#x -> #GP\n", uValue, idMsr));
+                return VERR_CPUM_RAISE_GP_0;
+            }
+            break;
+        case 3:
+            if (uValue != 0)
+            {
+                Log(("CPUM: Writing non-zero value (%#llx) to IA32_MCi_MISC %#x -> #GP\n", uValue, idMsr));
+                return VERR_CPUM_RAISE_GP_0;
+            }
+            break;
+    }
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * IA32_MCi_CTL2 stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_Ia32McNCtl2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Implement machine check exception injection. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * IA32_MCi_CTL2 stub: writes are ignored. */
+static DECLCALLBACK(int) cpumMsrWr_Ia32McNCtl2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Implement machine check exception injection. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * IA32_DS_AREA stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_Ia32DsArea(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement IA32_DS_AREA. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * IA32_DS_AREA stub: writes are ignored. */
+static DECLCALLBACK(int) cpumMsrWr_Ia32DsArea(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement IA32_DS_AREA.  (Todo added; this writer was the only
+     * stub in the group without one.) */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * IA32_TSC_DEADLINE stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_Ia32TscDeadline(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement TSC deadline timer. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * IA32_TSC_DEADLINE stub: writes are ignored. */
+static DECLCALLBACK(int) cpumMsrWr_Ia32TscDeadline(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement TSC deadline timer. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * Forwards an x2APIC MSR read to the APIC device emulation; any failure
+ * status is converted into #GP(0). */
+static DECLCALLBACK(int) cpumMsrRd_Ia32X2ApicN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    int rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue);
+    if (rc != VINF_SUCCESS)
+    {
+        Log(("CPUM: X2APIC %#x read => %Rrc => #GP\n", idMsr, rc));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * Forwards an x2APIC MSR write to the APIC device emulation; any failure
+ * status is converted into #GP(0). */
+static DECLCALLBACK(int) cpumMsrWr_Ia32X2ApicN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    int rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue);
+    if (rc != VINF_SUCCESS)
+    {
+        /* Argument order fixed: uValue goes with %#llx, rc with %Rrc (they
+           were swapped, producing garbage in the log). */
+        Log(("CPUM: X2APIC %#x write %#llx => %Rrc => #GP\n", idMsr, uValue, rc));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * NOTE(review): the function name suffix is empty (cpumMsrRd_) - looks like
+ * a placeholder; reads as zero.  Not renamed here since the generated MSR
+ * range tables may reference it by this exact name - verify and name it. */
+static DECLCALLBACK(int) cpumMsrRd_(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/*
+ * NOTE(review): VMX capability MSR readers.  All read as zero, i.e. VMX is
+ * not exposed to the guest.  If VMX is ever exposed, note that real hardware
+ * reports non-zero values for most of these (e.g. the CR0/CR4 FIXED1 MSRs
+ * normally read with bits set) - confirm against the Intel SDM then.
+ */
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxPinbasedCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxProcbasedCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxExitCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxEntryCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxMisc(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxCr0Fixed0(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxCr0Fixed1(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxCr4Fixed0(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxCr4Fixed1(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxVmcsEnum(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxProcBasedCtls2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxEptVpidCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxTruePinbasedCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxTrueProcbasedCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxTrueExitCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Ia32VmxTrueEntryCtls(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+
+
+
+
+
+
+
+
+/*
+ * AMD64
+ * AMD64
+ * AMD64
+ */
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * Returns the guest EFER value. */
+static DECLCALLBACK(int) cpumMsrRd_Amd64Efer(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pVCpu->cpum.s.Guest.msrEFER;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * Writes EFER: only bits whose corresponding extended CPUID feature is
+ * exposed to the guest (NXE, LME, SCE, FFXSR) may be changed; LME cannot be
+ * toggled while paging is enabled (#GP(0)); TLB flush and PGM NXE
+ * notification are performed when NXE/LME/LMA change. */
+static DECLCALLBACK(int) cpumMsrWr_Amd64Efer(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    PVM             pVM          = pVCpu->CTX_SUFF(pVM);
+    uint64_t const  uOldEfer     = pVCpu->cpum.s.Guest.msrEFER;
+    /* CPUID leaf 0x80000001 EDX, if that leaf exists for this guest. */
+    uint32_t const  fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
+                                 ? pVM->cpum.s.aGuestCpuIdExt[1].edx
+                                 : 0;
+    uint64_t        fMask        = 0;
+
+    /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
+    if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX)
+        fMask |= MSR_K6_EFER_NXE;
+    if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
+        fMask |= MSR_K6_EFER_LME;
+    if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
+        fMask |= MSR_K6_EFER_SCE;
+    if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
+        fMask |= MSR_K6_EFER_FFXSR;
+
+    /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if
+       paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
+    if (   (uOldEfer & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME)
+        && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG))
+    {
+        Log(("CPUM: Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+
+    /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
+    AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
+              ("Unexpected value %RX64\n", uValue));
+    /* Keep the non-writable bits from the old value, take writable ones from uValue. */
+    pVCpu->cpum.s.Guest.msrEFER = (uOldEfer & ~fMask) | (uValue & fMask);
+
+    /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
+       if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
+    if (   (uOldEfer                    & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
+        != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
+    {
+        /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
+        HMFlushTLB(pVCpu);
+
+        /* Notify PGM about NXE changes. */
+        if (   (uOldEfer                    & MSR_K6_EFER_NXE)
+            != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
+            PGMNotifyNxeChanged(pVCpu, !(uOldEfer & MSR_K6_EFER_NXE));
+    }
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * Returns the guest STAR (legacy SYSCALL target) value. */
+static DECLCALLBACK(int) cpumMsrRd_Amd64SyscallTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pVCpu->cpum.s.Guest.msrSTAR;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * Stores STAR verbatim (no canonical check needed; STAR holds selectors and
+ * a 32-bit target, not a 64-bit address). */
+static DECLCALLBACK(int) cpumMsrWr_Amd64SyscallTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    pVCpu->cpum.s.Guest.msrSTAR = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * Returns the guest LSTAR (64-bit SYSCALL target) value. */
+static DECLCALLBACK(int) cpumMsrRd_Amd64LongSyscallTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pVCpu->cpum.s.Guest.msrLSTAR;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}
+ * Stores LSTAR; a non-canonical address raises #GP(0). */
+static DECLCALLBACK(int) cpumMsrWr_Amd64LongSyscallTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    if (!X86_IS_CANONICAL(uValue))
+    {
+        /* Format fixed: the stray '%' in '%#GP' started a bogus %#G (double)
+           conversion with no matching argument; '#GP' is meant literally. */
+        Log(("CPUM: wrmsr %s(%#x), %#llx -> #GP - not canonical\n", pRange->szName, idMsr, uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    pVCpu->cpum.s.Guest.msrLSTAR = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Amd64CompSyscallTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* MSR_K6_CSTAR: compatibility mode SYSCALL target RIP. */
+    *puValue = pVCpu->cpum.s.Guest.msrCSTAR;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Amd64CompSyscallTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Non-canonical target addresses raise #GP(0). */
+    if (!X86_IS_CANONICAL(uValue))
+    {
+        /* Fixed "%#GP" -> "#GP": '%#G' is parsed as a conversion specifier and
+           would make the logger read a non-existent 4th argument. */
+        Log(("CPUM: wrmsr %s(%#x), %#llx -> #GP - not canonical\n", pRange->szName, idMsr, uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    pVCpu->cpum.s.Guest.msrCSTAR = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Amd64SyscallFlagMask(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* MSR_K6_SFMASK: RFLAGS bits cleared by SYSCALL. */
+    *puValue = pVCpu->cpum.s.Guest.msrSFMASK;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Amd64SyscallFlagMask(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Stored unchecked; it is a flag mask, not an address. */
+    pVCpu->cpum.s.Guest.msrSFMASK = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Amd64FsBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pVCpu->cpum.s.Guest.fs.u64Base;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Amd64FsBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* WRMSR of a non-canonical base address raises #GP(0) on real hardware,
+       same as for LSTAR/CSTAR above. */
+    if (!X86_IS_CANONICAL(uValue))
+    {
+        Log(("CPUM: wrmsr %s(%#x), %#llx -> #GP - not canonical\n", pRange->szName, idMsr, uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    pVCpu->cpum.s.Guest.fs.u64Base = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Amd64GsBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pVCpu->cpum.s.Guest.gs.u64Base;
+    return VINF_SUCCESS;
+}
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Amd64GsBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Non-canonical base -> #GP(0), see cpumMsrWr_Amd64FsBase. */
+    if (!X86_IS_CANONICAL(uValue))
+    {
+        Log(("CPUM: wrmsr %s(%#x), %#llx -> #GP - not canonical\n", pRange->szName, idMsr, uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    pVCpu->cpum.s.Guest.gs.u64Base = uValue;
+    return VINF_SUCCESS;
+}
+
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Amd64KernelGsBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
+    return VINF_SUCCESS;
+}
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Amd64KernelGsBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Non-canonical base -> #GP(0), see cpumMsrWr_Amd64FsBase. */
+    if (!X86_IS_CANONICAL(uValue))
+    {
+        Log(("CPUM: wrmsr %s(%#x), %#llx -> #GP - not canonical\n", pRange->szName, idMsr, uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_Amd64TscAux(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* MSR_K8_TSC_AUX, returned by RDTSCP in ECX. */
+    *puValue = pVCpu->cpum.s.GuestMsrs.msr.TscAux;
+    return VINF_SUCCESS;
+}
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_Amd64TscAux(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* NOTE(review): real hardware #GPs if bits 63:32 are set - confirm whether
+       the full 64-bit store here is intentional leniency. */
+    pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/*
+ * Intel specific
+ * Intel specific
+ * Intel specific
+ */
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelEblCrPowerOn(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* Returns the static value configured for this MSR range. */
+    /** @todo recalc clock frequency ratio? */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelEblCrPowerOn(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Writes are currently ignored. */
+    /** @todo Write EBL_CR_POWERON: Remember written bits. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelPlatformInfo100MHz(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+
+    /* Just indicate a fixed TSC, no turbo boost, no programmable anything. */
+    uint64_t uTscHz = TMCpuTicksPerSecond(pVM);
+    uint8_t  uTsc100MHz = (uint8_t)(uTscHz / UINT32_C(100000000)); /* ratio truncated to 8 bits */
+    *puValue = ((uint32_t)uTsc100MHz << 8)   /* TSC invariant frequency. */
+             | ((uint64_t)uTsc100MHz << 40); /* The max turbo frequency. */
+
+    /* Ivy bridge has a minimum operating ratio as well. */
+    /* NOTE(review): the comment above says Ivy bridge while the todo below says
+       sandy bridge - confirm which model actually reports bits 55:48. */
+    if (true) /** @todo detect sandy bridge. */
+        *puValue |= (uint64_t)uTsc100MHz << 48;
+
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelPlatformInfo133MHz(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* Just indicate a fixed TSC, no turbo boost, no programmable anything. */
+    uint64_t uTscHz = TMCpuTicksPerSecond(pVCpu->CTX_SUFF(pVM));
+    uint8_t  uTsc133MHz = (uint8_t)(uTscHz / UINT32_C(133333333)); /* 133.33 MHz bus clock */
+    *puValue = ((uint32_t)uTsc133MHz << 8)   /* TSC invariant frequency. */
+             | ((uint64_t)uTsc133MHz << 40); /* The max turbo frequency. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelPkgCStConfigControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* Returns the last value stored by the write callback below. */
+    *puValue = pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelPkgCStConfigControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Bit 15 set means the MSR is locked (write protected) -> #GP.
+       (Log messages fixed: "WRMDR" was a typo for "WRMSR".) */
+    if (pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl & RT_BIT_64(15))
+    {
+        Log(("CPUM: WRMSR %#x (%s), %#llx: Write protected -> #GP\n", idMsr, pRange->szName, uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+#if 0 /** @todo check what real (old) hardware does. */
+    if ((uValue & 7) >= 5)
+    {
+        Log(("CPUM: WRMSR %#x (%s), %#llx: Invalid limit (%d) -> #GP\n", idMsr, pRange->szName, uValue, (uint32_t)(uValue & 7)));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+#endif
+    pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = uValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelPmgIoCaptureBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* Stub: feature not emulated, reads as zero. */
+    /** @todo implement I/O mwait wakeup. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelPmgIoCaptureBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Stub: writes are ignored. */
+    /** @todo implement I/O mwait wakeup. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelLastBranchFromToN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* LBR stack is not emulated; all entries read as zero. */
+    /** @todo implement last branch records. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelLastBranchFromToN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Writes are ignored while LBRs are unimplemented. */
+    /** @todo implement last branch records. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelLastBranchFromN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement last branch records. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelLastBranchFromN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement last branch records. */
+    /** @todo Probing indicates that bit 63 is settable on SandyBridge, at least
+     *        if the rest of the bits are zero.  Automatic sign extending?
+     *        Investigate! */
+    if (!X86_IS_CANONICAL(uValue))
+    {
+        /* Fixed "%#GP" -> "#GP": '%#G' would be misparsed as a conversion. */
+        Log(("CPUM: wrmsr %s(%#x), %#llx -> #GP - not canonical\n", pRange->szName, idMsr, uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelLastBranchToN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* LBR stack is not emulated; all entries read as zero. */
+    /** @todo implement last branch records. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelLastBranchToN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement last branch records. */
+    /** @todo Probing indicates that bit 63 is settable on SandyBridge, at least
+     *        if the rest of the bits are zero.  Automatic sign extending?
+     *        Investigate! */
+    if (!X86_IS_CANONICAL(uValue))
+    {
+        /* Fixed "%#GP" -> "#GP": '%#G' would be misparsed as a conversion. */
+        Log(("CPUM: wrmsr %s(%#x), %#llx -> #GP - not canonical\n", pRange->szName, idMsr, uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    return VINF_SUCCESS;
+}
+
+
+/* Stubs for LBR top-of-stack, L2-cache control (BBL_CR_CTL*), thermal target
+   and off-core-response MSRs: reads return zero or the configured range value,
+   writes are ignored. */
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelLastBranchTos(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement last branch records. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelLastBranchTos(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement last branch records. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelBblCrCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelBblCrCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Writes are ignored. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelBblCrCtl3(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelBblCrCtl3(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Writes are ignored. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7TemperatureTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7TemperatureTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Writes are ignored. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7MsrOffCoreResponseN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo machine check. */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7MsrOffCoreResponseN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo machine check. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7MiscPwrMgmt(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7MiscPwrMgmt(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Writes are ignored. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelP6CrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* The control register number to read is kept in the range's init/read value. */
+    int rc = CPUMGetGuestCRx(pVCpu, pRange->uInitOrReadValue, puValue);
+    AssertRC(rc);
+    if (RT_FAILURE(rc))
+        *puValue = 0; /* don't hand the guest uninitialized data on failure */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelP6CrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* This CRx interface differs from the MOV CRx, GReg interface in that
+       #GP(0) isn't raised if unsupported bits are written to.  Instead they
+       are simply ignored and masked off. (Pentium M Dothan)  */
+    /** @todo Implement MSR_P6_CRx writing.  Too much effort for very little, if
+     *        any, gain. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelCpuId1FeatureMaskEcdx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* All-ones mask = no features masked off. */
+    /** @todo implement CPUID masking.  */
+    *puValue = UINT64_MAX;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelCpuId1FeatureMaskEcdx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Writes are ignored until masking is implemented. */
+    /** @todo implement CPUID masking.  */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelCpuId1FeatureMaskEax(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement CPUID masking.  */
+    /* The original never assigned *puValue, so the guest received whatever
+       happened to be on the stack.  Return all-ones like the ECDX feature
+       mask readers (no features masked off). */
+    *puValue = UINT64_MAX;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelCpuId1FeatureMaskEax(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement CPUID masking.  */
+    return VINF_SUCCESS;
+}
+
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelCpuId80000001FeatureMaskEcdx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* All-ones mask = no features masked off. */
+    /** @todo implement CPUID masking.  */
+    *puValue = UINT64_MAX;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelCpuId80000001FeatureMaskEcdx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Writes are ignored until masking is implemented. */
+    /** @todo implement CPUID masking.  */
+    return VINF_SUCCESS;
+}
+
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyAesNiCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement AES-NI.  */
+    *puValue = 3;  /* Bit 0 is lock bit, bit 1 disables AES-NI. That's what they say. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7SandyAesNiCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* The read callback reports the lock bit (bit 0) as set, so any write #GPs. */
+    /** @todo implement AES-NI.  */
+    return VERR_CPUM_RAISE_GP_0;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7TurboRatioLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement intel C states.  */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7TurboRatioLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement intel C states.  */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7LbrSelect(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement last-branch-records.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7LbrSelect(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement last-branch-records.  */
+    return VINF_SUCCESS;
+}
+
+
+/* Stubs for error injection, VLW capability, power control and PEBS MSRs:
+   reads return zero (or the configured range value), writes are ignored. */
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyErrorControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement memory error injection (MSR_ERROR_CONTROL).  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7SandyErrorControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement memory error injection (MSR_ERROR_CONTROL).  */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7VirtualLegacyWireCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement memory VLW?  */
+    *puValue = pRange->uInitOrReadValue;
+    /* Note: A20M is known to be bit 1 as this was disclosed in spec update
+       AAJ49/AAK51/????, which documents the inversion of this bit.  The
+       Sandy bridge CPU here has value 0x74, so it probably doesn't have a BIOS
+       that correct things.  Some guesses at the other bits:
+                 bit 2 = INTR
+                 bit 4 = SMI
+                 bit 5 = INIT
+                 bit 6 = NMI */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7PowerCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7PowerCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo intel power management  */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyPebsNumAlt(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel performance counters.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7SandyPebsNumAlt(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo intel performance counters.  */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7PebsLdLat(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel performance counters.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7PebsLdLat(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo intel performance counters.  */
+    return VINF_SUCCESS;
+}
+
+
+/* Stubs for C-state residency counters, voltage-regulator config, the RAPL
+   power unit and package C-state interrupt response time limits: reads return
+   zero (or the configured range value), writes are ignored. */
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7PkgCnResidencyN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7CoreCnResidencyN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyVrCurrentConfig(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Figure out what MSR_VR_CURRENT_CONFIG & MSR_VR_MISC_CONFIG are.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7SandyVrCurrentConfig(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Figure out what MSR_VR_CURRENT_CONFIG & MSR_VR_MISC_CONFIG are.  */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyVrMiscConfig(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Figure out what MSR_VR_CURRENT_CONFIG & MSR_VR_MISC_CONFIG are.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7SandyVrMiscConfig(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Figure out what MSR_VR_CURRENT_CONFIG & MSR_VR_MISC_CONFIG are.  */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyRaplPowerUnit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel RAPL.  */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyPkgCnIrtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7SandyPkgCnIrtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo intel power management.  */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7SandyPkgC2Residency(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/* Stubs for the package and DRAM domain RAPL MSRs (power limit, energy/perf
+   status, power info): reads return zero, writes are ignored. */
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPkgPowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel RAPL.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7RaplPkgPowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo intel RAPL.  */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPkgEnergyStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPkgPerfStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPkgPowerInfo(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplDramPowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel RAPL.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7RaplDramPowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo intel RAPL.  */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplDramEnergyStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplDramPerfStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplDramPowerInfo(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/* Stubs for the PP0 (cores) and PP1 (graphics) domain RAPL MSRs: reads return
+   zero, writes are ignored. */
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp0PowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel RAPL.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7RaplPp0PowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo intel RAPL.  */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp0EnergyStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp0Policy(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel RAPL.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7RaplPp0Policy(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo intel RAPL.  */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp0PerfStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp1PowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel RAPL.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7RaplPp1PowerLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo intel RAPL.  */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp1EnergyStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel power management.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_IntelI7RaplPp1Policy(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo intel RAPL.  */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_IntelI7RaplPp1Policy(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo intel RAPL.  */
+    return VINF_SUCCESS;
+}
+
+
+
+
+
+/*
+ * Multiple vendor P6 MSRs.
+ * Multiple vendor P6 MSRs.
+ * Multiple vendor P6 MSRs.
+ *
+ * These MSRs were introduced with the P6 but not elevated to architectural
+ * MSRs, despite other vendors implementing them.
+ */
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_P6LastBranchFromIp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* AMD seems to just record RIP, while intel claims to record RIP+CS.BASE
+       if I read the docs correctly, thus the need for separate functions. */
+    /** @todo implement last branch records. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_P6LastBranchToIp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement last branch records. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_P6LastIntFromIp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* Last exception records are not emulated; reads return zero. */
+    /** @todo implement last exception records. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_P6LastIntFromIp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Writes are ignored. */
+    /** @todo implement last exception records. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_P6LastIntToIp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo implement last exception records. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_P6LastIntToIp(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo implement last exception records. */
+    return VINF_SUCCESS;
+}
+
+
+
+/*
+ * AMD specific
+ * AMD specific
+ * AMD specific
+ */
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam15hTscRate(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Implement TscRateMsr */
+    *puValue = RT_MAKE_U64(0, 1); /* 1.0 = reset value. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam15hTscRate(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Writes are ignored; reads always report the 1.0 reset value. */
+    /** @todo Implement TscRateMsr */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam15hLwpCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Implement AMD LWP? (Instructions: LWPINS, LWPVAL, LLWPCB, SLWPCB) */
+    /* Note: Only listed in BKDG for Family 15H. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam15hLwpCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Writes are ignored while LWP is unimplemented. */
+    /** @todo Implement AMD LWP? (Instructions: LWPINS, LWPVAL, LLWPCB, SLWPCB) */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam15hLwpCbAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Implement AMD LWP? (Instructions: LWPINS, LWPVAL, LLWPCB, SLWPCB) */
+    /* Note: Only listed in BKDG for Family 15H. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam15hLwpCbAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Writes are ignored while LWP is unimplemented. */
+    /** @todo Implement AMD LWP? (Instructions: LWPINS, LWPVAL, LLWPCB, SLWPCB) */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hMc4MiscN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo machine check. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hMc4MiscN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo machine check. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8PerfCtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD performance events. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8PerfCtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD performance events. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8PerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD performance events. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8PerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD performance events. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: returns the range's initial/read value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8SysCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD SYS_CFG */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8SysCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD SYS_CFG */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8HwCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD HW_CFG */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8HwCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD HW_CFG */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8IorrBaseN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IorrMask/IorrBase */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8IorrBaseN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IorrMask/IorrBase */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8IorrMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IorrMask/IorrBase */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8IorrMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IorrMask/IorrBase */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero for now. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8TopOfMemN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    *puValue = 0;
+    /** @todo return 4GB - RamHoleSize here for TOPMEM. Figure out what to return
+     *        for TOPMEM2. */
+    //if (pRange->uInitOrReadValue == 0)
+    //    *puValue = _4G - RamHoleSize;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8TopOfMemN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD TOPMEM and TOPMEM2/TOM2. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8NbCfg1(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD NB_CFG1 */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8NbCfg1(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD NB_CFG1 */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8McXcptRedir(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo machine check. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8McXcptRedir(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo machine check. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}
+ * Reads half a CPU name-string CPUID leaf; the leaf number and the EAX:EBX vs
+ * ECX:EDX half are derived from the range's uInitOrReadValue.  NOTE(review):
+ * assumes the MSR table seeds uInitOrReadValue so idx/2 + 0x80000001 lands on
+ * leaves 0x80000002..4 -- confirm against the range definitions. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8CpuNameN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVCpu->CTX_SUFF(pVM), pRange->uInitOrReadValue / 2 + 0x80000001, 0);
+    if (pLeaf)
+    {
+        if (!(pRange->uInitOrReadValue & 1))
+            *puValue = RT_MAKE_U64(pLeaf->uEax, pLeaf->uEbx);
+        else
+            *puValue = RT_MAKE_U64(pLeaf->uEcx, pLeaf->uEdx);
+    }
+    else
+        *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8CpuNameN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Remember guest programmed CPU name. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: returns the range's initial/read value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8HwThermalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD HTC. */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8HwThermalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD HTC. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8SwThermalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD STC. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8SwThermalCtrl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD STC. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8McCtlMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD MC. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8McCtlMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD MC. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8SmiOnIoTrapN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD SMM/SMI and I/O trap. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8SmiOnIoTrapN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD SMM/SMI and I/O trap. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8SmiOnIoTrapCtlSts(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD SMM/SMI and I/O trap. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8SmiOnIoTrapCtlSts(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD SMM/SMI and I/O trap. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8IntPendingMessage(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Interrupt pending message. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8IntPendingMessage(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Interrupt pending message. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8SmiTriggerIoCycle(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD SMM/SMI and trigger I/O cycle. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8SmiTriggerIoCycle(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD SMM/SMI and trigger I/O cycle. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hMmioCfgBaseAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD MMIO Configuration base address. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hMmioCfgBaseAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD MMIO Configuration base address. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hTrapCtlMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD 0xc0010059. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hTrapCtlMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD 0xc0010059. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: returns the range's initial/read value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hPStateCurLimit(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD P-states. */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: returns the range's initial/read value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hPStateControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD P-states. */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hPStateControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD P-states. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: returns the range's initial/read value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hPStateStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD P-states. */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hPStateStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD P-states. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: returns the range's initial/read value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hPStateN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD P-states. */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hPStateN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD P-states. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: returns the range's initial/read value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hCofVidControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD P-states. */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hCofVidControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD P-states. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: returns the range's initial/read value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hCofVidStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD P-states. */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hCofVidStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Note! Writing 0 seems to not GP, not sure if it does anything to the value... */
+    /** @todo AMD P-states. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hCStateIoBaseAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD C-states. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hCStateIoBaseAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD C-states. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hCpuWatchdogTimer(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD machine checks. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hCpuWatchdogTimer(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD machine checks. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8SmmBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD SMM. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8SmmBase(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD SMM. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8SmmAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD SMM. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8SmmAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD SMM. */
+    return VINF_SUCCESS;
+}
+
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8SmmMask(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD SMM. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8SmmMask(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD SMM. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8VmCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD SVM. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8VmCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD SVM. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8IgnNe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IGNNE\# control. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8IgnNe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IGNNE\# control. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8SmmCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD SMM. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8SmmCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD SMM. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8VmHSavePa(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD SVM. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8VmHSavePa(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD SVM. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero (RAZ). */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hVmLockKey(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD SVM. */
+    *puValue = 0; /* RAZ */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hVmLockKey(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD SVM. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero (RAZ). */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hSmmLockKey(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD SMM. */
+    *puValue = 0; /* RAZ */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hSmmLockKey(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD SMM. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hLocalSmiStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD SMM/SMI. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hLocalSmiStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD SMM/SMI. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: returns the range's initial/read value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hOsVisWrkIdLength(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD OS visible workaround. */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hOsVisWrkIdLength(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD OS visible workaround. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hOsVisWrkStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD OS visible workaround. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hOsVisWrkStatus(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD OS visible workaround. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam16hL2IPerfCtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD L2I performance counters. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam16hL2IPerfCtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD L2I performance counters. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam16hL2IPerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD L2I performance counters. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam16hL2IPerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD L2I performance counters. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam15hNorthbridgePerfCtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD Northbridge performance counters. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam15hNorthbridgePerfCtlN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD Northbridge performance counters. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: reads as zero. */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam15hNorthbridgePerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD Northbridge performance counters. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam15hNorthbridgePerfCtrN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD Northbridge performance counters. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: returns the range's initial/read value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7MicrocodeCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus. Need to be explored and verify K7 presence. */
+    /** @todo Undocumented register only seen mentioned in fam15h erratum \#608. */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7MicrocodeCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Need to be explored and verify K7 presence.  */
+    /** @todo Undocumented register only seen mentioned in fam15h erratum \#608. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Stub: returns the range's initial/read value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7ClusterIdMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus. Need to be explored and verify K7 presence. */
+    /** @todo Undocumented register only seen mentioned in fam16h BKDG r3.00 when
+     *        describing EBL_CR_POWERON. */
+    *puValue = pRange->uInitOrReadValue;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7ClusterIdMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Need to be explored and verify K7 presence.  */
+    /** @todo Undocumented register only seen mentioned in fam16h BKDG r3.00 when
+     *        describing EBL_CR_POWERON. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Returns CPUID leaf 0x00000007/0 EBX and EAX packed into one value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8CpuIdCtlStd07hEbax(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVCpu->CTX_SUFF(pVM), 0x00000007, 0);
+    if (pLeaf)
+        *puValue = RT_MAKE_U64(pLeaf->uEbx, pLeaf->uEax);
+    else
+        *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8CpuIdCtlStd07hEbax(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Changing CPUID leaf 7/0. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Returns ECX of CPUID leaf 0x00000006. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8CpuIdCtlStd06hEcx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVCpu->CTX_SUFF(pVM), 0x00000006, 0);
+    if (pLeaf)
+        *puValue = pLeaf->uEcx;
+    else
+        *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8CpuIdCtlStd06hEcx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Changing CPUID leaf 6. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Returns CPUID leaf 0x00000001 EDX and ECX packed into one value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8CpuIdCtlStd01hEdcx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVCpu->CTX_SUFF(pVM), 0x00000001, 0);
+    if (pLeaf)
+        *puValue = RT_MAKE_U64(pLeaf->uEdx, pLeaf->uEcx);
+    else
+        *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8CpuIdCtlStd01hEdcx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Changing CPUID leaf 0x00000001. (The Rd counterpart reads leaf 1, not 0x80000001.) */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR}  Returns CPUID leaf 0x80000001 EDX and ECX packed into one value. */
+static DECLCALLBACK(int) cpumMsrRd_AmdK8CpuIdCtlExt01hEdcx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVCpu->CTX_SUFF(pVM), 0x80000001, 0);
+    if (pLeaf)
+        *puValue = RT_MAKE_U64(pLeaf->uEdx, pLeaf->uEcx);
+    else
+        *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR}  Stub: the write is ignored. */
+static DECLCALLBACK(int) cpumMsrWr_AmdK8CpuIdCtlExt01hEdcx(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Changing CPUID leaf 0x80000001. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7DebugStatusMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older
+     *  cpus.  Need to be explored and verify K7 presence.  */
+    /** @todo undocumented */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7DebugStatusMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older
+     *  cpus.  Need to be explored and verify K7 presence.  */
+    /** @todo undocumented */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7BHTraceBaseMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older
+     *  cpus.  Need to be explored and verify K7 presence.  */
+    /** @todo undocumented */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7BHTraceBaseMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older
+     *  cpus.  Need to be explored and verify K7 presence.  */
+    /** @todo undocumented */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7BHTracePtrMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when execuing rdmsr/wrmsr on older
+     *  cpus.  Need to be explored and verify K7 presence.  */
+    /** @todo undocumented */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7BHTracePtrMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo undocumented */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7BHTraceLimitMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo undocumented */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7BHTraceLimitMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo undocumented */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7HardwareDebugToolCfgMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo undocumented */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7HardwareDebugToolCfgMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo undocumented */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7FastFlushCountMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo undocumented */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7FastFlushCountMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo undocumented */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7NodeId(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD node ID and bios scratch. */
+    *puValue = 0; /* nodeid = 0; nodes-per-cpu = 1 */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7NodeId(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD node ID and bios scratch. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7DrXAddrMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD DRx address masking (range breakpoints). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7DrXAddrMaskN(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD DRx address masking (range breakpoints). */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7Dr0DataMatchMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD undocumented debugging features. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7Dr0DataMatchMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD undocumented debugging features. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7Dr0DataMaskMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD undocumented debugging features. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7Dr0DataMaskMaybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD undocumented debugging features. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7LoadStoreCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD load-store config. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7LoadStoreCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD load-store config. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7InstrCacheCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD instruction cache config. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7InstrCacheCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD instruction cache config. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7DataCacheCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD data cache config. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7DataCacheCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD data cache config. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7BusUnitCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD bus unit config. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7BusUnitCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo AMD bus unit config. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdK7DebugCtl2Maybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo Undocumented AMD debug control register \#2. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdK7DebugCtl2Maybe(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo Allegedly requiring edi=0x9c5a203a when executing rdmsr/wrmsr on older
+     *  cpus.  Needs to be explored and K7 presence verified.  */
+    /** @todo Undocumented AMD debug control register \#2. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam15hFpuCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD FPU config. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam15hFpuCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD FPU config. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam15hDecoderCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD decoder config. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam15hDecoderCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD decoder config. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hBusUnitCfg2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /* Note! 10h and 16h */
+    /** @todo AMD bus unit config. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hBusUnitCfg2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /* Note! 10h and 16h */
+    /** @todo AMD bus unit config. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam15hCombUnitCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD combined unit config. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam15hCombUnitCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD combined unit config. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam15hCombUnitCfg2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD combined unit config 2. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam15hCombUnitCfg2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD combined unit config 2. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam15hCombUnitCfg3(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD combined unit config 3. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam15hCombUnitCfg3(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD combined unit config 3. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam15hExecUnitCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD execution unit config. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam15hExecUnitCfg(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD execution unit config. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam15hLoadStoreCfg2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD load-store config 2. */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam15hLoadStoreCfg2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD load-store config 2. */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsFetchCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsFetchCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsFetchLinAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsFetchLinAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsFetchPhysAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsFetchPhysAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsOpExecCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsOpExecCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsOpRip(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsOpRip(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    /* Holds a RIP, so reject non-canonical addresses with #GP(0). */
+    if (!X86_IS_CANONICAL(uValue))
+    {
+        Log(("CPUM: wrmsr %s(%#x), %#llx -> %#GP - not canonical\n", pRange->szName, idMsr, uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsOpData(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsOpData(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsOpData2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsOpData2(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsOpData3(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsOpData3(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsDcLinAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsDcLinAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    /* Holds a linear address, so reject non-canonical values with #GP(0). */
+    if (!X86_IS_CANONICAL(uValue))
+    {
+        Log(("CPUM: wrmsr %s(%#x), %#llx -> %#GP - not canonical\n", pRange->szName, idMsr, uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsDcPhysAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsDcPhysAddr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam10hIbsCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam10hIbsCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(int) cpumMsrRd_AmdFam14hIbsBrTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    *puValue = 0;
+    return VINF_SUCCESS;
+}
+
+
+/** @callback_method_impl{FNCPUMWRMSR} */
+static DECLCALLBACK(int) cpumMsrWr_AmdFam14hIbsBrTarget(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue)
+{
+    /** @todo AMD IBS (instruction based sampling). */
+    /* Holds a branch target address, so reject non-canonical values with #GP(0). */
+    if (!X86_IS_CANONICAL(uValue))
+    {
+        Log(("CPUM: wrmsr %s(%#x), %#llx -> %#GP - not canonical\n", pRange->szName, idMsr, uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+    return VINF_SUCCESS;
+}
+
+
+
+/**
+ * MSR read function table.
+ *
+ * NOTE(review): the array is sized by kCpumMsrRdFn_End and the entry names
+ * mirror kCpumMsrRdFn_* values, so the entry order presumably must match
+ * that enumeration exactly — verify when adding entries.  The two NULL
+ * slots correspond to the Invalid and Alias values, which are handled
+ * before dispatching through this table.
+ */
+static const PFNCPUMRDMSR g_aCpumRdMsrFns[kCpumMsrRdFn_End] =
+{
+    NULL, /* Invalid */
+    cpumMsrRd_FixedValue,
+    NULL, /* Alias */
+    cpumMsrRd_WriteOnly,
+    cpumMsrRd_Ia32P5McAddr,
+    cpumMsrRd_Ia32P5McType,
+    cpumMsrRd_Ia32TimestampCounter,
+    cpumMsrRd_Ia32ApicBase,
+    cpumMsrRd_Ia32FeatureControl,
+    cpumMsrRd_Ia32SmmMonitorCtl,
+    cpumMsrRd_Ia32PmcN,
+    cpumMsrRd_Ia32MonitorFilterLineSize,
+    cpumMsrRd_Ia32MPerf,
+    cpumMsrRd_Ia32APerf,
+    cpumMsrRd_Ia32MtrrCap,
+    cpumMsrRd_Ia32MtrrPhysBaseN,
+    cpumMsrRd_Ia32MtrrPhysMaskN,
+    cpumMsrRd_Ia32MtrrFixed,
+    cpumMsrRd_Ia32MtrrDefType,
+    cpumMsrRd_Ia32Pat,
+    cpumMsrRd_Ia32SysEnterCs,
+    cpumMsrRd_Ia32SysEnterEsp,
+    cpumMsrRd_Ia32SysEnterEip,
+    cpumMsrRd_Ia32McgCap,
+    cpumMsrRd_Ia32McgStatus,
+    cpumMsrRd_Ia32McgCtl,
+    cpumMsrRd_Ia32DebugCtl,
+    cpumMsrRd_Ia32SmrrPhysBase,
+    cpumMsrRd_Ia32SmrrPhysMask,
+    cpumMsrRd_Ia32PlatformDcaCap,
+    cpumMsrRd_Ia32CpuDcaCap,
+    cpumMsrRd_Ia32Dca0Cap,
+    cpumMsrRd_Ia32PerfEvtSelN,
+    cpumMsrRd_Ia32PerfStatus,
+    cpumMsrRd_Ia32PerfCtl,
+    cpumMsrRd_Ia32FixedCtrN,
+    cpumMsrRd_Ia32PerfCapabilities,
+    cpumMsrRd_Ia32FixedCtrCtrl,
+    cpumMsrRd_Ia32PerfGlobalStatus,
+    cpumMsrRd_Ia32PerfGlobalCtrl,
+    cpumMsrRd_Ia32PerfGlobalOvfCtrl,
+    cpumMsrRd_Ia32PebsEnable,
+    cpumMsrRd_Ia32ClockModulation,
+    cpumMsrRd_Ia32ThermInterrupt,
+    cpumMsrRd_Ia32ThermStatus,
+    cpumMsrRd_Ia32Therm2Ctl,
+    cpumMsrRd_Ia32MiscEnable,
+    cpumMsrRd_Ia32McCtlStatusAddrMiscN,
+    cpumMsrRd_Ia32McNCtl2,
+    cpumMsrRd_Ia32DsArea,
+    cpumMsrRd_Ia32TscDeadline,
+    cpumMsrRd_Ia32X2ApicN,
+    cpumMsrRd_Ia32VmxBase,
+    cpumMsrRd_Ia32VmxPinbasedCtls,
+    cpumMsrRd_Ia32VmxProcbasedCtls,
+    cpumMsrRd_Ia32VmxExitCtls,
+    cpumMsrRd_Ia32VmxEntryCtls,
+    cpumMsrRd_Ia32VmxMisc,
+    cpumMsrRd_Ia32VmxCr0Fixed0,
+    cpumMsrRd_Ia32VmxCr0Fixed1,
+    cpumMsrRd_Ia32VmxCr4Fixed0,
+    cpumMsrRd_Ia32VmxCr4Fixed1,
+    cpumMsrRd_Ia32VmxVmcsEnum,
+    cpumMsrRd_Ia32VmxProcBasedCtls2,
+    cpumMsrRd_Ia32VmxEptVpidCap,
+    cpumMsrRd_Ia32VmxTruePinbasedCtls,
+    cpumMsrRd_Ia32VmxTrueProcbasedCtls,
+    cpumMsrRd_Ia32VmxTrueExitCtls,
+    cpumMsrRd_Ia32VmxTrueEntryCtls,
+
+    cpumMsrRd_Amd64Efer,
+    cpumMsrRd_Amd64SyscallTarget,
+    cpumMsrRd_Amd64LongSyscallTarget,
+    cpumMsrRd_Amd64CompSyscallTarget,
+    cpumMsrRd_Amd64SyscallFlagMask,
+    cpumMsrRd_Amd64FsBase,
+    cpumMsrRd_Amd64GsBase,
+    cpumMsrRd_Amd64KernelGsBase,
+    cpumMsrRd_Amd64TscAux,
+
+    cpumMsrRd_IntelEblCrPowerOn,
+    cpumMsrRd_IntelPlatformInfo100MHz,
+    cpumMsrRd_IntelPlatformInfo133MHz,
+    cpumMsrRd_IntelPkgCStConfigControl,
+    cpumMsrRd_IntelPmgIoCaptureBase,
+    cpumMsrRd_IntelLastBranchFromToN,
+    cpumMsrRd_IntelLastBranchFromN,
+    cpumMsrRd_IntelLastBranchToN,
+    cpumMsrRd_IntelLastBranchTos,
+    cpumMsrRd_IntelBblCrCtl,
+    cpumMsrRd_IntelBblCrCtl3,
+    cpumMsrRd_IntelI7TemperatureTarget,
+    cpumMsrRd_IntelI7MsrOffCoreResponseN,
+    cpumMsrRd_IntelI7MiscPwrMgmt,
+    cpumMsrRd_IntelP6CrN,
+    cpumMsrRd_IntelCpuId1FeatureMaskEcdx,
+    cpumMsrRd_IntelCpuId1FeatureMaskEax,
+    cpumMsrRd_IntelCpuId80000001FeatureMaskEcdx,
+    cpumMsrRd_IntelI7SandyAesNiCtl,
+    cpumMsrRd_IntelI7TurboRatioLimit,
+    cpumMsrRd_IntelI7LbrSelect,
+    cpumMsrRd_IntelI7SandyErrorControl,
+    cpumMsrRd_IntelI7VirtualLegacyWireCap,
+    cpumMsrRd_IntelI7PowerCtl,
+    cpumMsrRd_IntelI7SandyPebsNumAlt,
+    cpumMsrRd_IntelI7PebsLdLat,
+    cpumMsrRd_IntelI7PkgCnResidencyN,
+    cpumMsrRd_IntelI7CoreCnResidencyN,
+    cpumMsrRd_IntelI7SandyVrCurrentConfig,
+    cpumMsrRd_IntelI7SandyVrMiscConfig,
+    cpumMsrRd_IntelI7SandyRaplPowerUnit,
+    cpumMsrRd_IntelI7SandyPkgCnIrtlN,
+    cpumMsrRd_IntelI7SandyPkgC2Residency,
+    cpumMsrRd_IntelI7RaplPkgPowerLimit,
+    cpumMsrRd_IntelI7RaplPkgEnergyStatus,
+    cpumMsrRd_IntelI7RaplPkgPerfStatus,
+    cpumMsrRd_IntelI7RaplPkgPowerInfo,
+    cpumMsrRd_IntelI7RaplDramPowerLimit,
+    cpumMsrRd_IntelI7RaplDramEnergyStatus,
+    cpumMsrRd_IntelI7RaplDramPerfStatus,
+    cpumMsrRd_IntelI7RaplDramPowerInfo,
+    cpumMsrRd_IntelI7RaplPp0PowerLimit,
+    cpumMsrRd_IntelI7RaplPp0EnergyStatus,
+    cpumMsrRd_IntelI7RaplPp0Policy,
+    cpumMsrRd_IntelI7RaplPp0PerfStatus,
+    cpumMsrRd_IntelI7RaplPp1PowerLimit,
+    cpumMsrRd_IntelI7RaplPp1EnergyStatus,
+    cpumMsrRd_IntelI7RaplPp1Policy,
+
+    cpumMsrRd_P6LastBranchFromIp,
+    cpumMsrRd_P6LastBranchToIp,
+    cpumMsrRd_P6LastIntFromIp,
+    cpumMsrRd_P6LastIntToIp,
+
+    cpumMsrRd_AmdFam15hTscRate,
+    cpumMsrRd_AmdFam15hLwpCfg,
+    cpumMsrRd_AmdFam15hLwpCbAddr,
+    cpumMsrRd_AmdFam10hMc4MiscN,
+    cpumMsrRd_AmdK8PerfCtlN,
+    cpumMsrRd_AmdK8PerfCtrN,
+    cpumMsrRd_AmdK8SysCfg,
+    cpumMsrRd_AmdK8HwCr,
+    cpumMsrRd_AmdK8IorrBaseN,
+    cpumMsrRd_AmdK8IorrMaskN,
+    cpumMsrRd_AmdK8TopOfMemN,
+    cpumMsrRd_AmdK8NbCfg1,
+    cpumMsrRd_AmdK8McXcptRedir,
+    cpumMsrRd_AmdK8CpuNameN,
+    cpumMsrRd_AmdK8HwThermalCtrl,
+    cpumMsrRd_AmdK8SwThermalCtrl,
+    cpumMsrRd_AmdK8McCtlMaskN,
+    cpumMsrRd_AmdK8SmiOnIoTrapN,
+    cpumMsrRd_AmdK8SmiOnIoTrapCtlSts,
+    cpumMsrRd_AmdK8IntPendingMessage,
+    cpumMsrRd_AmdK8SmiTriggerIoCycle,
+    cpumMsrRd_AmdFam10hMmioCfgBaseAddr,
+    cpumMsrRd_AmdFam10hTrapCtlMaybe,
+    cpumMsrRd_AmdFam10hPStateCurLimit,
+    cpumMsrRd_AmdFam10hPStateControl,
+    cpumMsrRd_AmdFam10hPStateStatus,
+    cpumMsrRd_AmdFam10hPStateN,
+    cpumMsrRd_AmdFam10hCofVidControl,
+    cpumMsrRd_AmdFam10hCofVidStatus,
+    cpumMsrRd_AmdFam10hCStateIoBaseAddr,
+    cpumMsrRd_AmdFam10hCpuWatchdogTimer,
+    cpumMsrRd_AmdK8SmmBase,
+    cpumMsrRd_AmdK8SmmAddr,
+    cpumMsrRd_AmdK8SmmMask,
+    cpumMsrRd_AmdK8VmCr,
+    cpumMsrRd_AmdK8IgnNe,
+    cpumMsrRd_AmdK8SmmCtl,
+    cpumMsrRd_AmdK8VmHSavePa,
+    cpumMsrRd_AmdFam10hVmLockKey,
+    cpumMsrRd_AmdFam10hSmmLockKey,
+    cpumMsrRd_AmdFam10hLocalSmiStatus,
+    cpumMsrRd_AmdFam10hOsVisWrkIdLength,
+    cpumMsrRd_AmdFam10hOsVisWrkStatus,
+    cpumMsrRd_AmdFam16hL2IPerfCtlN,
+    cpumMsrRd_AmdFam16hL2IPerfCtrN,
+    cpumMsrRd_AmdFam15hNorthbridgePerfCtlN,
+    cpumMsrRd_AmdFam15hNorthbridgePerfCtrN,
+    cpumMsrRd_AmdK7MicrocodeCtl,
+    cpumMsrRd_AmdK7ClusterIdMaybe,
+    cpumMsrRd_AmdK8CpuIdCtlStd07hEbax,
+    cpumMsrRd_AmdK8CpuIdCtlStd06hEcx,
+    cpumMsrRd_AmdK8CpuIdCtlStd01hEdcx,
+    cpumMsrRd_AmdK8CpuIdCtlExt01hEdcx,
+    cpumMsrRd_AmdK7DebugStatusMaybe,
+    cpumMsrRd_AmdK7BHTraceBaseMaybe,
+    cpumMsrRd_AmdK7BHTracePtrMaybe,
+    cpumMsrRd_AmdK7BHTraceLimitMaybe,
+    cpumMsrRd_AmdK7HardwareDebugToolCfgMaybe,
+    cpumMsrRd_AmdK7FastFlushCountMaybe,
+    cpumMsrRd_AmdK7NodeId,
+    cpumMsrRd_AmdK7DrXAddrMaskN,
+    cpumMsrRd_AmdK7Dr0DataMatchMaybe,
+    cpumMsrRd_AmdK7Dr0DataMaskMaybe,
+    cpumMsrRd_AmdK7LoadStoreCfg,
+    cpumMsrRd_AmdK7InstrCacheCfg,
+    cpumMsrRd_AmdK7DataCacheCfg,
+    cpumMsrRd_AmdK7BusUnitCfg,
+    cpumMsrRd_AmdK7DebugCtl2Maybe,
+    cpumMsrRd_AmdFam15hFpuCfg,
+    cpumMsrRd_AmdFam15hDecoderCfg,
+    cpumMsrRd_AmdFam10hBusUnitCfg2,
+    cpumMsrRd_AmdFam15hCombUnitCfg,
+    cpumMsrRd_AmdFam15hCombUnitCfg2,
+    cpumMsrRd_AmdFam15hCombUnitCfg3,
+    cpumMsrRd_AmdFam15hExecUnitCfg,
+    cpumMsrRd_AmdFam15hLoadStoreCfg2,
+    cpumMsrRd_AmdFam10hIbsFetchCtl,
+    cpumMsrRd_AmdFam10hIbsFetchLinAddr,
+    cpumMsrRd_AmdFam10hIbsFetchPhysAddr,
+    cpumMsrRd_AmdFam10hIbsOpExecCtl,
+    cpumMsrRd_AmdFam10hIbsOpRip,
+    cpumMsrRd_AmdFam10hIbsOpData,
+    cpumMsrRd_AmdFam10hIbsOpData2,
+    cpumMsrRd_AmdFam10hIbsOpData3,
+    cpumMsrRd_AmdFam10hIbsDcLinAddr,
+    cpumMsrRd_AmdFam10hIbsDcPhysAddr,
+    cpumMsrRd_AmdFam10hIbsCtl,
+    cpumMsrRd_AmdFam14hIbsBrTarget,
+};
+
+
+/**
+ * MSR write function table.
+ */
+static const PFNCPUMWRMSR g_aCpumWrMsrFns[kCpumMsrWrFn_End] =
+{
+    NULL, /* Invalid */
+    cpumMsrWr_IgnoreWrite,
+    cpumMsrWr_ReadOnly,
+    NULL, /* Alias */
+    cpumMsrWr_Ia32P5McAddr,
+    cpumMsrWr_Ia32P5McType,
+    cpumMsrWr_Ia32TimestampCounter,
+    cpumMsrWr_Ia32ApicBase,
+    cpumMsrWr_Ia32FeatureControl,
+    cpumMsrWr_Ia32BiosUpdateTrigger,
+    cpumMsrWr_Ia32SmmMonitorCtl,
+    cpumMsrWr_Ia32PmcN,
+    cpumMsrWr_Ia32MonitorFilterLineSize,
+    cpumMsrWr_Ia32MPerf,
+    cpumMsrWr_Ia32APerf,
+    cpumMsrWr_Ia32MtrrPhysBaseN,
+    cpumMsrWr_Ia32MtrrPhysMaskN,
+    cpumMsrWr_Ia32MtrrFixed,
+    cpumMsrWr_Ia32MtrrDefType,
+    cpumMsrWr_Ia32Pat,
+    cpumMsrWr_Ia32SysEnterCs,
+    cpumMsrWr_Ia32SysEnterEsp,
+    cpumMsrWr_Ia32SysEnterEip,
+    cpumMsrWr_Ia32McgStatus,
+    cpumMsrWr_Ia32McgCtl,
+    cpumMsrWr_Ia32DebugCtl,
+    cpumMsrWr_Ia32SmrrPhysBase,
+    cpumMsrWr_Ia32SmrrPhysMask,
+    cpumMsrWr_Ia32PlatformDcaCap,
+    cpumMsrWr_Ia32Dca0Cap,
+    cpumMsrWr_Ia32PerfEvtSelN,
+    cpumMsrWr_Ia32PerfCtl,
+    cpumMsrWr_Ia32FixedCtrN,
+    cpumMsrWr_Ia32PerfCapabilities,
+    cpumMsrWr_Ia32FixedCtrCtrl,
+    cpumMsrWr_Ia32PerfGlobalStatus,
+    cpumMsrWr_Ia32PerfGlobalCtrl,
+    cpumMsrWr_Ia32PerfGlobalOvfCtrl,
+    cpumMsrWr_Ia32PebsEnable,
+    cpumMsrWr_Ia32ClockModulation,
+    cpumMsrWr_Ia32ThermInterrupt,
+    cpumMsrWr_Ia32ThermStatus,
+    cpumMsrWr_Ia32Therm2Ctl,
+    cpumMsrWr_Ia32MiscEnable,
+    cpumMsrWr_Ia32McCtlStatusAddrMiscN,
+    cpumMsrWr_Ia32McNCtl2,
+    cpumMsrWr_Ia32DsArea,
+    cpumMsrWr_Ia32TscDeadline,
+    cpumMsrWr_Ia32X2ApicN,
+
+    cpumMsrWr_Amd64Efer,
+    cpumMsrWr_Amd64SyscallTarget,
+    cpumMsrWr_Amd64LongSyscallTarget,
+    cpumMsrWr_Amd64CompSyscallTarget,
+    cpumMsrWr_Amd64SyscallFlagMask,
+    cpumMsrWr_Amd64FsBase,
+    cpumMsrWr_Amd64GsBase,
+    cpumMsrWr_Amd64KernelGsBase,
+    cpumMsrWr_Amd64TscAux,
+
+    cpumMsrWr_IntelEblCrPowerOn,
+    cpumMsrWr_IntelPkgCStConfigControl,
+    cpumMsrWr_IntelPmgIoCaptureBase,
+    cpumMsrWr_IntelLastBranchFromToN,
+    cpumMsrWr_IntelLastBranchFromN,
+    cpumMsrWr_IntelLastBranchToN,
+    cpumMsrWr_IntelLastBranchTos,
+    cpumMsrWr_IntelBblCrCtl,
+    cpumMsrWr_IntelBblCrCtl3,
+    cpumMsrWr_IntelI7TemperatureTarget,
+    cpumMsrWr_IntelI7MsrOffCoreResponseN,
+    cpumMsrWr_IntelI7MiscPwrMgmt,
+    cpumMsrWr_IntelP6CrN,
+    cpumMsrWr_IntelCpuId1FeatureMaskEcdx,
+    cpumMsrWr_IntelCpuId1FeatureMaskEax,
+    cpumMsrWr_IntelCpuId80000001FeatureMaskEcdx,
+    cpumMsrWr_IntelI7SandyAesNiCtl,
+    cpumMsrWr_IntelI7TurboRatioLimit,
+    cpumMsrWr_IntelI7LbrSelect,
+    cpumMsrWr_IntelI7SandyErrorControl,
+    cpumMsrWr_IntelI7PowerCtl,
+    cpumMsrWr_IntelI7SandyPebsNumAlt,
+    cpumMsrWr_IntelI7PebsLdLat,
+    cpumMsrWr_IntelI7SandyVrCurrentConfig,
+    cpumMsrWr_IntelI7SandyVrMiscConfig,
+    cpumMsrWr_IntelI7SandyPkgCnIrtlN,
+    cpumMsrWr_IntelI7RaplPkgPowerLimit,
+    cpumMsrWr_IntelI7RaplDramPowerLimit,
+    cpumMsrWr_IntelI7RaplPp0PowerLimit,
+    cpumMsrWr_IntelI7RaplPp0Policy,
+    cpumMsrWr_IntelI7RaplPp1PowerLimit,
+    cpumMsrWr_IntelI7RaplPp1Policy,
+
+    cpumMsrWr_P6LastIntFromIp,
+    cpumMsrWr_P6LastIntToIp,
+
+    cpumMsrWr_AmdFam15hTscRate,
+    cpumMsrWr_AmdFam15hLwpCfg,
+    cpumMsrWr_AmdFam15hLwpCbAddr,
+    cpumMsrWr_AmdFam10hMc4MiscN,
+    cpumMsrWr_AmdK8PerfCtlN,
+    cpumMsrWr_AmdK8PerfCtrN,
+    cpumMsrWr_AmdK8SysCfg,
+    cpumMsrWr_AmdK8HwCr,
+    cpumMsrWr_AmdK8IorrBaseN,
+    cpumMsrWr_AmdK8IorrMaskN,
+    cpumMsrWr_AmdK8TopOfMemN,
+    cpumMsrWr_AmdK8NbCfg1,
+    cpumMsrWr_AmdK8McXcptRedir,
+    cpumMsrWr_AmdK8CpuNameN,
+    cpumMsrWr_AmdK8HwThermalCtrl,
+    cpumMsrWr_AmdK8SwThermalCtrl,
+    cpumMsrWr_AmdK8McCtlMaskN,
+    cpumMsrWr_AmdK8SmiOnIoTrapN,
+    cpumMsrWr_AmdK8SmiOnIoTrapCtlSts,
+    cpumMsrWr_AmdK8IntPendingMessage,
+    cpumMsrWr_AmdK8SmiTriggerIoCycle,
+    cpumMsrWr_AmdFam10hMmioCfgBaseAddr,
+    cpumMsrWr_AmdFam10hTrapCtlMaybe,
+    cpumMsrWr_AmdFam10hPStateControl,
+    cpumMsrWr_AmdFam10hPStateStatus,
+    cpumMsrWr_AmdFam10hPStateN,
+    cpumMsrWr_AmdFam10hCofVidControl,
+    cpumMsrWr_AmdFam10hCofVidStatus,
+    cpumMsrWr_AmdFam10hCStateIoBaseAddr,
+    cpumMsrWr_AmdFam10hCpuWatchdogTimer,
+    cpumMsrWr_AmdK8SmmBase,
+    cpumMsrWr_AmdK8SmmAddr,
+    cpumMsrWr_AmdK8SmmMask,
+    cpumMsrWr_AmdK8VmCr,
+    cpumMsrWr_AmdK8IgnNe,
+    cpumMsrWr_AmdK8SmmCtl,
+    cpumMsrWr_AmdK8VmHSavePa,
+    cpumMsrWr_AmdFam10hVmLockKey,
+    cpumMsrWr_AmdFam10hSmmLockKey,
+    cpumMsrWr_AmdFam10hLocalSmiStatus,
+    cpumMsrWr_AmdFam10hOsVisWrkIdLength,
+    cpumMsrWr_AmdFam10hOsVisWrkStatus,
+    cpumMsrWr_AmdFam16hL2IPerfCtlN,
+    cpumMsrWr_AmdFam16hL2IPerfCtrN,
+    cpumMsrWr_AmdFam15hNorthbridgePerfCtlN,
+    cpumMsrWr_AmdFam15hNorthbridgePerfCtrN,
+    cpumMsrWr_AmdK7MicrocodeCtl,
+    cpumMsrWr_AmdK7ClusterIdMaybe,
+    cpumMsrWr_AmdK8CpuIdCtlStd07hEbax,
+    cpumMsrWr_AmdK8CpuIdCtlStd06hEcx,
+    cpumMsrWr_AmdK8CpuIdCtlStd01hEdcx,
+    cpumMsrWr_AmdK8CpuIdCtlExt01hEdcx,
+    cpumMsrWr_AmdK7DebugStatusMaybe,
+    cpumMsrWr_AmdK7BHTraceBaseMaybe,
+    cpumMsrWr_AmdK7BHTracePtrMaybe,
+    cpumMsrWr_AmdK7BHTraceLimitMaybe,
+    cpumMsrWr_AmdK7HardwareDebugToolCfgMaybe,
+    cpumMsrWr_AmdK7FastFlushCountMaybe,
+    cpumMsrWr_AmdK7NodeId,
+    cpumMsrWr_AmdK7DrXAddrMaskN,
+    cpumMsrWr_AmdK7Dr0DataMatchMaybe,
+    cpumMsrWr_AmdK7Dr0DataMaskMaybe,
+    cpumMsrWr_AmdK7LoadStoreCfg,
+    cpumMsrWr_AmdK7InstrCacheCfg,
+    cpumMsrWr_AmdK7DataCacheCfg,
+    cpumMsrWr_AmdK7BusUnitCfg,
+    cpumMsrWr_AmdK7DebugCtl2Maybe,
+    cpumMsrWr_AmdFam15hFpuCfg,
+    cpumMsrWr_AmdFam15hDecoderCfg,
+    cpumMsrWr_AmdFam10hBusUnitCfg2,
+    cpumMsrWr_AmdFam15hCombUnitCfg,
+    cpumMsrWr_AmdFam15hCombUnitCfg2,
+    cpumMsrWr_AmdFam15hCombUnitCfg3,
+    cpumMsrWr_AmdFam15hExecUnitCfg,
+    cpumMsrWr_AmdFam15hLoadStoreCfg2,
+    cpumMsrWr_AmdFam10hIbsFetchCtl,
+    cpumMsrWr_AmdFam10hIbsFetchLinAddr,
+    cpumMsrWr_AmdFam10hIbsFetchPhysAddr,
+    cpumMsrWr_AmdFam10hIbsOpExecCtl,
+    cpumMsrWr_AmdFam10hIbsOpRip,
+    cpumMsrWr_AmdFam10hIbsOpData,
+    cpumMsrWr_AmdFam10hIbsOpData2,
+    cpumMsrWr_AmdFam10hIbsOpData3,
+    cpumMsrWr_AmdFam10hIbsDcLinAddr,
+    cpumMsrWr_AmdFam10hIbsDcPhysAddr,
+    cpumMsrWr_AmdFam10hIbsCtl,
+    cpumMsrWr_AmdFam14hIbsBrTarget,
+};
+
+
+/**
+ * Looks up the range for the given MSR.
+ *
+ * @returns Pointer to the range if found, NULL if not.
+ * @param   pVM                 The cross context VM structure.
+ * @param   idMsr               The MSR to look up.
+ */
+# ifndef IN_RING3
+static
+# endif
+PCPUMMSRRANGE cpumLookupMsrRange(PVM pVM, uint32_t idMsr)
+{
+# if 0
+    /*
+     * Binary lookup.
+     */
+
+# else
+    /*
+     * Linear lookup.
+     */
+    uint32_t        cLeft = pVM->cpum.s.GuestInfo.cMsrRanges;
+    PCPUMMSRRANGE   pCur  = pVM->cpum.s.GuestInfo.CTX_SUFF(paMsrRanges);
+    while (cLeft-- > 0)
+    {
+        if (idMsr >= pCur->uFirst && idMsr <= pCur->uLast)
+            return pCur;
+        pCur++;
+    }
+# endif
+    return NULL;
+}
+
+#ifdef VBOX_WITH_NEW_MSR_CODE
+
+/**
+ * Query a guest MSR.
+ *
+ * The caller is responsible for checking privilege if the call is the result of
+ * a RDMSR instruction.  We'll do the rest.
+ *
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is
+ *          expected to take the appropriate actions. @a *puValue is set to 0.
+ * @param   pVCpu               Pointer to the VMCPU.
+ * @param   idMsr               The MSR.
+ * @param   puValue             Where to return the value.
+ *
+ * @remarks This will always return the right values, even when we're in the
+ *          recompiler.
+ */
+VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
+{
+    *puValue = 0;
+
+    int             rc;
+    PVM             pVM    = pVCpu->CTX_SUFF(pVM);
+    PCPUMMSRRANGE   pRange = cpumLookupMsrRange(pVM, idMsr);
+    if (pRange)
+    {
+        CPUMMSRRDFN  enmRdFn = (CPUMMSRRDFN)pRange->enmRdFn;
+        AssertReturn(enmRdFn > kCpumMsrRdFn_Invalid && enmRdFn < kCpumMsrRdFn_End, VERR_CPUM_IPE_1);
+
+        PFNCPUMRDMSR pfnRdMsr = g_aCpumRdMsrFns[enmRdFn];
+        AssertReturn(pfnRdMsr, VERR_CPUM_IPE_2);
+
+        STAM_COUNTER_INC(&pRange->cReads);
+        STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrReads);
+
+        rc = pfnRdMsr(pVCpu, idMsr, pRange, puValue);
+        if (RT_SUCCESS(rc))
+        {
+            Log2(("CPUM: RDMSR %#x (%s) -> %#llx\n", idMsr, pRange->szName, *puValue));
+            AssertMsg(rc == VINF_SUCCESS, ("%Rrc idMsr=%#x\n", rc, idMsr));
+        }
+        else if (rc == VERR_CPUM_RAISE_GP_0)
+        {
+            Log(("CPUM: RDMSR %#x (%s) -> #GP(0)\n", idMsr, pRange->szName));
+            STAM_COUNTER_INC(&pRange->cGps);
+            STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrReadsRaiseGp);
+        }
+        else
+            Log(("CPUM: RDMSR %#x (%s) -> rc=%Rrc\n", idMsr, pRange->szName, rc));
+    }
+    else
+    {
+        Log(("CPUM: Unknown RDMSR %#x -> #GP(0)\n", idMsr));
+        STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrReads);
+        STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrReadsUnknown);
+        rc = VERR_CPUM_RAISE_GP_0;
+    }
+    return rc;
+}
+
+
+/**
+ * Writes to a guest MSR.
+ *
+ * The caller is responsible for checking privilege if the call is the result of
+ * a WRMSR instruction.  We'll do the rest.
+ *
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
+ *          appropriate actions.
+ *
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   idMsr       The MSR id.
+ * @param   uValue      The value to set.
+ *
+ * @remarks Everyone changing MSR values, including the recompiler, shall do it
+ *          by calling this method.  This makes sure we have current values and
+ *          that we trigger all the right actions when something changes.
+ *
+ *          For performance reasons, this actually isn't entirely true for some
+ *          MSRs when in HM mode.  The code here and in HM must be aware of
+ *          this.
+ */
+VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
+{
+    int             rc;
+    PVM             pVM    = pVCpu->CTX_SUFF(pVM);
+    PCPUMMSRRANGE   pRange = cpumLookupMsrRange(pVM, idMsr);
+    if (pRange)
+    {
+        STAM_COUNTER_INC(&pRange->cWrites);
+        STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrWrites);
+
+        if (!(uValue & pRange->fWrGpMask))
+        {
+            CPUMMSRWRFN  enmWrFn = (CPUMMSRWRFN)pRange->enmWrFn;
+            AssertReturn(enmWrFn > kCpumMsrWrFn_Invalid && enmWrFn < kCpumMsrWrFn_End, VERR_CPUM_IPE_1);
+
+            PFNCPUMWRMSR pfnWrMsr = g_aCpumWrMsrFns[enmWrFn];
+            AssertReturn(pfnWrMsr, VERR_CPUM_IPE_2);
+
+            uint64_t uValueAdjusted = uValue & ~pRange->fWrIgnMask;
+            if (uValueAdjusted != uValue)
+            {
+                STAM_COUNTER_INC(&pRange->cIgnoredBits);
+                STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrWritesToIgnoredBits);
+            }
+
+            rc = pfnWrMsr(pVCpu, idMsr, pRange, uValueAdjusted);
+            if (RT_SUCCESS(rc))
+            {
+                Log2(("CPUM: WRMSR %#x (%s), %#llx [%#llx]\n", idMsr, pRange->szName, uValueAdjusted, uValue));
+                AssertMsg(rc == VINF_SUCCESS, ("%Rrc idMsr=%#x\n", rc, idMsr));
+            }
+            else if (rc == VERR_CPUM_RAISE_GP_0)
+            {
+                Log(("CPUM: WRMSR %#x (%s), %#llx [%#llx] -> #GP(0)\n", idMsr, pRange->szName, uValueAdjusted, uValue));
+                STAM_COUNTER_INC(&pRange->cGps);
+                STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrWritesRaiseGp);
+            }
+            else
+                Log(("CPUM: WRMSR %#x (%s), %#llx [%#llx] -> rc=%Rrc\n", idMsr, pRange->szName, uValueAdjusted, uValue, rc));
+        }
+        else
+        {
+            Log(("CPUM: WRMSR %#x (%s), %#llx -> #GP(0) - invalid bits %#llx\n",
+                 idMsr, pRange->szName, uValue, uValue & pRange->fWrGpMask));
+            STAM_COUNTER_INC(&pRange->cGps);
+            STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrWritesRaiseGp);
+            rc = VERR_CPUM_RAISE_GP_0;
+        }
+    }
+    else
+    {
+        Log(("CPUM: Unknown WRMSR %#x, %#llx -> #GP(0)\n", idMsr, uValue));
+        STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrWrites);
+        STAM_REL_COUNTER_INC(&pVM->cpum.s.cMsrWritesUnknown);
+        rc = VERR_CPUM_RAISE_GP_0;
+    }
+    return rc;
+}
+
+#endif /* VBOX_WITH_NEW_MSR_CODE */
+
+
+#if defined(VBOX_STRICT) && defined(IN_RING3)
+/**
+ * Performs some checks on the static data related to MSRs.
+ *
+ * @returns VINF_SUCCESS on success, error on failure.
+ */
+int cpumR3MsrStrictInitChecks(void)
+{
+#define CPUM_ASSERT_RD_MSR_FN(a_Register) \
+        AssertReturn(g_aCpumRdMsrFns[kCpumMsrRdFn_##a_Register] == cpumMsrRd_##a_Register, VERR_CPUM_IPE_2);
+#define CPUM_ASSERT_WR_MSR_FN(a_Register) \
+        AssertReturn(g_aCpumWrMsrFns[kCpumMsrWrFn_##a_Register] == cpumMsrWr_##a_Register, VERR_CPUM_IPE_2);
+
+    AssertReturn(g_aCpumRdMsrFns[kCpumMsrRdFn_Invalid] == NULL, VERR_CPUM_IPE_2);
+    CPUM_ASSERT_RD_MSR_FN(FixedValue);
+    CPUM_ASSERT_RD_MSR_FN(WriteOnly);
+    CPUM_ASSERT_RD_MSR_FN(Ia32P5McAddr);
+    CPUM_ASSERT_RD_MSR_FN(Ia32P5McType);
+    CPUM_ASSERT_RD_MSR_FN(Ia32TimestampCounter);
+    CPUM_ASSERT_RD_MSR_FN(Ia32ApicBase);
+    CPUM_ASSERT_RD_MSR_FN(Ia32FeatureControl);
+    CPUM_ASSERT_RD_MSR_FN(Ia32SmmMonitorCtl);
+    CPUM_ASSERT_RD_MSR_FN(Ia32PmcN);
+    CPUM_ASSERT_RD_MSR_FN(Ia32MonitorFilterLineSize);
+    CPUM_ASSERT_RD_MSR_FN(Ia32MPerf);
+    CPUM_ASSERT_RD_MSR_FN(Ia32APerf);
+    CPUM_ASSERT_RD_MSR_FN(Ia32MtrrCap);
+    CPUM_ASSERT_RD_MSR_FN(Ia32MtrrPhysBaseN);
+    CPUM_ASSERT_RD_MSR_FN(Ia32MtrrPhysMaskN);
+    CPUM_ASSERT_RD_MSR_FN(Ia32MtrrFixed);
+    CPUM_ASSERT_RD_MSR_FN(Ia32MtrrDefType);
+    CPUM_ASSERT_RD_MSR_FN(Ia32Pat);
+    CPUM_ASSERT_RD_MSR_FN(Ia32SysEnterCs);
+    CPUM_ASSERT_RD_MSR_FN(Ia32SysEnterEsp);
+    CPUM_ASSERT_RD_MSR_FN(Ia32SysEnterEip);
+    CPUM_ASSERT_RD_MSR_FN(Ia32McgCap);
+    CPUM_ASSERT_RD_MSR_FN(Ia32McgStatus);
+    CPUM_ASSERT_RD_MSR_FN(Ia32McgCtl);
+    CPUM_ASSERT_RD_MSR_FN(Ia32DebugCtl);
+    CPUM_ASSERT_RD_MSR_FN(Ia32SmrrPhysBase);
+    CPUM_ASSERT_RD_MSR_FN(Ia32SmrrPhysMask);
+    CPUM_ASSERT_RD_MSR_FN(Ia32PlatformDcaCap);
+    CPUM_ASSERT_RD_MSR_FN(Ia32CpuDcaCap);
+    CPUM_ASSERT_RD_MSR_FN(Ia32Dca0Cap);
+    CPUM_ASSERT_RD_MSR_FN(Ia32PerfEvtSelN);
+    CPUM_ASSERT_RD_MSR_FN(Ia32PerfStatus);
+    CPUM_ASSERT_RD_MSR_FN(Ia32PerfCtl);
+    CPUM_ASSERT_RD_MSR_FN(Ia32FixedCtrN);
+    CPUM_ASSERT_RD_MSR_FN(Ia32PerfCapabilities);
+    CPUM_ASSERT_RD_MSR_FN(Ia32FixedCtrCtrl);
+    CPUM_ASSERT_RD_MSR_FN(Ia32PerfGlobalStatus);
+    CPUM_ASSERT_RD_MSR_FN(Ia32PerfGlobalCtrl);
+    CPUM_ASSERT_RD_MSR_FN(Ia32PerfGlobalOvfCtrl);
+    CPUM_ASSERT_RD_MSR_FN(Ia32PebsEnable);
+    CPUM_ASSERT_RD_MSR_FN(Ia32ClockModulation);
+    CPUM_ASSERT_RD_MSR_FN(Ia32ThermInterrupt);
+    CPUM_ASSERT_RD_MSR_FN(Ia32ThermStatus);
+    CPUM_ASSERT_RD_MSR_FN(Ia32MiscEnable);
+    CPUM_ASSERT_RD_MSR_FN(Ia32McCtlStatusAddrMiscN);
+    CPUM_ASSERT_RD_MSR_FN(Ia32McNCtl2);
+    CPUM_ASSERT_RD_MSR_FN(Ia32DsArea);
+    CPUM_ASSERT_RD_MSR_FN(Ia32TscDeadline);
+    CPUM_ASSERT_RD_MSR_FN(Ia32X2ApicN);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxBase);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxPinbasedCtls);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxProcbasedCtls);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxExitCtls);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxEntryCtls);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxMisc);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxCr0Fixed0);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxCr0Fixed1);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxCr4Fixed0);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxCr4Fixed1);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxVmcsEnum);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxProcBasedCtls2);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxEptVpidCap);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxTruePinbasedCtls);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxTrueProcbasedCtls);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxTrueExitCtls);
+    CPUM_ASSERT_RD_MSR_FN(Ia32VmxTrueEntryCtls);
+    CPUM_ASSERT_RD_MSR_FN(Amd64Efer);
+    CPUM_ASSERT_RD_MSR_FN(Amd64SyscallTarget);
+    CPUM_ASSERT_RD_MSR_FN(Amd64LongSyscallTarget);
+    CPUM_ASSERT_RD_MSR_FN(Amd64CompSyscallTarget);
+    CPUM_ASSERT_RD_MSR_FN(Amd64SyscallFlagMask);
+    CPUM_ASSERT_RD_MSR_FN(Amd64FsBase);
+    CPUM_ASSERT_RD_MSR_FN(Amd64GsBase);
+    CPUM_ASSERT_RD_MSR_FN(Amd64KernelGsBase);
+    CPUM_ASSERT_RD_MSR_FN(Amd64TscAux);
+    CPUM_ASSERT_RD_MSR_FN(IntelEblCrPowerOn);
+    CPUM_ASSERT_RD_MSR_FN(IntelPlatformInfo100MHz);
+    CPUM_ASSERT_RD_MSR_FN(IntelPlatformInfo133MHz);
+    CPUM_ASSERT_RD_MSR_FN(IntelPkgCStConfigControl);
+    CPUM_ASSERT_RD_MSR_FN(IntelPmgIoCaptureBase);
+    CPUM_ASSERT_RD_MSR_FN(IntelLastBranchFromToN);
+    CPUM_ASSERT_RD_MSR_FN(IntelLastBranchFromN);
+    CPUM_ASSERT_RD_MSR_FN(IntelLastBranchToN);
+    CPUM_ASSERT_RD_MSR_FN(IntelLastBranchTos);
+    CPUM_ASSERT_RD_MSR_FN(IntelBblCrCtl);
+    CPUM_ASSERT_RD_MSR_FN(IntelBblCrCtl3);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7TemperatureTarget);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7MsrOffCoreResponseN);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7MiscPwrMgmt);
+    CPUM_ASSERT_RD_MSR_FN(IntelP6CrN);
+    CPUM_ASSERT_RD_MSR_FN(IntelCpuId1FeatureMaskEcdx);
+    CPUM_ASSERT_RD_MSR_FN(IntelCpuId1FeatureMaskEax);
+    CPUM_ASSERT_RD_MSR_FN(IntelCpuId80000001FeatureMaskEcdx);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7SandyAesNiCtl);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7TurboRatioLimit);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7LbrSelect);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7SandyErrorControl);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7VirtualLegacyWireCap);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7PowerCtl);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7SandyPebsNumAlt);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7PebsLdLat);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7PkgCnResidencyN);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7CoreCnResidencyN);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7SandyVrCurrentConfig);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7SandyVrMiscConfig);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7SandyRaplPowerUnit);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7SandyPkgCnIrtlN);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7SandyPkgC2Residency);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPkgPowerLimit);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPkgEnergyStatus);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPkgPerfStatus);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPkgPowerInfo);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplDramPowerLimit);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplDramEnergyStatus);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplDramPerfStatus);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplDramPowerInfo);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp0PowerLimit);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp0EnergyStatus);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp0Policy);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp0PerfStatus);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp1PowerLimit);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp1EnergyStatus);
+    CPUM_ASSERT_RD_MSR_FN(IntelI7RaplPp1Policy);
+
+    CPUM_ASSERT_RD_MSR_FN(P6LastBranchFromIp);
+    CPUM_ASSERT_RD_MSR_FN(P6LastBranchToIp);
+    CPUM_ASSERT_RD_MSR_FN(P6LastIntFromIp);
+    CPUM_ASSERT_RD_MSR_FN(P6LastIntToIp);
+
+    CPUM_ASSERT_RD_MSR_FN(AmdFam15hTscRate);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam15hLwpCfg);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam15hLwpCbAddr);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hMc4MiscN);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8PerfCtlN);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8PerfCtrN);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8SysCfg);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8HwCr);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8IorrBaseN);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8IorrMaskN);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8TopOfMemN);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8NbCfg1);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8McXcptRedir);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8CpuNameN);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8HwThermalCtrl);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8SwThermalCtrl);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8McCtlMaskN);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8SmiOnIoTrapN);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8SmiOnIoTrapCtlSts);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8IntPendingMessage);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8SmiTriggerIoCycle);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hMmioCfgBaseAddr);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hTrapCtlMaybe);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hPStateCurLimit);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hPStateControl);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hPStateStatus);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hPStateN);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hCofVidControl);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hCofVidStatus);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hCStateIoBaseAddr);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hCpuWatchdogTimer);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8SmmBase);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8SmmAddr);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8SmmMask);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8VmCr);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8IgnNe);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8SmmCtl);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8VmHSavePa);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hVmLockKey);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hSmmLockKey);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hLocalSmiStatus);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hOsVisWrkIdLength);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hOsVisWrkStatus);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam16hL2IPerfCtlN);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam16hL2IPerfCtrN);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam15hNorthbridgePerfCtlN);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam15hNorthbridgePerfCtrN);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7MicrocodeCtl);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7ClusterIdMaybe);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8CpuIdCtlStd07hEbax);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8CpuIdCtlStd06hEcx);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8CpuIdCtlStd01hEdcx);
+    CPUM_ASSERT_RD_MSR_FN(AmdK8CpuIdCtlExt01hEdcx);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7DebugStatusMaybe);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7BHTraceBaseMaybe);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7BHTracePtrMaybe);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7BHTraceLimitMaybe);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7HardwareDebugToolCfgMaybe);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7FastFlushCountMaybe);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7NodeId);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7DrXAddrMaskN);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7Dr0DataMatchMaybe);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7Dr0DataMaskMaybe);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7LoadStoreCfg);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7InstrCacheCfg);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7DataCacheCfg);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7BusUnitCfg);
+    CPUM_ASSERT_RD_MSR_FN(AmdK7DebugCtl2Maybe);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam15hFpuCfg);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam15hDecoderCfg);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hBusUnitCfg2);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam15hCombUnitCfg);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam15hCombUnitCfg2);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam15hCombUnitCfg3);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam15hExecUnitCfg);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam15hLoadStoreCfg2);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsFetchCtl);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsFetchLinAddr);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsFetchPhysAddr);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsOpExecCtl);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsOpRip);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsOpData);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsOpData2);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsOpData3);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsDcLinAddr);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsDcPhysAddr);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam10hIbsCtl);
+    CPUM_ASSERT_RD_MSR_FN(AmdFam14hIbsBrTarget);
+
+    AssertReturn(g_aCpumWrMsrFns[kCpumMsrWrFn_Invalid] == NULL, VERR_CPUM_IPE_2);
+    CPUM_ASSERT_WR_MSR_FN(Ia32P5McAddr);
+    CPUM_ASSERT_WR_MSR_FN(Ia32P5McType);
+    CPUM_ASSERT_WR_MSR_FN(Ia32TimestampCounter);
+    CPUM_ASSERT_WR_MSR_FN(Ia32ApicBase);
+    CPUM_ASSERT_WR_MSR_FN(Ia32FeatureControl);
+    CPUM_ASSERT_WR_MSR_FN(Ia32BiosUpdateTrigger);
+    CPUM_ASSERT_WR_MSR_FN(Ia32SmmMonitorCtl);
+    CPUM_ASSERT_WR_MSR_FN(Ia32PmcN);
+    CPUM_ASSERT_WR_MSR_FN(Ia32MonitorFilterLineSize);
+    CPUM_ASSERT_WR_MSR_FN(Ia32MPerf);
+    CPUM_ASSERT_WR_MSR_FN(Ia32APerf);
+    CPUM_ASSERT_WR_MSR_FN(Ia32MtrrPhysBaseN);
+    CPUM_ASSERT_WR_MSR_FN(Ia32MtrrPhysMaskN);
+    CPUM_ASSERT_WR_MSR_FN(Ia32MtrrFixed);
+    CPUM_ASSERT_WR_MSR_FN(Ia32MtrrDefType);
+    CPUM_ASSERT_WR_MSR_FN(Ia32Pat);
+    CPUM_ASSERT_WR_MSR_FN(Ia32SysEnterCs);
+    CPUM_ASSERT_WR_MSR_FN(Ia32SysEnterEsp);
+    CPUM_ASSERT_WR_MSR_FN(Ia32SysEnterEip);
+    CPUM_ASSERT_WR_MSR_FN(Ia32McgStatus);
+    CPUM_ASSERT_WR_MSR_FN(Ia32McgCtl);
+    CPUM_ASSERT_WR_MSR_FN(Ia32DebugCtl);
+    CPUM_ASSERT_WR_MSR_FN(Ia32SmrrPhysBase);
+    CPUM_ASSERT_WR_MSR_FN(Ia32SmrrPhysMask);
+    CPUM_ASSERT_WR_MSR_FN(Ia32PlatformDcaCap);
+    CPUM_ASSERT_WR_MSR_FN(Ia32Dca0Cap);
+    CPUM_ASSERT_WR_MSR_FN(Ia32PerfEvtSelN);
+    CPUM_ASSERT_WR_MSR_FN(Ia32PerfCtl);
+    CPUM_ASSERT_WR_MSR_FN(Ia32FixedCtrN);
+    CPUM_ASSERT_WR_MSR_FN(Ia32PerfCapabilities);
+    CPUM_ASSERT_WR_MSR_FN(Ia32FixedCtrCtrl);
+    CPUM_ASSERT_WR_MSR_FN(Ia32PerfGlobalStatus);
+    CPUM_ASSERT_WR_MSR_FN(Ia32PerfGlobalCtrl);
+    CPUM_ASSERT_WR_MSR_FN(Ia32PerfGlobalOvfCtrl);
+    CPUM_ASSERT_WR_MSR_FN(Ia32PebsEnable);
+    CPUM_ASSERT_WR_MSR_FN(Ia32ClockModulation);
+    CPUM_ASSERT_WR_MSR_FN(Ia32ThermInterrupt);
+    CPUM_ASSERT_WR_MSR_FN(Ia32ThermStatus);
+    CPUM_ASSERT_WR_MSR_FN(Ia32MiscEnable);
+    CPUM_ASSERT_WR_MSR_FN(Ia32McCtlStatusAddrMiscN);
+    CPUM_ASSERT_WR_MSR_FN(Ia32McNCtl2);
+    CPUM_ASSERT_WR_MSR_FN(Ia32DsArea);
+    CPUM_ASSERT_WR_MSR_FN(Ia32TscDeadline);
+    CPUM_ASSERT_WR_MSR_FN(Ia32X2ApicN);
+    CPUM_ASSERT_WR_MSR_FN(Amd64Efer);
+    CPUM_ASSERT_WR_MSR_FN(Amd64SyscallTarget);
+    CPUM_ASSERT_WR_MSR_FN(Amd64LongSyscallTarget);
+    CPUM_ASSERT_WR_MSR_FN(Amd64CompSyscallTarget);
+    CPUM_ASSERT_WR_MSR_FN(Amd64SyscallFlagMask);
+    CPUM_ASSERT_WR_MSR_FN(Amd64FsBase);
+    CPUM_ASSERT_WR_MSR_FN(Amd64GsBase);
+    CPUM_ASSERT_WR_MSR_FN(Amd64KernelGsBase);
+    CPUM_ASSERT_WR_MSR_FN(Amd64TscAux);
+
+    CPUM_ASSERT_WR_MSR_FN(IntelEblCrPowerOn);
+    CPUM_ASSERT_WR_MSR_FN(IntelPkgCStConfigControl);
+    CPUM_ASSERT_WR_MSR_FN(IntelPmgIoCaptureBase);
+    CPUM_ASSERT_WR_MSR_FN(IntelLastBranchFromToN);
+    CPUM_ASSERT_WR_MSR_FN(IntelLastBranchFromN);
+    CPUM_ASSERT_WR_MSR_FN(IntelLastBranchToN);
+    CPUM_ASSERT_WR_MSR_FN(IntelLastBranchTos);
+    CPUM_ASSERT_WR_MSR_FN(IntelBblCrCtl);
+    CPUM_ASSERT_WR_MSR_FN(IntelBblCrCtl3);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7TemperatureTarget);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7MsrOffCoreResponseN);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7MiscPwrMgmt);
+    CPUM_ASSERT_WR_MSR_FN(IntelP6CrN);
+    CPUM_ASSERT_WR_MSR_FN(IntelCpuId1FeatureMaskEcdx);
+    CPUM_ASSERT_WR_MSR_FN(IntelCpuId1FeatureMaskEax);
+    CPUM_ASSERT_WR_MSR_FN(IntelCpuId80000001FeatureMaskEcdx);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7SandyAesNiCtl);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7TurboRatioLimit);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7LbrSelect);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7SandyErrorControl);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7PowerCtl);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7SandyPebsNumAlt);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7PebsLdLat);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7SandyVrCurrentConfig);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7SandyVrMiscConfig);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7SandyPkgCnIrtlN);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7RaplPkgPowerLimit);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7RaplDramPowerLimit);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7RaplPp0PowerLimit);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7RaplPp0Policy);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7RaplPp1PowerLimit);
+    CPUM_ASSERT_WR_MSR_FN(IntelI7RaplPp1Policy);
+
+    CPUM_ASSERT_WR_MSR_FN(P6LastIntFromIp);
+    CPUM_ASSERT_WR_MSR_FN(P6LastIntToIp);
+
+    CPUM_ASSERT_WR_MSR_FN(AmdFam15hTscRate);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam15hLwpCfg);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam15hLwpCbAddr);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hMc4MiscN);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8PerfCtlN);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8PerfCtrN);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8SysCfg);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8HwCr);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8IorrBaseN);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8IorrMaskN);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8TopOfMemN);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8NbCfg1);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8McXcptRedir);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8CpuNameN);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8HwThermalCtrl);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8SwThermalCtrl);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8McCtlMaskN);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8SmiOnIoTrapN);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8SmiOnIoTrapCtlSts);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8IntPendingMessage);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8SmiTriggerIoCycle);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hMmioCfgBaseAddr);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hTrapCtlMaybe);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hPStateControl);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hPStateStatus);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hPStateN);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hCofVidControl);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hCofVidStatus);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hCStateIoBaseAddr);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hCpuWatchdogTimer);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8SmmBase);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8SmmAddr);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8SmmMask);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8VmCr);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8IgnNe);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8SmmCtl);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8VmHSavePa);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hVmLockKey);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hSmmLockKey);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hLocalSmiStatus);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hOsVisWrkIdLength);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hOsVisWrkStatus);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam16hL2IPerfCtlN);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam16hL2IPerfCtrN);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam15hNorthbridgePerfCtlN);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam15hNorthbridgePerfCtrN);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7MicrocodeCtl);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7ClusterIdMaybe);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8CpuIdCtlStd07hEbax);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8CpuIdCtlStd06hEcx);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8CpuIdCtlStd01hEdcx);
+    CPUM_ASSERT_WR_MSR_FN(AmdK8CpuIdCtlExt01hEdcx);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7DebugStatusMaybe);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7BHTraceBaseMaybe);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7BHTracePtrMaybe);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7BHTraceLimitMaybe);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7HardwareDebugToolCfgMaybe);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7FastFlushCountMaybe);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7NodeId);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7DrXAddrMaskN);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7Dr0DataMatchMaybe);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7Dr0DataMaskMaybe);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7LoadStoreCfg);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7InstrCacheCfg);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7DataCacheCfg);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7BusUnitCfg);
+    CPUM_ASSERT_WR_MSR_FN(AmdK7DebugCtl2Maybe);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam15hFpuCfg);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam15hDecoderCfg);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hBusUnitCfg2);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam15hCombUnitCfg);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam15hCombUnitCfg2);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam15hCombUnitCfg3);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam15hExecUnitCfg);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam15hLoadStoreCfg2);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsFetchCtl);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsFetchLinAddr);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsFetchPhysAddr);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsOpExecCtl);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsOpRip);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsOpData);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsOpData2);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsOpData3);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsDcLinAddr);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsDcPhysAddr);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam10hIbsCtl);
+    CPUM_ASSERT_WR_MSR_FN(AmdFam14hIbsBrTarget);
+
+    return VINF_SUCCESS;
+}
+#endif /* VBOX_STRICT && IN_RING3 */
+
+
+#ifdef IN_RING0
+
+/**
+ * Fast way for HM to access the MSR_K8_TSC_AUX register.
+ *
+ * @returns The register value.
+ * @param   pVCpu               Pointer to the cross context CPU structure for
+ *                              the calling EMT.
+ * @thread  EMT(pVCpu)
+ */
+VMMR0_INT_DECL(uint64_t) CPUMR0GetGuestTscAux(PVMCPU pVCpu)
+{
+    return pVCpu->cpum.s.GuestMsrs.msr.TscAux;
+}
+
+
+/**
+ * Fast way for HM to access the MSR_K8_TSC_AUX register.
+ *
+ * @param   pVCpu               Pointer to the cross context CPU structure for
+ *                              the calling EMT.
+ * @param   uValue              The new value.
+ * @thread  EMT(pVCpu)
+ */
+VMMR0_INT_DECL(void) CPUMR0SetGuestTscAux(PVMCPU pVCpu, uint64_t uValue)
+{
+    pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue;
+}
+
+#endif /* IN_RING0 */
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 49892)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 49893)
@@ -870,4 +870,5 @@
 }
 
+#ifndef VBOX_WITH_NEW_MSR_CODE
 
 /**
@@ -1585,4 +1586,6 @@
 }
 
+#endif /* !VBOX_WITH_NEW_MSR_CODE */
+
 
 VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
@@ -1849,4 +1852,64 @@
 {
     return pVCpu->cpum.s.Guest.msrEFER;
+}
+
+
+/**
+ * Looks up a CPUID leaf in the CPUID leaf array.
+ *
+ * @returns Pointer to the leaf if found, NULL if not.
+ *
+ * @param   pVM                 Pointer to the cross context VM structure.
+ * @param   uLeaf               The leaf to get.
+ * @param   uSubLeaf            The subleaf, if applicable.  Just pass 0 if it
+ *                              isn't.
+ */
+PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf)
+{
+    unsigned            iEnd     = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
+    if (iEnd)
+    {
+        unsigned        iStart   = 0;
+        PCPUMCPUIDLEAF  paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
+        for (;;)
+        {
+            unsigned i = iStart + (iEnd - iStart) / 2U;
+            if (uLeaf < paLeaves[i].uLeaf)
+            {
+                if (i <= iStart)
+                    return NULL;
+                iEnd = i;
+            }
+            else if (uLeaf > paLeaves[i].uLeaf)
+            {
+                i += 1;
+                if (i >= iEnd)
+                    return NULL;
+                iStart = i;
+            }
+            else
+            {
+                uSubLeaf &= paLeaves[i].fSubLeafMask;
+                if (uSubLeaf != paLeaves[i].uSubLeaf)
+                {
+                    /* Find the right subleaf.  We return the last one before
+                       uSubLeaf if we don't find an exact match. */
+                    if (uSubLeaf < paLeaves[i].uSubLeaf)
+                        while (   i > 0
+                               && uLeaf    == paLeaves[i].uLeaf
+                               && uSubLeaf  < paLeaves[i].uSubLeaf)
+                            i--;
+                    else
+                        while (   i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
+                               && uLeaf    == paLeaves[i + 1].uLeaf
+                               && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
+                            i++;
+                }
+                return &paLeaves[i];
+            }
+        }
+    }
+
+    return NULL;
 }
 
@@ -1895,5 +1958,5 @@
     if (    iLeaf == 4
         &&  cCurrentCacheIndex < 3
-        &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
+        &&  pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
     {
         uint32_t type, level, sharing, linesize,
@@ -1997,4 +2060,6 @@
 VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
 {
+    PCPUMCPUIDLEAF pLeaf;
+
     switch (enmFeature)
     {
@@ -2003,9 +2068,14 @@
          */
         case CPUMCPUIDFEATURE_APIC:
-            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
-                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
-            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
-                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
-                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
+            if (pLeaf)
+                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
+
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (   pLeaf
+                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
+                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
+
+            pVM->cpum.s.GuestFeatures.fApic = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled APIC\n"));
             break;
@@ -2015,6 +2085,8 @@
         */
         case CPUMCPUIDFEATURE_X2APIC:
-            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
-                pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
+            if (pLeaf)
+                pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
+            pVM->cpum.s.GuestFeatures.fX2Apic = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
             break;
@@ -2025,6 +2097,5 @@
          */
         case CPUMCPUIDFEATURE_SEP:
-        {
-            if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
+            if (!pVM->cpum.s.HostFeatures.fSysEnter)
             {
                 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
@@ -2032,9 +2103,10 @@
             }
 
-            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
-                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
+            if (pLeaf)
+                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
+            pVM->cpum.s.GuestFeatures.fSysEnter = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
             break;
-        }
 
         /*
@@ -2043,15 +2115,13 @@
          */
         case CPUMCPUIDFEATURE_SYSCALL:
-        {
-            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
-                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (   !pLeaf
+                || !pVM->cpum.s.HostFeatures.fSysCall)
             {
 #if HC_ARCH_BITS == 32
-                /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL not set it seems in 32 bits mode.
-                 * Even when the cpu is capable of doing so in 64 bits mode.
-                 */
-                if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
-                    ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
-                    ||  !(ASMCpuId_EDX(1) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
+                /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL not set it seems in 32-bit
+                   mode by Intel, even when the cpu is capable of doing so in
+                   64-bit mode.  Long mode requires syscall support. */
+                if (!pVM->cpum.s.HostFeatures.fLongMode)
 #endif
                 {
@@ -2060,9 +2130,10 @@
                 }
             }
+
             /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
-            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
+            pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
+            pVM->cpum.s.GuestFeatures.fSysCall = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
             break;
-        }
 
         /*
@@ -2071,6 +2142,5 @@
          */
         case CPUMCPUIDFEATURE_PAE:
-        {
-            if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
+            if (!pVM->cpum.s.HostFeatures.fPae)
             {
                 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
@@ -2078,12 +2148,16 @@
             }
 
-            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
-                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
-            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
-                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
-                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
+            if (pLeaf)
+                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
+
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (    pLeaf
+                &&  pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
+                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
+
+            pVM->cpum.s.GuestFeatures.fPae = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
             break;
-        }
 
         /*
@@ -2092,7 +2166,7 @@
          */
         case CPUMCPUIDFEATURE_LONG_MODE:
-        {
-            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
-                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (   !pLeaf
+                || !pVM->cpum.s.HostFeatures.fLongMode)
             {
                 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
@@ -2101,8 +2175,8 @@
 
             /* Valid for both Intel and AMD. */
-            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
+            pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
+            pVM->cpum.s.GuestFeatures.fLongMode = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
             break;
-        }
 
         /*
@@ -2111,7 +2185,7 @@
          */
         case CPUMCPUIDFEATURE_NX:
-        {
-            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
-                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX))
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (   !pLeaf
+                || !pVM->cpum.s.HostFeatures.fNoExecute)
             {
                 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
@@ -2120,8 +2194,9 @@
 
             /* Valid for both Intel and AMD. */
-            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_NX;
+            pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
+            pVM->cpum.s.GuestFeatures.fNoExecute = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
             break;
-        }
+
 
         /*
@@ -2130,7 +2205,7 @@
          */
         case CPUMCPUIDFEATURE_LAHF:
-        {
-            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
-                ||  !(ASMCpuId_ECX(0x80000001) & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (   !pLeaf
+                || !pVM->cpum.s.HostFeatures.fLahfSahf)
             {
                 LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
@@ -2139,19 +2214,27 @@
 
             /* Valid for both Intel and AMD. */
-            pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
+            pVM->cpum.s.aGuestCpuIdExt[1].ecx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
+            pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
             break;
-        }
-
+
+        /*
+         * Set the page attribute table bit.  This is alternative page level
+         * cache control that doesn't much matter when everything is
+         * virtualized, though it may when passing thru device memory.
+         */
         case CPUMCPUIDFEATURE_PAT:
-        {
-            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
-                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
-            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
-                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
-                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
+            if (pLeaf)
+                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
+
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (   pLeaf
+                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
+                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
+
+            pVM->cpum.s.GuestFeatures.fPat = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n"));
             break;
-        }
 
         /*
@@ -2160,8 +2243,8 @@
          */
         case CPUMCPUIDFEATURE_RDTSCP:
-        {
-            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
-                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
-                ||  pVM->cpum.s.u8PortableCpuIdLevel > 0)
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (   !pLeaf
+                || !pVM->cpum.s.HostFeatures.fRdTscP
+                || pVM->cpum.s.u8PortableCpuIdLevel > 0)
             {
                 if (!pVM->cpum.s.u8PortableCpuIdLevel)
@@ -2171,8 +2254,8 @@
 
             /* Valid for both Intel and AMD. */
-            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
+            pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
+            pVM->cpum.s.HostFeatures.fRdTscP = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
             break;
-        }
 
        /*
@@ -2180,6 +2263,8 @@
         */
         case CPUMCPUIDFEATURE_HVP:
-            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
+            if (pLeaf)
                 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_HVP;
+            pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
             break;
@@ -2189,4 +2274,5 @@
             break;
     }
+
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
@@ -2208,36 +2294,114 @@
     switch (enmFeature)
     {
+        case CPUMCPUIDFEATURE_APIC:         return pVM->cpum.s.GuestFeatures.fApic;
+        case CPUMCPUIDFEATURE_X2APIC:       return pVM->cpum.s.GuestFeatures.fX2Apic;
+        case CPUMCPUIDFEATURE_SYSCALL:      return pVM->cpum.s.GuestFeatures.fSysCall;
+        case CPUMCPUIDFEATURE_SEP:          return pVM->cpum.s.GuestFeatures.fSysEnter;
+        case CPUMCPUIDFEATURE_PAE:          return pVM->cpum.s.GuestFeatures.fPae;
+        case CPUMCPUIDFEATURE_NX:           return pVM->cpum.s.GuestFeatures.fNoExecute;
+        case CPUMCPUIDFEATURE_LAHF:         return pVM->cpum.s.GuestFeatures.fLahfSahf;
+        case CPUMCPUIDFEATURE_LONG_MODE:    return pVM->cpum.s.GuestFeatures.fLongMode;
+        case CPUMCPUIDFEATURE_PAT:          return pVM->cpum.s.GuestFeatures.fPat;
+        case CPUMCPUIDFEATURE_RDTSCP:       return pVM->cpum.s.GuestFeatures.fRdTscP;
+        case CPUMCPUIDFEATURE_HVP:          return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
+
+        case CPUMCPUIDFEATURE_INVALID:
+        case CPUMCPUIDFEATURE_32BIT_HACK:
+            break;
+    }
+    AssertFailed();
+    return false;
+}
+
+
+/**
+ * Clears a CPUID feature bit.
+ *
+ * @param   pVM             Pointer to the VM.
+ * @param   enmFeature      The feature to clear.
+ */
+VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
+{
+    PCPUMCPUIDLEAF pLeaf;
+    switch (enmFeature)
+    {
+        case CPUMCPUIDFEATURE_APIC:
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
+            if (pLeaf)
+                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
+
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (   pLeaf
+                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
+                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
+
+            pVM->cpum.s.GuestFeatures.fApic = 0;
+            Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n"));
+            break;
+
+        case CPUMCPUIDFEATURE_X2APIC:
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
+            if (pLeaf)
+                pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
+            pVM->cpum.s.GuestFeatures.fX2Apic = 0;
+            Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
+            break;
+
         case CPUMCPUIDFEATURE_PAE:
-        {
-            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
-                return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
-            break;
-        }
-
-        case CPUMCPUIDFEATURE_NX:
-        {
-            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
-                return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_NX);
-        }
-
-        case CPUMCPUIDFEATURE_SYSCALL:
-        {
-            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
-                return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL);
-        }
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
+            if (pLeaf)
+                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
+
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (   pLeaf
+                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
+                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
+
+            pVM->cpum.s.GuestFeatures.fPae = 0;
+            Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
+            break;
+
+        case CPUMCPUIDFEATURE_PAT:
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
+            if (pLeaf)
+                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
+
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (   pLeaf
+                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
+                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
+
+            pVM->cpum.s.GuestFeatures.fPat = 0;
+            Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
+            break;
+
+        case CPUMCPUIDFEATURE_LONG_MODE:
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (pLeaf)
+                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
+            pVM->cpum.s.GuestFeatures.fLongMode = 0;
+            break;
+
+        case CPUMCPUIDFEATURE_LAHF:
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (pLeaf)
+                pVM->cpum.s.aGuestCpuIdExt[1].ecx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
+            pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
+            break;
 
         case CPUMCPUIDFEATURE_RDTSCP:
-        {
-            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
-                return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
-            break;
-        }
-
-        case CPUMCPUIDFEATURE_LONG_MODE:
-        {
-            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
-                return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
-            break;
-        }
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
+            if (pLeaf)
+                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
+            pVM->cpum.s.GuestFeatures.fRdTscP = 0;
+            Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
+            break;
+
+        case CPUMCPUIDFEATURE_HVP:
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
+            if (pLeaf)
+                pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
+            pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
+            break;
 
         default:
@@ -2245,92 +2409,5 @@
             break;
     }
-    return false;
-}
-
-
-/**
- * Clears a CPUID feature bit.
- *
- * @param   pVM             Pointer to the VM.
- * @param   enmFeature      The feature to clear.
- */
-VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
-{
-    switch (enmFeature)
-    {
-        /*
-         * Set the APIC bit in both feature masks.
-         */
-        case CPUMCPUIDFEATURE_APIC:
-            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
-                pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
-            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
-                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
-                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
-            Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n"));
-            break;
-
-        /*
-         * Clear the x2APIC bit in the standard feature mask.
-         */
-        case CPUMCPUIDFEATURE_X2APIC:
-            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
-                pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
-            Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
-            break;
-
-        case CPUMCPUIDFEATURE_PAE:
-        {
-            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
-                pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
-            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
-                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
-                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
-            Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
-            break;
-        }
-
-        case CPUMCPUIDFEATURE_PAT:
-        {
-            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
-                pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
-            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
-                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
-                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
-            Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
-            break;
-        }
-
-        case CPUMCPUIDFEATURE_LONG_MODE:
-        {
-            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
-                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
-            break;
-        }
-
-        case CPUMCPUIDFEATURE_LAHF:
-        {
-            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
-                pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
-            break;
-        }
-
-        case CPUMCPUIDFEATURE_RDTSCP:
-        {
-            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
-                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
-            Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
-            break;
-        }
-
-        case CPUMCPUIDFEATURE_HVP:
-            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
-                pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_HVP;
-            break;
-
-        default:
-            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
-            break;
-    }
+
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
@@ -2349,5 +2426,5 @@
 VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
 {
-    return pVM->cpum.s.enmHostCpuVendor;
+    return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
 }
 
@@ -2361,5 +2438,5 @@
 VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
 {
-    return pVM->cpum.s.enmGuestCpuVendor;
+    return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
 }
 
Index: /trunk/src/VBox/VMM/VMMAll/MMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/MMAll.cpp	(revision 49892)
+++ /trunk/src/VBox/VMM/VMMAll/MMAll.cpp	(revision 49893)
@@ -568,4 +568,8 @@
         TAG2STR(CFGM_USER);
 
+        TAG2STR(CPUM_CTX);
+        TAG2STR(CPUM_CPUID);
+        TAG2STR(CPUM_MSRS);
+
         TAG2STR(CSAM);
         TAG2STR(CSAM_PATCH);
Index: /trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp	(revision 49892)
+++ /trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp	(revision 49893)
@@ -322,4 +322,5 @@
 }
 
+
 /**
  * Wrapper for mmHyperAllocInternal
@@ -327,7 +328,5 @@
 VMMDECL(int) MMHyperAlloc(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
 {
-    int rc;
-
-    rc = mmHyperLock(pVM);
+    int rc = mmHyperLock(pVM);
     AssertRCReturn(rc, rc);
 
@@ -339,4 +338,17 @@
     return rc;
 }
+
+
+/**
+ * Duplicates a block of memory.
+ */
+VMMDECL(int) MMHyperDupMem(PVM pVM, const void *pvSrc, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
+{
+    int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
+    if (RT_SUCCESS(rc))
+        memcpy(*ppv, pvSrc, cb);
+    return rc;
+}
+
 
 /**
Index: /trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp	(revision 49892)
+++ /trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp	(revision 49893)
@@ -76,4 +76,23 @@
 #endif
 
+/**
+ * CPUID bits to unify among all cores.
+ */
+static struct
+{
+    uint32_t uLeaf;  /**< Leaf to check. */
+    uint32_t ecx;    /**< which bits in ecx to unify between CPUs. */
+    uint32_t edx;    /**< which bits in edx to unify between CPUs. */
+}
+const g_aCpuidUnifyBits[] =
+{
+    {
+        0x00000001,
+        X86_CPUID_FEATURE_ECX_CX16 | X86_CPUID_FEATURE_ECX_MONITOR,
+        X86_CPUID_FEATURE_EDX_CX8
+    }
+};
+
+
 
 /*******************************************************************************
@@ -114,4 +133,6 @@
 
 /**
+ *
+ *
  * Check the CPUID features of this particular CPU and disable relevant features
  * for the guest which do not exist on this CPU. We have seen systems where the
@@ -127,36 +148,30 @@
 static DECLCALLBACK(void) cpumR0CheckCpuid(RTCPUID idCpu, void *pvUser1, void *pvUser2)
 {
+    PVM     pVM   = (PVM)pvUser1;
+    PCPUM   pCPUM = &pVM->cpum.s;
+
     NOREF(idCpu); NOREF(pvUser2);
-
-    struct
-    {
-        uint32_t uLeave; /* leave to check */
-        uint32_t ecx;    /* which bits in ecx to unify between CPUs */
-        uint32_t edx;    /* which bits in edx to unify between CPUs */
-    } aCpuidUnify[]
-    =
-    {
-        { 0x00000001, X86_CPUID_FEATURE_ECX_CX16
-                    | X86_CPUID_FEATURE_ECX_MONITOR,
-                      X86_CPUID_FEATURE_EDX_CX8 }
-    };
-    PVM pVM = (PVM)pvUser1;
-    PCPUM pCPUM = &pVM->cpum.s;
-    for (uint32_t i = 0; i < RT_ELEMENTS(aCpuidUnify); i++)
-    {
-        uint32_t uLeave = aCpuidUnify[i].uLeave;
+    for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++)
+    {
+        /* Note! Cannot use cpumCpuIdGetLeaf from here because we're not
+                 necessarily in the VM process context.  So, we're using the
+                 legacy arrays as temporary storage. */
+
+        uint32_t   uLeaf = g_aCpuidUnifyBits[i].uLeaf;
+        PCPUMCPUID pLegacyLeaf;
+        if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
+            pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdStd[uLeaf];
+        else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
+            pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)];
+        else if (uLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
+            pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdCentaur[uLeaf - UINT32_C(0xc0000000)];
+        else
+            continue;
+
         uint32_t eax, ebx, ecx, edx;
-
-        ASMCpuId_Idx_ECX(uLeave, 0, &eax, &ebx, &ecx, &edx);
-        PCPUMCPUID paLeaves;
-        if (uLeave < 0x80000000)
-            paLeaves = &pCPUM->aGuestCpuIdStd[uLeave - 0x00000000];
-        else if (uLeave < 0xc0000000)
-            paLeaves = &pCPUM->aGuestCpuIdExt[uLeave - 0x80000000];
-        else
-            paLeaves = &pCPUM->aGuestCpuIdCentaur[uLeave - 0xc0000000];
-        /* unify important bits */
-        ASMAtomicAndU32(&paLeaves->ecx, ecx | ~aCpuidUnify[i].ecx);
-        ASMAtomicAndU32(&paLeaves->edx, edx | ~aCpuidUnify[i].edx);
+        ASMCpuIdExSlow(uLeaf, 0, 0, 0, &eax, &ebx, &ecx, &edx);
+
+        ASMAtomicAndU32(&pLegacyLeaf->ecx, ecx | ~g_aCpuidUnifyBits[i].ecx);
+        ASMAtomicAndU32(&pLegacyLeaf->edx, edx | ~g_aCpuidUnifyBits[i].edx);
     }
 }
@@ -260,5 +275,36 @@
         }
 
+        /*
+         * Unify/cross check some CPUID feature bits on all available CPU cores
+         * and threads.  We've seen CPUs where the monitor support differed.
+         *
+         * Because the hyper heap isn't always mapped into ring-0, we cannot
+ * access it from an RTMpOnAll callback.  We use the legacy CPUID arrays
+         * as temp ring-0 accessible memory instead, ASSUMING that they're all
+         * up to date when we get here.
+         */
         RTMpOnAll(cpumR0CheckCpuid, pVM, NULL);
+
+        for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++)
+        {
+            uint32_t        uLeaf = g_aCpuidUnifyBits[i].uLeaf;
+            PCPUMCPUIDLEAF  pLeaf = cpumCpuIdGetLeaf(pVM, uLeaf, 0);
+            if (pLeaf)
+            {
+                PCPUMCPUID pLegacyLeaf;
+                if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
+                    pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdStd[uLeaf];
+                else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
+                    pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)];
+                else if (uLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
+                    pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdCentaur[uLeaf - UINT32_C(0xc0000000)];
+                else
+                    continue;
+
+                pLeaf->uEcx = pLegacyLeaf->ecx;
+                pLeaf->uEdx = pLegacyLeaf->edx;
+            }
+        }
+
     }
 
Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 49892)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 49893)
@@ -5899,5 +5899,5 @@
         switch (pMsr->u32Msr)
         {
-            case MSR_K8_TSC_AUX:        CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);  break;
+            case MSR_K8_TSC_AUX:        CPUMR0SetGuestTscAux(pVCpu, pMsr->u64Value);             break;
             case MSR_K8_LSTAR:          pMixedCtx->msrLSTAR        = pMsr->u64Value;             break;
             case MSR_K6_STAR:           pMixedCtx->msrSTAR         = pMsr->u64Value;             break;
@@ -8147,8 +8147,5 @@
             AssertRC(rc2);
             Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
-            uint64_t u64GuestTscAuxMsr;
-            rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAuxMsr);
-            AssertRC(rc2);
-            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, u64GuestTscAuxMsr, true /* fUpdateHostMsr */);
+            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu), true /* fUpdateHostMsr */);
         }
         else
Index: /trunk/src/VBox/VMM/VMMR3/CPUM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 49892)
+++ /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 49893)
@@ -55,9 +55,10 @@
 #include <VBox/err.h>
 #include <VBox/log.h>
+#include <iprt/asm-amd64-x86.h>
 #include <iprt/assert.h>
-#include <iprt/asm-amd64-x86.h>
+#include <iprt/cpuset.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
 #include <iprt/string.h>
-#include <iprt/mp.h>
-#include <iprt/cpuset.h>
 #include "internal/pgm.h"
 
@@ -115,5 +116,4 @@
 *   Internal Functions                                                         *
 *******************************************************************************/
-static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
 static int cpumR3CpuIdInit(PVM pVM);
 static DECLCALLBACK(int)  cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
@@ -581,5 +581,5 @@
 
     /*
-     * Assert alignment and sizes.
+     * Assert alignment, sizes and tables.
      */
     AssertCompileMemberAlignment(VM, cpum.s, 32);
@@ -592,8 +592,13 @@
     AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
     AssertCompileMemberSizeAlignment(VM, aCpus[0].cpum.s, 64);
+#ifdef VBOX_STRICT
+    int rc2 = cpumR3MsrStrictInitChecks();
+    AssertRCReturn(rc2, rc2);
+#endif
 
     /* Calculate the offset from CPUM to CPUMCPU for the first CPU. */
     pVM->cpum.s.offCPUMCPU0 = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum);
     Assert((uintptr_t)&pVM->cpum + pVM->cpum.s.offCPUMCPU0 == (uintptr_t)&pVM->aCpus[0].cpum);
+
 
     /* Calculate the offset from CPUMCPU to CPUM. */
@@ -647,11 +652,15 @@
 
     /*
-     * Detect the host CPU vendor.
-     * (The guest CPU vendor is re-detected later on.)
-     */
-    uint32_t uEAX, uEBX, uECX, uEDX;
-    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
-    pVM->cpum.s.enmHostCpuVendor = cpumR3DetectVendor(uEAX, uEBX, uECX, uEDX);
-    pVM->cpum.s.enmGuestCpuVendor = pVM->cpum.s.enmHostCpuVendor;
+     * Gather info about the host CPU.
+     */
+    PCPUMCPUIDLEAF  paLeaves;
+    uint32_t        cLeaves;
+    int rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves);
+    AssertLogRelRCReturn(rc, rc);
+
+    rc = cpumR3CpuIdExplodeFeatures(paLeaves, cLeaves, &pVM->cpum.s.HostFeatures);
+    RTMemFree(paLeaves);
+    AssertLogRelRCReturn(rc, rc);
+    pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor;
 
     /*
@@ -662,8 +671,8 @@
      * Register saved state data item.
      */
-    int rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
-                                   NULL, cpumR3LiveExec, NULL,
-                                   NULL, cpumR3SaveExec, NULL,
-                                   cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
+    rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
+                               NULL, cpumR3LiveExec, NULL,
+                               NULL, cpumR3SaveExec, NULL,
+                               cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
     if (RT_FAILURE(rc))
         return rc;
@@ -700,30 +709,173 @@
 
 /**
- * Detect the CPU vendor give n the
- *
- * @returns The vendor.
- * @param   uEAX                EAX from CPUID(0).
- * @param   uEBX                EBX from CPUID(0).
- * @param   uECX                ECX from CPUID(0).
- * @param   uEDX                EDX from CPUID(0).
- */
-static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
-{
-    if (ASMIsValidStdRange(uEAX))
-    {
-        if (ASMIsAmdCpuEx(uEBX, uECX, uEDX))
-            return CPUMCPUVENDOR_AMD;
-
-        if (ASMIsIntelCpuEx(uEBX, uECX, uEDX))
-            return CPUMCPUVENDOR_INTEL;
-
-        if (ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX))
-            return CPUMCPUVENDOR_VIA;
-
-        /** @todo detect the other buggers... */
-    }
-
-    return CPUMCPUVENDOR_UNKNOWN;
+ * Loads MSR range overrides.
+ *
+ * This must be called before the MSR ranges are moved from the normal heap to
+ * the hyper heap!
+ *
+ * @returns VBox status code (VMSetError called).
+ * @param   pVM                 Pointer to the cross context VM structure.
+ * @param   pMsrNode            The CFGM node with the MSR overrides.
+ */
+static int cpumR3LoadMsrOverrides(PVM pVM, PCFGMNODE pMsrNode)
+{
+    for (PCFGMNODE pNode = CFGMR3GetFirstChild(pMsrNode); pNode; pNode = CFGMR3GetNextChild(pNode))
+    {
+        /*
+         * Assemble a valid MSR range.
+         */
+        CPUMMSRRANGE MsrRange;
+        MsrRange.offCpumCpu = 0;
+        MsrRange.fReserved  = 0;
+
+        int rc = CFGMR3GetName(pNode, MsrRange.szName, sizeof(MsrRange.szName));
+        if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry (name is probably too long): %Rrc\n", rc);
+
+        rc = CFGMR3QueryU32(pNode, "First", &MsrRange.uFirst);
+        if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying mandatory 'First' value: %Rrc\n",
+                              MsrRange.szName, rc);
+
+        rc = CFGMR3QueryU32Def(pNode, "Last", &MsrRange.uLast, MsrRange.uFirst);
+        if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Last' value: %Rrc\n",
+                              MsrRange.szName, rc);
+
+        char szType[32];
+        rc = CFGMR3QueryStringDef(pNode, "Type", szType, sizeof(szType), "FixedValue");
+        if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Type' value: %Rrc\n",
+                              MsrRange.szName, rc);
+        if (!RTStrICmp(szType, "FixedValue"))
+        {
+            MsrRange.enmRdFn = kCpumMsrRdFn_FixedValue;
+            MsrRange.enmWrFn = kCpumMsrWrFn_IgnoreWrite;
+
+            rc = CFGMR3QueryU64Def(pNode, "Value", &MsrRange.uInitOrReadValue, 0);
+            if (RT_FAILURE(rc))
+                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Value' value: %Rrc\n",
+                                  MsrRange.szName, rc);
+
+            rc = CFGMR3QueryU64Def(pNode, "WrGpMask", &MsrRange.fWrGpMask, 0);
+            if (RT_FAILURE(rc))
+                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrGpMask' value: %Rrc\n",
+                                  MsrRange.szName, rc);
+
+            rc = CFGMR3QueryU64Def(pNode, "WrIgnMask", &MsrRange.fWrIgnMask, 0);
+            if (RT_FAILURE(rc))
+                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrIgnMask' value: %Rrc\n",
+                                  MsrRange.szName, rc);
+        }
+        else
+            return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
+                              "Invalid MSR entry '%s': Unknown type '%s'\n", MsrRange.szName, szType);
+
+        /*
+         * Insert the range into the table (replaces/splits/shrinks existing
+         * MSR ranges).
+         */
+        rc = cpumR3MsrRangesInsert(&pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges, &MsrRange);
+        if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Error adding MSR entry '%s': %Rrc\n", MsrRange.szName, rc);
+    }
+
+    return VINF_SUCCESS;
 }
+
+
+/**
+ * Loads CPUID leaf overrides.
+ *
+ * This must be called before the CPUID leaves are moved from the normal
+ * heap to the hyper heap!
+ *
+ * @returns VBox status code (VMSetError called).
+ * @param   pVM             Pointer to the cross context VM structure.
+ * @param   pParentNode     The CFGM node with the CPUID leaves.
+ * @param   pszLabel        How to label the overrides we're loading.
+ */
+static int cpumR3LoadCpuIdOverrides(PVM pVM, PCFGMNODE pParentNode, const char *pszLabel)
+{
+    for (PCFGMNODE pNode = CFGMR3GetFirstChild(pParentNode); pNode; pNode = CFGMR3GetNextChild(pNode))
+    {
+        /*
+         * Get the leaf and subleaf numbers.
+         */
+        char szName[128];
+        int rc = CFGMR3GetName(pNode, szName, sizeof(szName));
+        if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry (name is probably too long): %Rrc\n", pszLabel, rc);
+
+        /* The leaf number is either specified directly or thru the node name. */
+        uint32_t uLeaf;
+        rc = CFGMR3QueryU32(pNode, "Leaf", &uLeaf);
+        if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+        {
+            rc = RTStrToUInt32Full(szName, 16, &uLeaf);
+            if (rc != VINF_SUCCESS)
+                return VMSetError(pVM, VERR_INVALID_NAME, RT_SRC_POS,
+                                  "Invalid %s entry: Invalid leaf number: '%s' \n", pszLabel, szName);
+        }
+        else if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'Leaf' value: %Rrc\n",
+                              pszLabel, szName, rc);
+
+        uint32_t uSubLeaf;
+        rc = CFGMR3QueryU32Def(pNode, "SubLeaf", &uSubLeaf, 0);
+        if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeaf' value: %Rrc\n",
+                              pszLabel, szName, rc);
+
+        uint32_t fSubLeafMask;
+        rc = CFGMR3QueryU32Def(pNode, "SubLeafMask", &fSubLeafMask, 0);
+        if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeafMask' value: %Rrc\n",
+                              pszLabel, szName, rc);
+
+        /*
+         * Look up the specified leaf, since the output register values
+         * defaults to any existing values.  This allows overriding a single
+         * register, without needing to know the other values.
+         */
+        PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves,
+                                                   uLeaf, uSubLeaf);
+        CPUMCPUIDLEAF   Leaf;
+        if (pLeaf)
+            Leaf = *pLeaf;
+        else
+            RT_ZERO(Leaf);
+        Leaf.uLeaf          = uLeaf;
+        Leaf.uSubLeaf       = uSubLeaf;
+        Leaf.fSubLeafMask   = fSubLeafMask;
+
+        rc = CFGMR3QueryU32Def(pNode, "eax", &Leaf.uEax, Leaf.uEax);
+        if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'eax' value: %Rrc\n",
+                              pszLabel, szName, rc);
+        rc = CFGMR3QueryU32Def(pNode, "ebx", &Leaf.uEbx, Leaf.uEbx);
+        if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ebx' value: %Rrc\n",
+                              pszLabel, szName, rc);
+        rc = CFGMR3QueryU32Def(pNode, "ecx", &Leaf.uEcx, Leaf.uEcx);
+        if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ecx' value: %Rrc\n",
+                              pszLabel, szName, rc);
+        rc = CFGMR3QueryU32Def(pNode, "edx", &Leaf.uEdx, Leaf.uEdx);
+        if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'edx' value: %Rrc\n",
+                              pszLabel, szName, rc);
+
+        /*
+         * Insert the leaf into the table (replaces existing ones).
+         */
+        rc = cpumR3CpuIdInsert(&pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, &pVM->cpum.s.GuestInfo.cCpuIdLeaves, &Leaf);
+        if (RT_FAILURE(rc))
+            return VMSetError(pVM, rc, RT_SRC_POS, "Error adding CPUID leaf entry '%s': %Rrc\n", szName, rc);
+    }
+
+    return VINF_SUCCESS;
+}
+
 
 
@@ -815,4 +967,66 @@
 
 
+static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCPUM, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves)
+{
+    /*
+     * Install the CPUID information.
+     */
+    int rc = MMHyperDupMem(pVM, paLeaves, sizeof(paLeaves[0]) * cLeaves, 32,
+                           MM_TAG_CPUM_CPUID, (void **)&pCPUM->GuestInfo.paCpuIdLeavesR3);
+
+    AssertLogRelRCReturn(rc, rc);
+
+    pCPUM->GuestInfo.paCpuIdLeavesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3);
+    pCPUM->GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3);
+    Assert(MMHyperR0ToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesR0) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3);
+    Assert(MMHyperRCToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesRC) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3);
+
+    /*
+     * Explode the guest CPU features.
+     */
+    rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures);
+    AssertLogRelRCReturn(rc, rc);
+
+
+    /*
+     * Populate the legacy arrays.  Currently used for everything, later only
+     * for patch manager.
+     */
+    struct { PCPUMCPUID paCpuIds; uint32_t cCpuIds, uBase; } aOldRanges[] =
+    {
+        { pCPUM->aGuestCpuIdStd,        RT_ELEMENTS(pCPUM->aGuestCpuIdStd),     0x00000000 },
+        { pCPUM->aGuestCpuIdExt,        RT_ELEMENTS(pCPUM->aGuestCpuIdExt),     0x80000000 },
+        { pCPUM->aGuestCpuIdCentaur,    RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), 0xc0000000 },
+        { pCPUM->aGuestCpuIdHyper,      RT_ELEMENTS(pCPUM->aGuestCpuIdHyper),   0x40000000 },
+    };
+    for (uint32_t i = 0; i < RT_ELEMENTS(aOldRanges); i++)
+    {
+        uint32_t    cLeft       = aOldRanges[i].cCpuIds;
+        uint32_t    uLeaf       = aOldRanges[i].uBase + cLeft;
+        PCPUMCPUID  pLegacyLeaf = &aOldRanges[i].paCpuIds[cLeft];
+        while (cLeft-- > 0)
+        {
+            uLeaf--;
+            pLegacyLeaf--;
+
+            PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, uLeaf, 0);
+            if (pLeaf)
+            {
+                pLegacyLeaf->eax = pLeaf->uEax;
+                pLegacyLeaf->ebx = pLeaf->uEbx;
+                pLegacyLeaf->ecx = pLeaf->uEcx;
+                pLegacyLeaf->edx = pLeaf->uEdx;
+            }
+            else
+                *pLegacyLeaf = pCPUM->GuestInfo.DefCpuId;
+        }
+    }
+
+    pCPUM->GuestCpuIdDef = pCPUM->GuestInfo.DefCpuId;
+
+    return VINF_SUCCESS;
+}
+
+
 /**
  * Initializes the emulated CPU's cpuid information.
@@ -825,18 +1039,17 @@
     PCPUM       pCPUM    = &pVM->cpum.s;
     PCFGMNODE   pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
-    uint32_t    i;
     int         rc;
 
-#define PORTABLE_CLEAR_BITS_WHEN(Lvl, LeafSuffReg, FeatNm, fMask, uValue) \
-    if (pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg & (fMask)) == (uValue) ) \
+#define PORTABLE_CLEAR_BITS_WHEN(Lvl, a_pLeafReg, FeatNm, fMask, uValue) \
+    if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fMask)) == (uValue) ) \
     { \
-        LogRel(("PortableCpuId: " #LeafSuffReg "[" #FeatNm "]: %#x -> 0\n", pCPUM->aGuestCpuId##LeafSuffReg & (fMask))); \
-        pCPUM->aGuestCpuId##LeafSuffReg &= ~(uint32_t)(fMask); \
-    }
-#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, LeafSuffReg, FeatNm, fBitMask) \
-    if (pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg & (fBitMask)) ) \
+        LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: %#x -> 0\n", (a_pLeafReg) & (fMask))); \
+        (a_pLeafReg) &= ~(uint32_t)(fMask); \
+    }
+#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, a_pLeafReg, FeatNm, fBitMask) \
+    if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fBitMask)) ) \
     { \
-        LogRel(("PortableCpuId: " #LeafSuffReg "[" #FeatNm "]: 1 -> 0\n")); \
-        pCPUM->aGuestCpuId##LeafSuffReg &= ~(uint32_t)(fBitMask); \
+        LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
+        (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
     }
 
@@ -847,6 +1060,9 @@
      * Enables the Synthetic CPU.  The Vendor ID and Processor Name are
      * completely overridden by VirtualBox custom strings.  Some
-     * CPUID information is withheld, like the cache info. */
-    rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu",  &pCPUM->fSyntheticCpu, false);
+     * CPUID information is withheld, like the cache info.
+     *
+     * This is obsoleted by PortableCpuIdLevel. */
+    bool fSyntheticCpu;
+    rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu",  &fSyntheticCpu, false);
     AssertRCReturn(rc, rc);
 
@@ -856,39 +1072,13 @@
      * values should only be used when older CPUs are involved since it may
      * harm performance and maybe also cause problems with specific guests. */
-    rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, 0);
-    AssertRCReturn(rc, rc);
-
-    AssertLogRelReturn(!pCPUM->fSyntheticCpu || !pCPUM->u8PortableCpuIdLevel, VERR_CPUM_INCOMPATIBLE_CONFIG);
-
-    /*
-     * Get the host CPUID leaves and redetect the guest CPU vendor (could've
-     * been overridden).
-     */
-    /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
-     * Overrides the host CPUID leaf values used for calculating the guest CPUID
-     * leaves.  This can be used to preserve the CPUID values when moving a VM to a
-     * different machine.  Another use is restricting (or extending) the feature set
-     * exposed to the guest. */
-    PCFGMNODE pHostOverrideCfg = CFGMR3GetChild(pCpumCfg, "HostCPUID");
-    rc = cpumR3CpuIdInitHostSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdStd),     pHostOverrideCfg);
-    AssertRCReturn(rc, rc);
-    rc = cpumR3CpuIdInitHostSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdExt),     pHostOverrideCfg);
-    AssertRCReturn(rc, rc);
-    rc = cpumR3CpuIdInitHostSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pHostOverrideCfg);
-    AssertRCReturn(rc, rc);
-
-    pCPUM->enmGuestCpuVendor = cpumR3DetectVendor(pCPUM->aGuestCpuIdStd[0].eax, pCPUM->aGuestCpuIdStd[0].ebx,
-                                                  pCPUM->aGuestCpuIdStd[0].ecx, pCPUM->aGuestCpuIdStd[0].edx);
-
-    /*
-     * Determine the default leaf.
-     *
-     * Intel returns values of the highest standard function, while AMD
-     * returns zeros. VIA on the other hand seems to returning nothing or
-     * perhaps some random garbage, we don't try to duplicate this behavior.
-     */
-    ASMCpuIdExSlow(pCPUM->aGuestCpuIdStd[0].eax + 10, 0, 0, 0, /** @todo r=bird: Use the host value here in case of overrides and more than 10 leaves being stripped already. */
-                   &pCPUM->GuestCpuIdDef.eax, &pCPUM->GuestCpuIdDef.ebx,
-                   &pCPUM->GuestCpuIdDef.ecx, &pCPUM->GuestCpuIdDef.edx);
+    rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, fSyntheticCpu ? 1 : 0);
+    AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{CPUM/GuestCpuName, string}
+     * The name of the CPU we're to emulate.  The default is the host CPU.
+     * Note! CPUs other than the "host" one are currently unsupported. */
+    char szCpuName[128];
+    rc = CFGMR3QueryStringDef(pCpumCfg, "GuestCpuName", szCpuName, sizeof(szCpuName), "host");
+    AssertLogRelRCReturn(rc, rc);
 
     /** @cfgm{/CPUM/CMPXCHG16B, boolean, false}
@@ -896,5 +1086,6 @@
      */
     bool fCmpXchg16b;
-    rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false); AssertRCReturn(rc, rc);
+    rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false);
+    AssertLogRelRCReturn(rc, rc);
 
     /** @cfgm{/CPUM/MONITOR, boolean, true}
@@ -902,7 +1093,85 @@
      */
     bool fMonitor;
-    rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true); AssertRCReturn(rc, rc);
-
-    /* Cpuid 1 & 0x80000001:
+    rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true);
+    AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/CPUM/MWaitExtensions, boolean, false}
+     * Expose MWAIT extended features to the guest.  For now we expose just MWAIT
+     * break on interrupt feature (bit 1).
+     */
+    bool fMWaitExtensions;
+    rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false);
+    AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
+     * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
+     * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
+     * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
+     */
+    bool fNt4LeafLimit;
+    rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false);
+    AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
+     * Restrict the reported CPU family+model+stepping of intel CPUs.  This is
+     * probably going to be a temporary hack, so don't depend on this.
+     * The 1st byte of the value is the stepping, the 2nd byte value is the model
+     * number and the 3rd byte value is the family, and the 4th value must be zero.
+     */
+    uint32_t uMaxIntelFamilyModelStep;
+    rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX);
+    AssertLogRelRCReturn(rc, rc);
+
+    /*
+     * Get the guest CPU data from the database and/or the host.
+     */
+    rc = cpumR3DbGetCpuInfo(szCpuName, &pCPUM->GuestInfo);
+    if (RT_FAILURE(rc))
+        return rc == VERR_CPUM_DB_CPU_NOT_FOUND
+             ? VMSetError(pVM, rc, RT_SRC_POS,
+                          "Info on guest CPU '%s' could not be found. Please, select a different CPU.", szCpuName)
+             : rc;
+
+    /** @cfgm{CPUM/MSRs/[Name]/[First|Last|Type|Value|...],}
+     * Overrides the guest MSRs.
+     */
+    rc = cpumR3LoadMsrOverrides(pVM, CFGMR3GetChild(pCpumCfg, "MSRs"));
+
+    /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
+     * Overrides the CPUID leaf values (from the host CPU usually) used for
+     * calculating the guest CPUID leaves.  This can be used to preserve the CPUID
+     * values when moving a VM to a different machine.  Another use is restricting
+     * (or extending) the feature set exposed to the guest. */
+    if (RT_SUCCESS(rc))
+        rc = cpumR3LoadCpuIdOverrides(pVM, CFGMR3GetChild(pCpumCfg, "HostCPUID"), "HostCPUID");
+
+    if (RT_SUCCESS(rc) && CFGMR3GetChild(pCpumCfg, "CPUID")) /* 2nd override, now discontinued. */
+        rc = VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS,
+                        "Found unsupported configuration node '/CPUM/CPUID/'. "
+                        "Please use IMachine::setCPUIDLeaf() instead.");
+
+    /*
+     * Pre-explode the CPUID info.
+     */
+    if (RT_SUCCESS(rc))
+        rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures);
+    if (RT_FAILURE(rc))
+    {
+        RTMemFree(pCPUM->GuestInfo.paCpuIdLeavesR3);
+        pCPUM->GuestInfo.paCpuIdLeavesR3 = NULL;
+        RTMemFree(pCPUM->GuestInfo.paMsrRangesR3);
+        pCPUM->GuestInfo.paMsrRangesR3 = NULL;
+        return rc;
+    }
+
+
+    /* ... split this function about here ... */
+
+
+    PCPUMCPUIDLEAF pStdLeaf0 = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0);
+    AssertLogRelReturn(pStdLeaf0, VERR_CPUM_IPE_2);
+
+
+    /* Cpuid 1:
      * Only report features we can support.
      *
@@ -910,5 +1179,7 @@
      *       options may require adjusting (i.e. stripping what was enabled).
      */
-    pCPUM->aGuestCpuIdStd[1].edx &= X86_CPUID_FEATURE_EDX_FPU
+    PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
+    AssertLogRelReturn(pStdFeatureLeaf, VERR_CPUM_IPE_2);
+    pStdFeatureLeaf->uEdx        &= X86_CPUID_FEATURE_EDX_FPU
                                   | X86_CPUID_FEATURE_EDX_VME
                                   | X86_CPUID_FEATURE_EDX_DE
@@ -941,5 +1212,5 @@
                                   //| X86_CPUID_FEATURE_EDX_PBE   - no pending break enabled.
                                   | 0;
-    pCPUM->aGuestCpuIdStd[1].ecx &= 0
+    pStdFeatureLeaf->uEcx        &= 0
                                   | X86_CPUID_FEATURE_ECX_SSE3
                                   /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
@@ -961,14 +1232,14 @@
     if (pCPUM->u8PortableCpuIdLevel > 0)
     {
-        PORTABLE_CLEAR_BITS_WHEN(1, Std[1].eax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
-        PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
-        PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSE3,  X86_CPUID_FEATURE_ECX_SSE3);
-        PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, CX16,  X86_CPUID_FEATURE_ECX_CX16);
-        PORTABLE_DISABLE_FEATURE_BIT(2, Std[1].edx, SSE2,  X86_CPUID_FEATURE_EDX_SSE2);
-        PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, SSE,   X86_CPUID_FEATURE_EDX_SSE);
-        PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
-        PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CMOV,  X86_CPUID_FEATURE_EDX_CMOV);
-
-        Assert(!(pCPUM->aGuestCpuIdStd[1].edx & (  X86_CPUID_FEATURE_EDX_SEP
+        PORTABLE_CLEAR_BITS_WHEN(1, pStdFeatureLeaf->uEax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
+        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
+        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE3,  X86_CPUID_FEATURE_ECX_SSE3);
+        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, CX16,  X86_CPUID_FEATURE_ECX_CX16);
+        PORTABLE_DISABLE_FEATURE_BIT(2, pStdFeatureLeaf->uEdx, SSE2,  X86_CPUID_FEATURE_EDX_SSE2);
+        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, SSE,   X86_CPUID_FEATURE_EDX_SSE);
+        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
+        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CMOV,  X86_CPUID_FEATURE_EDX_CMOV);
+
+        Assert(!(pStdFeatureLeaf->uEdx        & (  X86_CPUID_FEATURE_EDX_SEP
                                                  | X86_CPUID_FEATURE_EDX_PSN
                                                  | X86_CPUID_FEATURE_EDX_DS
@@ -978,5 +1249,5 @@
                                                  | X86_CPUID_FEATURE_EDX_PBE
                                                  )));
-        Assert(!(pCPUM->aGuestCpuIdStd[1].ecx & (  X86_CPUID_FEATURE_ECX_PCLMUL
+        Assert(!(pStdFeatureLeaf->uEcx        & (  X86_CPUID_FEATURE_ECX_PCLMUL
                                                  | X86_CPUID_FEATURE_ECX_DTES64
                                                  | X86_CPUID_FEATURE_ECX_CPLDS
@@ -1008,5 +1279,9 @@
      * ASSUMES that this is ALWAYS the AMD defined feature set if present.
      */
-    pCPUM->aGuestCpuIdExt[1].edx &= X86_CPUID_AMD_FEATURE_EDX_FPU
+    PCPUMCPUIDLEAF pExtFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
+                                                        UINT32_C(0x80000001), 0);
+    if (pExtFeatureLeaf)
+    {
+        pExtFeatureLeaf->uEdx    &= X86_CPUID_AMD_FEATURE_EDX_FPU
                                   | X86_CPUID_AMD_FEATURE_EDX_VME
                                   | X86_CPUID_AMD_FEATURE_EDX_DE
@@ -1037,5 +1312,5 @@
                                   | X86_CPUID_AMD_FEATURE_EDX_3DNOW
                                   | 0;
-    pCPUM->aGuestCpuIdExt[1].ecx &= 0
+        pExtFeatureLeaf->uEcx    &= 0
                                   //| X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
                                   //| X86_CPUID_AMD_FEATURE_ECX_CMPL
@@ -1054,97 +1329,39 @@
                                   //| X86_CPUID_AMD_FEATURE_ECX_WDT
                                   | 0;
-    if (pCPUM->u8PortableCpuIdLevel > 0)
-    {
-        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].ecx, CR8L,       X86_CPUID_AMD_FEATURE_ECX_CR8L);
-        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW,      X86_CPUID_AMD_FEATURE_EDX_3DNOW);
-        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW_EX,   X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
-        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, FFXSR,      X86_CPUID_AMD_FEATURE_EDX_FFXSR);
-        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, RDTSCP,     X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
-        PORTABLE_DISABLE_FEATURE_BIT(2, Ext[1].ecx, LAHF_SAHF,  X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
-        PORTABLE_DISABLE_FEATURE_BIT(3, Ext[1].ecx, CMOV,       X86_CPUID_AMD_FEATURE_EDX_CMOV);
-
-        Assert(!(pCPUM->aGuestCpuIdExt[1].ecx & (  X86_CPUID_AMD_FEATURE_ECX_CMPL
-                                                 | X86_CPUID_AMD_FEATURE_ECX_SVM
-                                                 | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
-                                                 | X86_CPUID_AMD_FEATURE_ECX_CR8L
-                                                 | X86_CPUID_AMD_FEATURE_ECX_ABM
-                                                 | X86_CPUID_AMD_FEATURE_ECX_SSE4A
-                                                 | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
-                                                 | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
-                                                 | X86_CPUID_AMD_FEATURE_ECX_OSVW
-                                                 | X86_CPUID_AMD_FEATURE_ECX_IBS
-                                                 | X86_CPUID_AMD_FEATURE_ECX_SSE5
-                                                 | X86_CPUID_AMD_FEATURE_ECX_SKINIT
-                                                 | X86_CPUID_AMD_FEATURE_ECX_WDT
-                                                 | UINT32_C(0xffffc000)
-                                                 )));
-        Assert(!(pCPUM->aGuestCpuIdExt[1].edx & (  RT_BIT(10)
-                                                 | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
-                                                 | RT_BIT(18)
-                                                 | RT_BIT(19)
-                                                 | RT_BIT(21)
-                                                 | X86_CPUID_AMD_FEATURE_EDX_AXMMX
-                                                 | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
-                                                 | RT_BIT(28)
-                                                 )));
-    }
-
-    /*
-     * Apply the Synthetic CPU modifications. (TODO: move this up)
-     */
-    if (pCPUM->fSyntheticCpu)
-    {
-        static const char s_szVendor[13]    = "VirtualBox  ";
-        static const char s_szProcessor[48] = "VirtualBox SPARCx86 Processor v1000            "; /* includes null terminator */
-
-        pCPUM->enmGuestCpuVendor = CPUMCPUVENDOR_SYNTHETIC;
-
-        /* Limit the nr of standard leaves; 5 for monitor/mwait */
-        pCPUM->aGuestCpuIdStd[0].eax = RT_MIN(pCPUM->aGuestCpuIdStd[0].eax, 5);
-
-        /* 0: Vendor */
-        pCPUM->aGuestCpuIdStd[0].ebx = pCPUM->aGuestCpuIdExt[0].ebx = ((uint32_t *)s_szVendor)[0];
-        pCPUM->aGuestCpuIdStd[0].ecx = pCPUM->aGuestCpuIdExt[0].ecx = ((uint32_t *)s_szVendor)[2];
-        pCPUM->aGuestCpuIdStd[0].edx = pCPUM->aGuestCpuIdExt[0].edx = ((uint32_t *)s_szVendor)[1];
-
-        /* 1.eax: Version information.  family : model : stepping */
-        pCPUM->aGuestCpuIdStd[1].eax = (0xf << 8) + (0x1 << 4) + 1;
-
-        /* Leaves 2 - 4 are Intel only - zero them out */
-        memset(&pCPUM->aGuestCpuIdStd[2], 0, sizeof(pCPUM->aGuestCpuIdStd[2]));
-        memset(&pCPUM->aGuestCpuIdStd[3], 0, sizeof(pCPUM->aGuestCpuIdStd[3]));
-        memset(&pCPUM->aGuestCpuIdStd[4], 0, sizeof(pCPUM->aGuestCpuIdStd[4]));
-
-        /* Leaf 5 = monitor/mwait */
-
-        /* Limit the nr of extended leaves: 0x80000008 to include the max virtual and physical address size (64 bits guests). */
-        pCPUM->aGuestCpuIdExt[0].eax = RT_MIN(pCPUM->aGuestCpuIdExt[0].eax, 0x80000008);
-        /* AMD only - set to zero. */
-        pCPUM->aGuestCpuIdExt[0].ebx = pCPUM->aGuestCpuIdExt[0].ecx = pCPUM->aGuestCpuIdExt[0].edx = 0;
-
-        /* 0x800000001: shared feature bits are set dynamically. */
-        memset(&pCPUM->aGuestCpuIdExt[1], 0, sizeof(pCPUM->aGuestCpuIdExt[1]));
-
-        /* 0x800000002-4: Processor Name String Identifier. */
-        pCPUM->aGuestCpuIdExt[2].eax = ((uint32_t *)s_szProcessor)[0];
-        pCPUM->aGuestCpuIdExt[2].ebx = ((uint32_t *)s_szProcessor)[1];
-        pCPUM->aGuestCpuIdExt[2].ecx = ((uint32_t *)s_szProcessor)[2];
-        pCPUM->aGuestCpuIdExt[2].edx = ((uint32_t *)s_szProcessor)[3];
-        pCPUM->aGuestCpuIdExt[3].eax = ((uint32_t *)s_szProcessor)[4];
-        pCPUM->aGuestCpuIdExt[3].ebx = ((uint32_t *)s_szProcessor)[5];
-        pCPUM->aGuestCpuIdExt[3].ecx = ((uint32_t *)s_szProcessor)[6];
-        pCPUM->aGuestCpuIdExt[3].edx = ((uint32_t *)s_szProcessor)[7];
-        pCPUM->aGuestCpuIdExt[4].eax = ((uint32_t *)s_szProcessor)[8];
-        pCPUM->aGuestCpuIdExt[4].ebx = ((uint32_t *)s_szProcessor)[9];
-        pCPUM->aGuestCpuIdExt[4].ecx = ((uint32_t *)s_szProcessor)[10];
-        pCPUM->aGuestCpuIdExt[4].edx = ((uint32_t *)s_szProcessor)[11];
-
-        /* 0x800000005-7 - reserved -> zero */
-        memset(&pCPUM->aGuestCpuIdExt[5], 0, sizeof(pCPUM->aGuestCpuIdExt[5]));
-        memset(&pCPUM->aGuestCpuIdExt[6], 0, sizeof(pCPUM->aGuestCpuIdExt[6]));
-        memset(&pCPUM->aGuestCpuIdExt[7], 0, sizeof(pCPUM->aGuestCpuIdExt[7]));
-
-        /* 0x800000008: only the max virtual and physical address size. */
-        pCPUM->aGuestCpuIdExt[8].ecx = pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0;  /* reserved */
+        if (pCPUM->u8PortableCpuIdLevel > 0)
+        {
+            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEcx, CR8L,       X86_CPUID_AMD_FEATURE_ECX_CR8L);
+            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW,      X86_CPUID_AMD_FEATURE_EDX_3DNOW);
+            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW_EX,   X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
+            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, FFXSR,      X86_CPUID_AMD_FEATURE_EDX_FFXSR);
+            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, RDTSCP,     X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
+            PORTABLE_DISABLE_FEATURE_BIT(2, pExtFeatureLeaf->uEcx, LAHF_SAHF,  X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
+            PORTABLE_DISABLE_FEATURE_BIT(3, pExtFeatureLeaf->uEcx, CMOV,       X86_CPUID_AMD_FEATURE_EDX_CMOV);
+
+            Assert(!(pExtFeatureLeaf->uEcx & (  X86_CPUID_AMD_FEATURE_ECX_CMPL
+                                              | X86_CPUID_AMD_FEATURE_ECX_SVM
+                                              | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
+                                              | X86_CPUID_AMD_FEATURE_ECX_CR8L
+                                              | X86_CPUID_AMD_FEATURE_ECX_ABM
+                                              | X86_CPUID_AMD_FEATURE_ECX_SSE4A
+                                              | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
+                                              | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
+                                              | X86_CPUID_AMD_FEATURE_ECX_OSVW
+                                              | X86_CPUID_AMD_FEATURE_ECX_IBS
+                                              | X86_CPUID_AMD_FEATURE_ECX_SSE5
+                                              | X86_CPUID_AMD_FEATURE_ECX_SKINIT
+                                              | X86_CPUID_AMD_FEATURE_ECX_WDT
+                                              | UINT32_C(0xffffc000)
+                                              )));
+            Assert(!(pExtFeatureLeaf->uEdx & (  RT_BIT(10)
+                                              | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
+                                              | RT_BIT(18)
+                                              | RT_BIT(19)
+                                              | RT_BIT(21)
+                                              | X86_CPUID_AMD_FEATURE_EDX_AXMMX
+                                              | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
+                                              | RT_BIT(28)
+                                              )));
+        }
     }
 
@@ -1153,12 +1370,11 @@
      * (APIC-ID := 0 and #LogCpus := 0)
      */
-    pCPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;
+    pStdFeatureLeaf->uEbx &= 0x0000ffff;
 #ifdef VBOX_WITH_MULTI_CORE
-    if (    pCPUM->enmGuestCpuVendor != CPUMCPUVENDOR_SYNTHETIC
-        &&  pVM->cCpus > 1)
+    if (pVM->cCpus > 1)
     {
         /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU core times the number of CPU cores per processor */
-        pCPUM->aGuestCpuIdStd[1].ebx |= (pVM->cCpus << 16);
-        pCPUM->aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_HTT;  /* necessary for hyper-threading *or* multi-core CPUs */
+        pStdFeatureLeaf->uEbx |= (pVM->cCpus << 16);
+        pStdFeatureLeaf->uEdx |= X86_CPUID_FEATURE_EDX_HTT;  /* necessary for hyper-threading *or* multi-core CPUs */
     }
 #endif
@@ -1170,10 +1386,11 @@
      * Safe to expose; restrict the number of calls to 1 for the portable case.
      */
-    if (    pCPUM->u8PortableCpuIdLevel > 0
-        &&  pCPUM->aGuestCpuIdStd[0].eax >= 2
-        && (pCPUM->aGuestCpuIdStd[2].eax & 0xff) > 1)
-    {
-        LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCPUM->aGuestCpuIdStd[2].eax & 0xff));
-        pCPUM->aGuestCpuIdStd[2].eax &= UINT32_C(0xfffffffe);
+    PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 2, 0);
+    if (   pCPUM->u8PortableCpuIdLevel > 0
+        && pCurLeaf
+        && (pCurLeaf->uEax & 0xff) > 1)
+    {
+        LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCurLeaf->uEax & 0xff));
+        pCurLeaf->uEax &= UINT32_C(0xfffffffe);
     }
 
@@ -1185,9 +1402,11 @@
      * Safe to expose
      */
-    if (!(pCPUM->aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PSN))
-    {
-        pCPUM->aGuestCpuIdStd[3].ecx = pCPUM->aGuestCpuIdStd[3].edx = 0;
+    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 3, 0);
+    if (   !(pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_PSN)
+        && pCurLeaf)
+    {
+        pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
         if (pCPUM->u8PortableCpuIdLevel > 0)
-            pCPUM->aGuestCpuIdStd[3].eax = pCPUM->aGuestCpuIdStd[3].ebx = 0;
+            pCurLeaf->uEax = pCurLeaf->uEbx = 0;
     }
 
@@ -1202,16 +1421,29 @@
      * Note: These SMP values are constant regardless of ECX
      */
-    pCPUM->aGuestCpuIdStd[4].ecx = pCPUM->aGuestCpuIdStd[4].edx = 0;
-    pCPUM->aGuestCpuIdStd[4].eax = pCPUM->aGuestCpuIdStd[4].ebx = 0;
+    CPUMCPUIDLEAF NewLeaf;
+    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 4, 0);
+    if (pCurLeaf)
+    {
+        NewLeaf.uLeaf        = 4;
+        NewLeaf.uSubLeaf     = 0;
+        NewLeaf.fSubLeafMask = 0;
+        NewLeaf.uEax         = 0;
+        NewLeaf.uEbx         = 0;
+        NewLeaf.uEcx         = 0;
+        NewLeaf.uEdx         = 0;
+        NewLeaf.fFlags       = 0;
 #ifdef VBOX_WITH_MULTI_CORE
-    if (   pVM->cCpus > 1
-        && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
-    {
-        AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
-        /* One logical processor with possibly multiple cores. */
-        /* See  http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
-        pCPUM->aGuestCpuIdStd[4].eax |= ((pVM->cCpus - 1) << 26);   /* 6 bits only -> 64 cores! */
-    }
+        if (   pVM->cCpus > 1
+            && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
+        {
+            AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
+            /* One logical processor with possibly multiple cores. */
+            /* See  http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
+            NewLeaf.uEax |= ((pVM->cCpus - 1) << 26);   /* 6 bits only -> 64 cores! */
+        }
 #endif
+        rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
+        AssertLogRelRCReturn(rc, rc);
+    }
 
     /* Cpuid 5:     Monitor/mwait Leaf
@@ -1224,32 +1456,30 @@
      * Safe to expose
      */
-    if (!(pCPUM->aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR))
-        pCPUM->aGuestCpuIdStd[5].eax = pCPUM->aGuestCpuIdStd[5].ebx = 0;
-
-    pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;
-    /** @cfgm{/CPUM/MWaitExtensions, boolean, false}
-     * Expose MWAIT extended features to the guest.  For now we expose
-     * just MWAIT break on interrupt feature (bit 1).
-     */
-    bool fMWaitExtensions;
-    rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false); AssertRCReturn(rc, rc);
-    if (fMWaitExtensions)
-    {
-        pCPUM->aGuestCpuIdStd[5].ecx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
-        /** @todo: for now we just expose host's MWAIT C-states, although conceptually
-           it shall be part of our power management virtualization model */
+    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 5, 0);
+    if (pCurLeaf)
+    {
+        if (!(pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR))
+            pCurLeaf->uEax = pCurLeaf->uEbx = 0;
+
+        pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
+        if (fMWaitExtensions)
+        {
+            pCurLeaf->uEcx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
+            /** @todo: for now we just expose host's MWAIT C-states, although conceptually
+               it shall be part of our power management virtualization model */
 #if 0
-        /* MWAIT sub C-states */
-        pCPUM->aGuestCpuIdStd[5].edx =
-                (0 << 0)  /* 0 in C0 */ |
-                (2 << 4)  /* 2 in C1 */ |
-                (2 << 8)  /* 2 in C2 */ |
-                (2 << 12) /* 2 in C3 */ |
-                (0 << 16) /* 0 in C4 */
-                ;
+            /* MWAIT sub C-states */
+            pCurLeaf->uEdx =
+                    (0 << 0)  /* 0 in C0 */ |
+                    (2 << 4)  /* 2 in C1 */ |
+                    (2 << 8)  /* 2 in C2 */ |
+                    (2 << 12) /* 2 in C3 */ |
+                    (0 << 16) /* 0 in C4 */
+                    ;
 #endif
-    }
-    else
-        pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;
+        }
+        else
+            pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
+    }
 
     /* Cpuid 0x800000005 & 0x800000006 contain information about L1, L2 & L3 cache and TLB identifiers.
@@ -1270,14 +1500,15 @@
      * VIA:               Reserved
      */
-    if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000007))
-    {
-        Assert(pVM->cpum.s.enmGuestCpuVendor != CPUMCPUVENDOR_INVALID);
-
-        pCPUM->aGuestCpuIdExt[7].eax = pCPUM->aGuestCpuIdExt[7].ebx = pCPUM->aGuestCpuIdExt[7].ecx = 0;
-
-        if (pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
+    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000007), 0);
+    if (pCurLeaf)
+    {
+        Assert(pCPUM->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID);
+
+        pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = 0;
+
+        if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
         {
             /* Only expose the TSC invariant capability bit to the guest. */
-            pCPUM->aGuestCpuIdExt[7].edx    &= 0
+            pCurLeaf->uEdx                  &= 0
                                             //| X86_CPUID_AMD_ADVPOWER_EDX_TS
                                             //| X86_CPUID_AMD_ADVPOWER_EDX_FID
@@ -1300,5 +1531,5 @@
         }
         else
-            pCPUM->aGuestCpuIdExt[7].edx    = 0;
+            pCurLeaf->uEdx = 0;
     }
 
@@ -1312,35 +1543,28 @@
      *                    EBX, ECX, EDX - reserved
      */
-    if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008))
+    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000008), 0);
+    if (pCurLeaf)
     {
         /* Only expose the virtual and physical address sizes to the guest. */
-        pCPUM->aGuestCpuIdExt[8].eax &= UINT32_C(0x0000ffff);
-        pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0;  /* reserved */
+        pCurLeaf->uEax &= UINT32_C(0x0000ffff);
+        pCurLeaf->uEbx = pCurLeaf->uEdx = 0;  /* reserved */
         /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu)
          * NC (0-7) Number of cores; 0 equals 1 core */
-        pCPUM->aGuestCpuIdExt[8].ecx = 0;
+        pCurLeaf->uEcx = 0;
 #ifdef VBOX_WITH_MULTI_CORE
         if (    pVM->cCpus > 1
-            &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
+            &&  pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
         {
             /* Legacy method to determine the number of cores. */
-            pCPUM->aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
-            pCPUM->aGuestCpuIdExt[8].ecx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */
+            pCurLeaf->uEcx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */
+            if (pExtFeatureLeaf)
+                pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
         }
 #endif
     }
 
-    /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
-     * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
-     * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
-     * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
-     */
-    bool fNt4LeafLimit;
-    rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false); AssertRCReturn(rc, rc);
-    if (fNt4LeafLimit && pCPUM->aGuestCpuIdStd[0].eax > 3)
-        pCPUM->aGuestCpuIdStd[0].eax = 3;
-
-    /*
-     * Limit it the number of entries and fill the remaining with the defaults.
+
+    /*
+     * Limit the number of entries, zapping the remainder.
      *
      * The limits are masking off stuff about power saving and similar, this
@@ -1348,17 +1572,29 @@
      * info too in these leaves (like words about having a constant TSC).
      */
-    if (pCPUM->aGuestCpuIdStd[0].eax > 5)
-        pCPUM->aGuestCpuIdStd[0].eax = 5;
-    for (i = pCPUM->aGuestCpuIdStd[0].eax + 1; i < RT_ELEMENTS(pCPUM->aGuestCpuIdStd); i++)
-        pCPUM->aGuestCpuIdStd[i] = pCPUM->GuestCpuIdDef;
-
-    if (pCPUM->aGuestCpuIdExt[0].eax > UINT32_C(0x80000008))
-        pCPUM->aGuestCpuIdExt[0].eax = UINT32_C(0x80000008);
-    for (i = pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000000)
-           ? pCPUM->aGuestCpuIdExt[0].eax - UINT32_C(0x80000000) + 1
-           : 0;
-         i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt);
-         i++)
-        pCPUM->aGuestCpuIdExt[i] = pCPUM->GuestCpuIdDef;
+    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0);
+    if (pCurLeaf)
+    {
+        if (pCurLeaf->uEax > 5)
+        {
+            pCurLeaf->uEax = 5;
+            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
+                                   UINT32_C(0x00000006), UINT32_C(0x000fffff));
+        }
+
+        /* NT4 hack, no zapping of extra leaves here. */
+        if (fNt4LeafLimit && pCurLeaf->uEax > 3)
+            pCurLeaf->uEax = 3;
+    }
+
+    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000000), 0);
+    if (pCurLeaf)
+    {
+        if (pCurLeaf->uEax > UINT32_C(0x80000008))
+        {
+            pCurLeaf->uEax = UINT32_C(0x80000008);
+            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
+                                   UINT32_C(0x80000009), UINT32_C(0x800fffff));
+        }
+    }
 
     /*
@@ -1370,17 +1606,23 @@
      * temperature/hz/++ stuff, include it as well (static).
      */
-    if (    pCPUM->aGuestCpuIdCentaur[0].eax >= UINT32_C(0xc0000000)
-        &&  pCPUM->aGuestCpuIdCentaur[0].eax <= UINT32_C(0xc0000004))
-    {
-        pCPUM->aGuestCpuIdCentaur[0].eax = RT_MIN(pCPUM->aGuestCpuIdCentaur[0].eax, UINT32_C(0xc0000002));
-        pCPUM->aGuestCpuIdCentaur[1].edx = 0; /* all features hidden */
-        for (i = pCPUM->aGuestCpuIdCentaur[0].eax - UINT32_C(0xc0000000);
-             i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
-             i++)
-            pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
-    }
-    else
-        for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
-            pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
+    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0xc0000000), 0);
+    if (pCurLeaf)
+    {
+        if (   pCurLeaf->uEax >= UINT32_C(0xc0000000)
+            && pCurLeaf->uEax <= UINT32_C(0xc0000004))
+        {
+            pCurLeaf->uEax = RT_MIN(pCurLeaf->uEax, UINT32_C(0xc0000002));
+            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
+                                   UINT32_C(0xc0000003), UINT32_C(0xc00fffff));
+
+            pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
+                                          UINT32_C(0xc0000001), 0);
+            if (pCurLeaf)
+                pCurLeaf->uEdx = 0; /* all features hidden */
+        }
+        else
+            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
+                                   UINT32_C(0xc0000000), UINT32_C(0xc00fffff));
+    }
 
     /*
@@ -1391,32 +1633,36 @@
      * Currently we do not support any hypervisor-specific interface.
      */
-    pCPUM->aGuestCpuIdHyper[0].eax = UINT32_C(0x40000001);
-    pCPUM->aGuestCpuIdHyper[0].ebx = pCPUM->aGuestCpuIdHyper[0].ecx
-                                   = pCPUM->aGuestCpuIdHyper[0].edx = 0x786f4256;   /* 'VBox' */
-    pCPUM->aGuestCpuIdHyper[1].eax = 0x656e6f6e;                            /* 'none' */
-    pCPUM->aGuestCpuIdHyper[1].ebx = pCPUM->aGuestCpuIdHyper[1].ecx
-                                   = pCPUM->aGuestCpuIdHyper[1].edx = 0;    /* Reserved */
+    NewLeaf.uLeaf        = UINT32_C(0x40000000);
+    NewLeaf.uSubLeaf     = 0;
+    NewLeaf.fSubLeafMask = 0;
+    NewLeaf.uEax         = UINT32_C(0x40000001);
+    NewLeaf.uEbx         = 0x786f4256 /* 'VBox' */;
+    NewLeaf.uEcx         = 0x786f4256 /* 'VBox' */;
+    NewLeaf.uEdx         = 0x786f4256 /* 'VBox' */;
+    NewLeaf.fFlags       = 0;
+    rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
+    AssertLogRelRCReturn(rc, rc);
+
+    NewLeaf.uLeaf        = UINT32_C(0x40000001);
+    NewLeaf.uEax         = 0x656e6f6e;                            /* 'none' */
+    NewLeaf.uEbx         = 0;
+    NewLeaf.uEcx         = 0;
+    NewLeaf.uEdx         = 0;
+    NewLeaf.fFlags       = 0;
+    rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
+    AssertLogRelRCReturn(rc, rc);
 
     /*
      * Mini CPU selection support for making Mac OS X happy.
      */
-    if (pCPUM->enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
-    {
-        /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
-         * Restrict the reported CPU family+model+stepping of intel CPUs.  This is
-         * probably going to be a temporary hack, so don't depend on this.
-         * The 1st byte of the value is the stepping, the 2nd byte value is the model
-         * number and the 3rd byte value is the family, and the 4th value must be zero.
-         */
-        uint32_t uMaxIntelFamilyModelStep;
-        rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX);
-        AssertRCReturn(rc, rc);
-        uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pCPUM->aGuestCpuIdStd[1].eax),
-                                                                ASMGetCpuModelIntel(pCPUM->aGuestCpuIdStd[1].eax),
-                                                                ASMGetCpuFamily(pCPUM->aGuestCpuIdStd[1].eax),
+    if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
+    {
+        uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pStdFeatureLeaf->uEax),
+                                                                ASMGetCpuModelIntel(pStdFeatureLeaf->uEax),
+                                                                ASMGetCpuFamily(pStdFeatureLeaf->uEax),
                                                                 0);
         if (uMaxIntelFamilyModelStep < uCurIntelFamilyModelStep)
         {
-            uint32_t uNew = pCPUM->aGuestCpuIdStd[1].eax & UINT32_C(0xf0003000);
+            uint32_t uNew = pStdFeatureLeaf->uEax & UINT32_C(0xf0003000);
             uNew |= RT_BYTE1(uMaxIntelFamilyModelStep) & 0xf; /* stepping */
             uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) & 0xf) << 4; /* 4 low model bits */
@@ -1426,26 +1672,36 @@
                 uNew |= ( (RT_BYTE3(uMaxIntelFamilyModelStep) - (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf)) & 0xff ) << 20;
             LogRel(("CPU: CPUID(0).EAX %#x -> %#x (uMaxIntelFamilyModelStep=%#x, uCurIntelFamilyModelStep=%#x\n",
-                    pCPUM->aGuestCpuIdStd[1].eax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));
-            pCPUM->aGuestCpuIdStd[1].eax = uNew;
+                    pStdFeatureLeaf->uEax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));
+            pStdFeatureLeaf->uEax = uNew;
         }
     }
 
-    /*
-     * Load CPUID overrides from configuration.
-     * Note: Kind of redundant now, but allows unchanged overrides
-     */
-    /** @cfgm{CPUM/CPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
-     * Overrides the CPUID leaf values. */
-    PCFGMNODE pOverrideCfg = CFGMR3GetChild(pCpumCfg, "CPUID");
-    rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdStd),     pOverrideCfg);
-    AssertRCReturn(rc, rc);
-    rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdExt),     pOverrideCfg);
-    AssertRCReturn(rc, rc);
-    rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pOverrideCfg);
-    AssertRCReturn(rc, rc);
-
-    /*
-     * Check if PAE was explicitely enabled by the user.
-     */
+
+    /*
+     * Move the MSR and CPUID arrays over on the hypervisor heap, and explode
+     * guest CPU features again.
+     */
+    void *pvFree = pCPUM->GuestInfo.paCpuIdLeavesR3;
+    int rc1 = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCPUM, pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves);
+    RTMemFree(pvFree);
+
+    pvFree = pCPUM->GuestInfo.paMsrRangesR3;
+    int rc2 = MMHyperDupMem(pVM, pvFree,
+                            sizeof(pCPUM->GuestInfo.paMsrRangesR3[0]) * pCPUM->GuestInfo.cMsrRanges, 32,
+                            MM_TAG_CPUM_MSRS, (void **)&pCPUM->GuestInfo.paMsrRangesR3);
+    RTMemFree(pvFree);
+    AssertLogRelRCReturn(rc1, rc1);
+    AssertLogRelRCReturn(rc2, rc2);
+
+    pCPUM->GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paMsrRangesR3);
+    pCPUM->GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paMsrRangesR3);
+    cpumR3MsrRegStats(pVM);
+
+    /*
+     * Some more configuration that we're applying at the end of everything
+     * via the CPUMSetGuestCpuIdFeature API.
+     */
+
+    /* Check if PAE was explicitely enabled by the user. */
     bool fEnable;
     rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false);      AssertRCReturn(rc, rc);
@@ -1453,16 +1709,10 @@
         CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
 
-    /*
-     * We don't normally enable NX for raw-mode, so give the user a chance to
-     * force it on.
-     */
+    /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */
     rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false);                 AssertRCReturn(rc, rc);
     if (fEnable)
         CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
 
-    /*
-     * We don't enable the Hypervisor Present bit by default, but it may
-     * be needed by some guests.
-     */
+    /* We don't enable the Hypervisor Present bit by default, but it may be needed by some guests. */
     rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableHVP", &fEnable, false);                AssertRCReturn(rc, rc);
     if (fEnable)
@@ -1488,4 +1738,7 @@
 {
     LogFlow(("CPUMR3Relocate\n"));
+
+    pVM->cpum.s.GuestInfo.paMsrRangesRC   = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paMsrRangesR3);
+    pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
 
     /* Recheck the guest DRx values in raw-mode. */
@@ -1552,7 +1805,10 @@
  * Used by CPUMR3Reset and CPU hot plugging.
  *
- * @param   pVCpu               Pointer to the VMCPU.
- */
-VMMR3DECL(void) CPUMR3ResetCpu(PVMCPU pVCpu)
+ * @param   pVM         Pointer to the cross context VM structure.
+ * @param   pVCpu       Pointer to the cross context virtual CPU structure of
+ *                      the CPU that is being reset.  This may differ from the
+ *                      current EMT.
+ */
+VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
 {
     /** @todo anything different for VCPU > 0? */
@@ -1635,4 +1891,7 @@
                                                         supports all bits, since a zero value here should be read as 0xffbf. */
 
+    /*
+     * MSRs.
+     */
     /* Init PAT MSR */
     pCtx->msrPAT                    = UINT64_C(0x0007040600070406); /** @todo correct? */
@@ -1642,8 +1901,24 @@
     Assert(!pCtx->msrEFER);
 
+    /* IA32_MISC_ENABLE - not entirely sure what the init/reset state really
+       is supposed to be here, just trying to provide useful/sensible values. */
+    PCPUMMSRRANGE pRange = cpumLookupMsrRange(pVM, MSR_IA32_MISC_ENABLE);
+    if (pRange)
+    {
+        pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
+                                               | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL
+                                               | (pVM->cpum.s.GuestFeatures.fMonitorMWait ? MSR_IA32_MISC_ENABLE_MONITOR : 0)
+                                               | MSR_IA32_MISC_ENABLE_FAST_STRINGS;
+        pRange->fWrIgnMask |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
+                            | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
+        pRange->fWrGpMask  &= ~pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
+    }
+
+    /** @todo Wire IA32_MISC_ENABLE bit 22 to our NT 4 CPUID trick. */
+
     /** @todo r=ramshankar: Currently broken for SMP as TMCpuTickSet() expects to be
      *        called from each EMT while we're getting called by CPUMR3Reset()
      *        iteratively on the same thread. Fix later.  */
-#if 0
+#if 0 /** @todo r=bird: This we will do in TM, not here. */
     /* TSC must be 0. Intel spec. Table 9-1. "IA-32 Processor States Following Power-up, Reset, or INIT." */
     CPUMSetGuestMsr(pVCpu, MSR_IA32_TSC, 0);
@@ -1673,5 +1948,5 @@
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
-        CPUMR3ResetCpu(&pVM->aCpus[i]);
+        CPUMR3ResetCpu(pVM, &pVM->aCpus[i]);
 
 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
@@ -1725,4 +2000,59 @@
     SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
     SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
+}
+
+
+static int cpumR3LoadCpuIdOneGuestArray(PSSMHANDLE pSSM, uint32_t uBase, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
+{
+    uint32_t cCpuIds;
+    int rc = SSMR3GetU32(pSSM, &cCpuIds);
+    if (RT_SUCCESS(rc))
+    {
+        if (cCpuIds < 64)
+        {
+            for (uint32_t i = 0; i < cCpuIds; i++)
+            {
+                CPUMCPUID CpuId;
+                rc = SSMR3GetMem(pSSM, &CpuId, sizeof(CpuId));
+                if (RT_FAILURE(rc))
+                    break;
+
+                CPUMCPUIDLEAF NewLeaf;
+                NewLeaf.uLeaf           = uBase + i;
+                NewLeaf.uSubLeaf        = 0;
+                NewLeaf.fSubLeafMask    = 0;
+                NewLeaf.uEax            = CpuId.eax;
+                NewLeaf.uEbx            = CpuId.ebx;
+                NewLeaf.uEcx            = CpuId.ecx;
+                NewLeaf.uEdx            = CpuId.edx;
+                NewLeaf.fFlags          = 0;
+                rc = cpumR3CpuIdInsert(ppaLeaves, pcLeaves, &NewLeaf);
+            }
+        }
+        else
+            rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+    }
+    if (RT_FAILURE(rc))
+    {
+        RTMemFree(*ppaLeaves);
+        *ppaLeaves = NULL;
+        *pcLeaves = 0;
+    }
+    return rc;
+}
+
+
+static int cpumR3LoadCpuIdGuestArrays(PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
+{
+    *ppaLeaves = NULL;
+    *pcLeaves = 0;
+
+    int rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x00000000), ppaLeaves, pcLeaves);
+    if (RT_SUCCESS(rc))
+        rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x80000000), ppaLeaves, pcLeaves);
+    if (RT_SUCCESS(rc))
+        rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0xc0000000), ppaLeaves, pcLeaves);
+
+    return rc;
 }
 
@@ -1809,5 +2139,4 @@
             && !(aHostRaw##set [1].reg & bit) \
             && !(aHostOverride##set [1].reg & bit) \
-            && !(aGuestOverride##set [1].reg & bit) \
            ) \
         { \
@@ -1823,5 +2152,4 @@
             && !(aHostRaw##set [1].reg & bit) \
             && !(aHostOverride##set [1].reg & bit) \
-            && !(aGuestOverride##set [1].reg & bit) \
            ) \
             LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
@@ -1832,5 +2160,4 @@
             && !(aHostRaw##set [1].reg & bit) \
             && !(aHostOverride##set [1].reg & bit) \
-            && !(aGuestOverride##set [1].reg & bit) \
            ) \
             LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
@@ -1845,5 +2172,4 @@
             && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
             && !(aHostOverride##set [1].reg & bit) \
-            && !(aGuestOverride##set [1].reg & bit) \
            ) \
         { \
@@ -1860,5 +2186,4 @@
             && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
             && !(aHostOverride##set [1].reg & bit) \
-            && !(aGuestOverride##set [1].reg & bit) \
            ) \
             LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
@@ -1870,5 +2195,4 @@
             && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
             && !(aHostOverride##set [1].reg & bit) \
-            && !(aGuestOverride##set [1].reg & bit) \
            ) \
             LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
@@ -1885,5 +2209,4 @@
                  : aHostRawStd[1].reg      & (StdBit)) \
             && !(aHostOverrideExt[1].reg   & (ExtBit)) \
-            && !(aGuestOverrideExt[1].reg  & (ExtBit)) \
            ) \
         { \
@@ -1901,5 +2224,4 @@
                  : aHostRawStd[1].reg      & (StdBit)) \
             && !(aHostOverrideExt[1].reg   & (ExtBit)) \
-            && !(aGuestOverrideExt[1].reg  & (ExtBit)) \
            ) \
             LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \
@@ -1912,5 +2234,4 @@
                  : aHostRawStd[1].reg      & (StdBit)) \
             && !(aHostOverrideExt[1].reg   & (ExtBit)) \
-            && !(aGuestOverrideExt[1].reg  & (ExtBit)) \
            ) \
             LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
@@ -1921,24 +2242,10 @@
      * Load them into stack buffers first.
      */
-    CPUMCPUID   aGuestCpuIdStd[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd)];
-    uint32_t    cGuestCpuIdStd;
-    int rc = SSMR3GetU32(pSSM, &cGuestCpuIdStd); AssertRCReturn(rc, rc);
-    if (cGuestCpuIdStd > RT_ELEMENTS(aGuestCpuIdStd))
-        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
-    SSMR3GetMem(pSSM, &aGuestCpuIdStd[0], cGuestCpuIdStd * sizeof(aGuestCpuIdStd[0]));
-
-    CPUMCPUID   aGuestCpuIdExt[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt)];
-    uint32_t    cGuestCpuIdExt;
-    rc = SSMR3GetU32(pSSM, &cGuestCpuIdExt); AssertRCReturn(rc, rc);
-    if (cGuestCpuIdExt > RT_ELEMENTS(aGuestCpuIdExt))
-        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
-    SSMR3GetMem(pSSM, &aGuestCpuIdExt[0], cGuestCpuIdExt * sizeof(aGuestCpuIdExt[0]));
-
-    CPUMCPUID   aGuestCpuIdCentaur[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur)];
-    uint32_t    cGuestCpuIdCentaur;
-    rc = SSMR3GetU32(pSSM, &cGuestCpuIdCentaur); AssertRCReturn(rc, rc);
-    if (cGuestCpuIdCentaur > RT_ELEMENTS(aGuestCpuIdCentaur))
-        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
-    SSMR3GetMem(pSSM, &aGuestCpuIdCentaur[0], cGuestCpuIdCentaur * sizeof(aGuestCpuIdCentaur[0]));
+    PCPUMCPUIDLEAF paLeaves;
+    uint32_t       cLeaves;
+    int rc = cpumR3LoadCpuIdGuestArrays(pSSM, uVersion, &paLeaves, &cLeaves);
+    AssertRCReturn(rc, rc);
+
+    /** @todo we'll be leaking paLeaves on error return... */
 
     CPUMCPUID   GuestCpuIdDef;
@@ -1951,5 +2258,8 @@
     if (cRawStd > RT_ELEMENTS(aRawStd))
         return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
-    SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
+    rc = SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
+    AssertRCReturn(rc, rc);
+    for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
+        ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
 
     CPUMCPUID   aRawExt[32];
@@ -1960,23 +2270,4 @@
     rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
     AssertRCReturn(rc, rc);
-
-    /*
-     * Note that we support restoring less than the current amount of standard
-     * leaves because we've been allowed more is newer version of VBox.
-     *
-     * So, pad new entries with the default.
-     */
-    for (uint32_t i = cGuestCpuIdStd; i < RT_ELEMENTS(aGuestCpuIdStd); i++)
-        aGuestCpuIdStd[i] = GuestCpuIdDef;
-
-    for (uint32_t i = cGuestCpuIdExt; i < RT_ELEMENTS(aGuestCpuIdExt); i++)
-        aGuestCpuIdExt[i] = GuestCpuIdDef;
-
-    for (uint32_t i = cGuestCpuIdCentaur; i < RT_ELEMENTS(aGuestCpuIdCentaur); i++)
-        aGuestCpuIdCentaur[i] = GuestCpuIdDef;
-
-    for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
-        ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
-
     for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
         ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
@@ -1999,14 +2290,5 @@
      * Note! We currently only need the feature leaves, so skip rest.
      */
-    PCFGMNODE   pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/CPUID");
-    CPUMCPUID   aGuestOverrideStd[2];
-    memcpy(&aGuestOverrideStd[0], &aHostRawStd[0], sizeof(aGuestOverrideStd));
-    cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aGuestOverrideStd[0], RT_ELEMENTS(aGuestOverrideStd), pOverrideCfg);
-
-    CPUMCPUID   aGuestOverrideExt[2];
-    memcpy(&aGuestOverrideExt[0], &aHostRawExt[0], sizeof(aGuestOverrideExt));
-    cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aGuestOverrideExt[0], RT_ELEMENTS(aGuestOverrideExt), pOverrideCfg);
-
-    pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
+    PCFGMNODE   pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
     CPUMCPUID   aHostOverrideStd[2];
     memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd));
@@ -2259,4 +2541,8 @@
      *      "EMU?" - Can this be emulated?
      */
+    CPUMCPUID aGuestCpuIdStd[2];
+    RT_ZERO(aGuestCpuIdStd);
+    cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, 1, 0, &aGuestCpuIdStd[1]);
+
     /* CPUID(1).ecx */
     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3);    // -> EMU
@@ -2328,6 +2614,7 @@
 
     /* CPUID(0x80000000). */
-    if (    aGuestCpuIdExt[0].eax >= UINT32_C(0x80000001)
-        &&  aGuestCpuIdExt[0].eax <  UINT32_C(0x8000007f))
+    CPUMCPUID aGuestCpuIdExt[2];
+    RT_ZERO(aGuestCpuIdExt);
+    if (cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, UINT32_C(0x80000001), 0, &aGuestCpuIdExt[1]))
     {
         /** @todo deal with no 0x80000001 on the host. */
@@ -2407,8 +2694,12 @@
      * We're good, commit the CPU ID leaves.
      */
-    memcpy(&pVM->cpum.s.aGuestCpuIdStd[0],     &aGuestCpuIdStd[0],     sizeof(aGuestCpuIdStd));
-    memcpy(&pVM->cpum.s.aGuestCpuIdExt[0],     &aGuestCpuIdExt[0],     sizeof(aGuestCpuIdExt));
-    memcpy(&pVM->cpum.s.aGuestCpuIdCentaur[0], &aGuestCpuIdCentaur[0], sizeof(aGuestCpuIdCentaur));
-    pVM->cpum.s.GuestCpuIdDef = GuestCpuIdDef;
+    MMHyperFree(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
+    pVM->cpum.s.GuestInfo.paCpuIdLeavesR0 = NIL_RTR0PTR;
+    pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = NIL_RTRCPTR;
+    pVM->cpum.s.GuestInfo.DefCpuId = GuestCpuIdDef;
+    rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves);
+    RTMemFree(paLeaves);
+    AssertLogRelRCReturn(rc, rc);
+
 
 #undef CPUID_CHECK_RET
Index: /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 49893)
+++ /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 49893)
@@ -0,0 +1,1293 @@
+/* $Id$ */
+/** @file
+ * CPUM - CPU ID part.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+/*******************************************************************************
+*   Header Files                                                               *
+*******************************************************************************/
+#define LOG_GROUP LOG_GROUP_CPUM
+#include <VBox/vmm/cpum.h>
+#include "CPUMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/err.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/ctype.h>
+#include <iprt/mem.h>
+#include <iprt/stream.h>
+#include <iprt/string.h>
+
+
+/*******************************************************************************
+*   Global Variables                                                           *
+*******************************************************************************/
+/**
+ * Intel family 06h microarchitecture lookup table, indexed by the CPU model
+ * number.  Entries above 0x0f imply the extended model bits are folded into
+ * the index (see the family-6 lookup in CPUMR3CpuIdDetermineMicroarchEx).
+ * Unrecognised models are marked kCpumMicroarch_Intel_Unknown.
+ */
+static const CPUMMICROARCH g_aenmIntelFamily06[] =
+{
+    /* [ 0(0x00)] = */ kCpumMicroarch_Intel_P6,           /* Pentium Pro A-step (says sandpile.org). */
+    /* [ 1(0x01)] = */ kCpumMicroarch_Intel_P6,           /* Pentium Pro */
+    /* [ 2(0x02)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [ 3(0x03)] = */ kCpumMicroarch_Intel_P6_II,        /* PII Klamath */
+    /* [ 4(0x04)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [ 5(0x05)] = */ kCpumMicroarch_Intel_P6_II,        /* PII Deschutes */
+    /* [ 6(0x06)] = */ kCpumMicroarch_Intel_P6_II,        /* Celeron Mendocino. */
+    /* [ 7(0x07)] = */ kCpumMicroarch_Intel_P6_III,       /* PIII Katmai. */
+    /* [ 8(0x08)] = */ kCpumMicroarch_Intel_P6_III,       /* PIII Coppermine (includes Celeron). */
+    /* [ 9(0x09)] = */ kCpumMicroarch_Intel_P6_M_Banias,  /* Pentium/Celeron M Banias. */
+    /* [10(0x0a)] = */ kCpumMicroarch_Intel_P6_III,       /* PIII Xeon */
+    /* [11(0x0b)] = */ kCpumMicroarch_Intel_P6_III,       /* PIII Tualatin (includes Celeron). */
+    /* [12(0x0c)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [13(0x0d)] = */ kCpumMicroarch_Intel_P6_M_Dothan,  /* Pentium/Celeron M Dothan. */
+    /* [14(0x0e)] = */ kCpumMicroarch_Intel_Core_Yonah,   /* Core Yonah (Enhanced Pentium M). */
+    /* [15(0x0f)] = */ kCpumMicroarch_Intel_Core2_Merom,  /* Merom */
+    /* [16(0x10)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [17(0x11)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [18(0x12)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [19(0x13)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [20(0x14)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [21(0x15)] = */ kCpumMicroarch_Intel_P6_M_Dothan,  /* Tolapai - System-on-a-chip. */
+    /* [22(0x16)] = */ kCpumMicroarch_Intel_Core2_Merom,
+    /* [23(0x17)] = */ kCpumMicroarch_Intel_Core2_Penryn,
+    /* [24(0x18)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [25(0x19)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [26(0x1a)] = */ kCpumMicroarch_Intel_Core7_Nehalem,
+    /* [27(0x1b)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [28(0x1c)] = */ kCpumMicroarch_Intel_Atom_Bonnell, /* Diamonville, Pineview, */
+    /* [29(0x1d)] = */ kCpumMicroarch_Intel_Core2_Penryn,
+    /* [30(0x1e)] = */ kCpumMicroarch_Intel_Core7_Nehalem, /* Clarksfield, Lynnfield, Jasper Forest. */
+    /* [31(0x1f)] = */ kCpumMicroarch_Intel_Core7_Nehalem, /* Only listed by sandpile.org.  2 cores ABD/HVD, whatever that means. */
+    /* [32(0x20)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [33(0x21)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [34(0x22)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [35(0x23)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [36(0x24)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [37(0x25)] = */ kCpumMicroarch_Intel_Core7_Westmere, /* Arrandale, Clarksdale. */
+    /* [38(0x26)] = */ kCpumMicroarch_Intel_Atom_Lincroft,
+    /* [39(0x27)] = */ kCpumMicroarch_Intel_Atom_Saltwell,
+    /* [40(0x28)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [41(0x29)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [42(0x2a)] = */ kCpumMicroarch_Intel_Core7_SandyBridge,
+    /* [43(0x2b)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [44(0x2c)] = */ kCpumMicroarch_Intel_Core7_Westmere, /* Gulftown, Westmere-EP. */
+    /* [45(0x2d)] = */ kCpumMicroarch_Intel_Core7_SandyBridge, /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP. */
+    /* [46(0x2e)] = */ kCpumMicroarch_Intel_Core7_Nehalem,  /* Beckton (Xeon). */
+    /* [47(0x2f)] = */ kCpumMicroarch_Intel_Core7_Westmere, /* Westmere-EX. */
+    /* [48(0x30)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [49(0x31)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [50(0x32)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [51(0x33)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [52(0x34)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [53(0x35)] = */ kCpumMicroarch_Intel_Atom_Saltwell, /* ?? */
+    /* [54(0x36)] = */ kCpumMicroarch_Intel_Atom_Saltwell, /* Cedarview, ++ */
+    /* [55(0x37)] = */ kCpumMicroarch_Intel_Atom_Silvermont,
+    /* [56(0x38)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [57(0x39)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [58(0x3a)] = */ kCpumMicroarch_Intel_Core7_IvyBridge,
+    /* [59(0x3b)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [60(0x3c)] = */ kCpumMicroarch_Intel_Core7_Haswell,
+    /* [61(0x3d)] = */ kCpumMicroarch_Intel_Core7_Broadwell,
+    /* [62(0x3e)] = */ kCpumMicroarch_Intel_Core7_IvyBridge,
+    /* [63(0x3f)] = */ kCpumMicroarch_Intel_Core7_Haswell,
+    /* [64(0x40)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [65(0x41)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [66(0x42)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [67(0x43)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [68(0x44)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [69(0x45)] = */ kCpumMicroarch_Intel_Core7_Haswell,
+    /* [70(0x46)] = */ kCpumMicroarch_Intel_Core7_Haswell,
+    /* [71(0x47)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [72(0x48)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [73(0x49)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [74(0x4a)] = */ kCpumMicroarch_Intel_Atom_Silvermont,
+    /* [75(0x4b)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [76(0x4c)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [77(0x4d)] = */ kCpumMicroarch_Intel_Atom_Silvermont,
+    /* [78(0x4e)] = */ kCpumMicroarch_Intel_Unknown,
+    /* [79(0x4f)] = */ kCpumMicroarch_Intel_Unknown,
+};
+
+
+
+/**
+ * Figures out the (sub-)micro architecture given a bit of CPUID info.
+ *
+ * @returns Micro architecture.
+ * @param   enmVendor           The CPU vendor.
+ * @param   bFamily             The CPU family.
+ * @param   bModel              The CPU model.
+ * @param   bStepping           The CPU stepping.
+ */
+VMMR3DECL(CPUMMICROARCH) CPUMR3CpuIdDetermineMicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
+                                                         uint8_t bModel, uint8_t bStepping)
+{
+    if (enmVendor == CPUMCPUVENDOR_AMD)
+    {
+        switch (bFamily)
+        {
+            case 0x02:  return kCpumMicroarch_AMD_Am286; /* Not really kosher... */
+            case 0x03:  return kCpumMicroarch_AMD_Am386;
+            case 0x23:  return kCpumMicroarch_AMD_Am386; /* SX*/
+            case 0x04:  return bModel < 14 ? kCpumMicroarch_AMD_Am486 : kCpumMicroarch_AMD_Am486Enh;
+            case 0x05:  return bModel <  6 ? kCpumMicroarch_AMD_K5    : kCpumMicroarch_AMD_K6; /* Genode LX is 0x0a, lump it with K6. */
+            case 0x06:
+                switch (bModel)
+                {
+                    /* Note: these cases previously lacked 'return' and were
+                       no-op expression statements, so every K7 model fell
+                       through to K7_Unknown. */
+                    case  0: return kCpumMicroarch_AMD_K7_Palomino;
+                    case  1: return kCpumMicroarch_AMD_K7_Palomino;
+                    case  2: return kCpumMicroarch_AMD_K7_Palomino;
+                    case  3: return kCpumMicroarch_AMD_K7_Spitfire;
+                    case  4: return kCpumMicroarch_AMD_K7_Thunderbird;
+                    case  6: return kCpumMicroarch_AMD_K7_Palomino;
+                    case  7: return kCpumMicroarch_AMD_K7_Morgan;
+                    case  8: return kCpumMicroarch_AMD_K7_Thoroughbred;
+                    case 10: return kCpumMicroarch_AMD_K7_Barton; /* Thorton too. */
+                }
+                return kCpumMicroarch_AMD_K7_Unknown;
+            case 0x0f:
+                /*
+                 * This family is a friggin mess. Trying my best to make some
+                 * sense out of it. Too much happened in the 0x0f family to
+                 * lump it all together as K8 (130nm->90nm->65nm, AMD-V, ++).
+                 *
+                 * Empirical CPUID.01h.EAX evidence from revision guides, wikipedia,
+                 * cpu-world.com, and other places:
+                 *  - 130nm:
+                 *     - ClawHammer:    F7A/SH-CG, F5A/-CG, F4A/-CG, F50/-B0, F48/-C0, F58/-C0,
+                 *     - SledgeHammer:  F50/SH-B0, F48/-C0, F58/-C0, F4A/-CG, F5A/-CG, F7A/-CG, F51/-B3
+                 *     - Newcastle:     FC0/DH-CG (erratum #180: FE0/DH-CG), FF0/DH-CG
+                 *     - Dublin:        FC0/-CG, FF0/-CG, F82/CH-CG, F4A/-CG, F48/SH-C0,
+                 *     - Odessa:        FC0/DH-CG (erratum #180: FE0/DH-CG)
+                 *     - Paris:         FF0/DH-CG, FC0/DH-CG (erratum #180: FE0/DH-CG),
+                 *  - 90nm:
+                 *     - Winchester:    10FF0/DH-D0, 20FF0/DH-E3.
+                 *     - Oakville:      10FC0/DH-D0.
+                 *     - Georgetown:    10FC0/DH-D0.
+                 *     - Sonora:        10FC0/DH-D0.
+                 *     - Venus:         20F71/SH-E4
+                 *     - Troy:          20F51/SH-E4
+                 *     - Athens:        20F51/SH-E4
+                 *     - San Diego:     20F71/SH-E4.
+                 *     - Lancaster:     20F42/SH-E5
+                 *     - Newark:        20F42/SH-E5.
+                 *     - Albany:        20FC2/DH-E6.
+                 *     - Roma:          20FC2/DH-E6.
+                 *     - Venice:        20FF0/DH-E3, 20FC2/DH-E6, 20FF2/DH-E6.
+                 *     - Palermo:       10FC0/DH-D0, 20FF0/DH-E3, 20FC0/DH-E3, 20FC2/DH-E6, 20FF2/DH-E6
+                 *  - 90nm introducing Dual core:
+                 *     - Denmark:       20F30/JH-E1, 20F32/JH-E6
+                 *     - Italy:         20F10/JH-E1, 20F12/JH-E6
+                 *     - Egypt:         20F10/JH-E1, 20F12/JH-E6
+                 *     - Toledo:        20F32/JH-E6, 30F72/DH-E6 (single core variant).
+                 *     - Manchester:    20FB1/BH-E4, 30FF2/BH-E4.
+                 *  - 90nm 2nd gen opteron ++, AMD-V introduced (might be missing in some cheaper models):
+                 *     - Santa Ana:     40F32/JH-F2, /-F3
+                 *     - Santa Rosa:    40F12/JH-F2, 40F13/JH-F3
+                 *     - Windsor:       40F32/JH-F2, 40F33/JH-F3, C0F13/JH-F3, 40FB2/BH-F2, ??20FB1/BH-E4??.
+                 *     - Manila:        50FF2/DH-F2, 40FF2/DH-F2
+                 *     - Orleans:       40FF2/DH-F2, 50FF2/DH-F2, 50FF3/DH-F3.
+                 *     - Keene:         40FC2/DH-F2.
+                 *     - Richmond:      40FC2/DH-F2
+                 *     - Taylor:        40F82/BH-F2
+                 *     - Trinidad:      40F82/BH-F2
+                 *
+                 *  - 65nm:
+                 *     - Brisbane:      60FB1/BH-G1, 60FB2/BH-G2.
+                 *     - Tyler:         60F81/BH-G1, 60F82/BH-G2.
+                 *     - Sparta:        70FF1/DH-G1, 70FF2/DH-G2.
+                 *     - Lima:          70FF1/DH-G1, 70FF2/DH-G2.
+                 *     - Sherman:       /-G1, 70FC2/DH-G2.
+                 *     - Huron:         70FF2/DH-G2.
+                 */
+                if (bModel < 0x10)
+                    return kCpumMicroarch_AMD_K8_130nm;
+                if (bModel >= 0x60 && bModel < 0x80)
+                    return kCpumMicroarch_AMD_K8_65nm;
+                if (bModel >= 0x40)
+                    return kCpumMicroarch_AMD_K8_90nm_AMDV;
+                switch (bModel)
+                {
+                    case 0x21:
+                    case 0x23:
+                    case 0x2b:
+                    case 0x2f:
+                    case 0x37:
+                    case 0x3f:
+                        return kCpumMicroarch_AMD_K8_90nm_DualCore;
+                }
+                return kCpumMicroarch_AMD_K8_90nm;
+            case 0x10:
+                return kCpumMicroarch_AMD_K10;
+            case 0x11:
+                return kCpumMicroarch_AMD_K10_Lion;
+            case 0x12:
+                return kCpumMicroarch_AMD_K10_Llano;
+            case 0x14:
+                return kCpumMicroarch_AMD_Bobcat;
+            case 0x15:
+                switch (bModel)
+                {
+                    case 0x00:  return kCpumMicroarch_AMD_15h_Bulldozer;    /* Any? prerelease? */
+                    case 0x01:  return kCpumMicroarch_AMD_15h_Bulldozer;    /* Opteron 4200, FX-81xx. */
+                    case 0x02:  return kCpumMicroarch_AMD_15h_Piledriver;   /* Opteron 4300, FX-83xx. */
+                    case 0x10:  return kCpumMicroarch_AMD_15h_Piledriver;   /* A10-5800K for e.g. */
+                    case 0x11:  /* ?? */
+                    case 0x12:  /* ?? */
+                    case 0x13:  return kCpumMicroarch_AMD_15h_Piledriver;   /* A10-6800K for e.g. */
+                }
+                return kCpumMicroarch_AMD_15h_Unknown;
+            case 0x16:
+                return kCpumMicroarch_AMD_Jaguar;
+
+        }
+        return kCpumMicroarch_AMD_Unknown;
+    }
+
+    if (enmVendor == CPUMCPUVENDOR_INTEL)
+    {
+        switch (bFamily)
+        {
+            case 3:
+                return kCpumMicroarch_Intel_80386;
+            case 4:
+                return kCpumMicroarch_Intel_80486;
+            case 5:
+                return kCpumMicroarch_Intel_P5;
+            case 6:
+                if (bModel < RT_ELEMENTS(g_aenmIntelFamily06))
+                    return g_aenmIntelFamily06[bModel];
+                /* NOTE(review): Atom_Unknown for unlisted family 06h models
+                   looks suspicious - newer unlisted models are mostly Core -
+                   but is preserved as-is; confirm intent. */
+                return kCpumMicroarch_Intel_Atom_Unknown;
+            case 15:
+                switch (bModel)
+                {
+                    case 0:     return kCpumMicroarch_Intel_NB_Willamette;
+                    case 1:     return kCpumMicroarch_Intel_NB_Willamette;
+                    case 2:     return kCpumMicroarch_Intel_NB_Northwood;
+                    case 3:     return kCpumMicroarch_Intel_NB_Prescott;
+                    case 4:     return kCpumMicroarch_Intel_NB_Prescott2M; /* ?? */
+                    case 5:     return kCpumMicroarch_Intel_NB_Unknown; /*??*/
+                    case 6:     return kCpumMicroarch_Intel_NB_CedarMill;
+                    case 7:     return kCpumMicroarch_Intel_NB_Gallatin;
+                    default:    return kCpumMicroarch_Intel_NB_Unknown;
+                }
+                break;
+            /* The following are not kosher but kind of follow intuitively from 6, 5 & 4. */
+            case 1:
+                return kCpumMicroarch_Intel_8086;
+            case 2:
+                return kCpumMicroarch_Intel_80286;
+        }
+        return kCpumMicroarch_Intel_Unknown;
+    }
+
+    if (enmVendor == CPUMCPUVENDOR_VIA)
+    {
+        switch (bFamily)
+        {
+            case 5:
+                switch (bModel)
+                {
+                    case 1: return kCpumMicroarch_Centaur_C6;
+                    case 4: return kCpumMicroarch_Centaur_C6;
+                    case 8: return kCpumMicroarch_Centaur_C2;
+                    case 9: return kCpumMicroarch_Centaur_C3;
+                }
+                break;
+
+            case 6:
+                switch (bModel)
+                {
+                    case  5: return kCpumMicroarch_VIA_C3_M2;
+                    case  6: return kCpumMicroarch_VIA_C3_C5A;
+                    case  7: return bStepping < 8 ? kCpumMicroarch_VIA_C3_C5B : kCpumMicroarch_VIA_C3_C5C;
+                    case  8: return kCpumMicroarch_VIA_C3_C5N;
+                    case  9: return bStepping < 8 ? kCpumMicroarch_VIA_C3_C5XL : kCpumMicroarch_VIA_C3_C5P;
+                    case 10: return kCpumMicroarch_VIA_C7_C5J;
+                    case 15: return kCpumMicroarch_VIA_Isaiah;
+                }
+                break;
+        }
+        return kCpumMicroarch_VIA_Unknown;
+    }
+
+    if (enmVendor == CPUMCPUVENDOR_CYRIX)
+    {
+        switch (bFamily)
+        {
+            case 4:
+                switch (bModel)
+                {
+                    case 9: return kCpumMicroarch_Cyrix_5x86;
+                }
+                break;
+
+            case 5:
+                switch (bModel)
+                {
+                    case 2: return kCpumMicroarch_Cyrix_M1;
+                    case 4: return kCpumMicroarch_Cyrix_MediaGX;
+                    case 5: return kCpumMicroarch_Cyrix_MediaGXm;
+                }
+                break;
+
+            case 6:
+                switch (bModel)
+                {
+                    case 0: return kCpumMicroarch_Cyrix_M2;
+                }
+                break;
+
+        }
+        return kCpumMicroarch_Cyrix_Unknown;
+    }
+
+    return kCpumMicroarch_Unknown;
+}
+
+
+/**
+ * Translates a microarchitecture enum value to the corresponding string
+ * constant.
+ *
+ * @returns Read-only string constant (omits "kCpumMicroarch_" prefix). Returns
+ *          NULL if the value is invalid.
+ *
+ * @param   enmMicroarch    The enum value to convert.
+ */
+VMMR3DECL(const char *) CPUMR3MicroarchName(CPUMMICROARCH enmMicroarch)
+{
+    switch (enmMicroarch)
+    {
+        /* The macro stringizes the enum name and skips the common
+           "kCpumMicroarch_" prefix via pointer arithmetic on the literal. */
+#define CASE_RET_STR(enmValue)  case enmValue: return #enmValue + (sizeof("kCpumMicroarch_") - 1)
+        CASE_RET_STR(kCpumMicroarch_Intel_8086);
+        CASE_RET_STR(kCpumMicroarch_Intel_80186);
+        CASE_RET_STR(kCpumMicroarch_Intel_80286);
+        CASE_RET_STR(kCpumMicroarch_Intel_80386);
+        CASE_RET_STR(kCpumMicroarch_Intel_80486);
+        CASE_RET_STR(kCpumMicroarch_Intel_P5);
+
+        CASE_RET_STR(kCpumMicroarch_Intel_P6);
+        CASE_RET_STR(kCpumMicroarch_Intel_P6_II);
+        CASE_RET_STR(kCpumMicroarch_Intel_P6_III);
+
+        CASE_RET_STR(kCpumMicroarch_Intel_P6_M_Banias);
+        CASE_RET_STR(kCpumMicroarch_Intel_P6_M_Dothan);
+        CASE_RET_STR(kCpumMicroarch_Intel_Core_Yonah);
+
+        CASE_RET_STR(kCpumMicroarch_Intel_Core2_Merom);
+        CASE_RET_STR(kCpumMicroarch_Intel_Core2_Penryn);
+
+        CASE_RET_STR(kCpumMicroarch_Intel_Core7_Nehalem);
+        CASE_RET_STR(kCpumMicroarch_Intel_Core7_Westmere);
+        CASE_RET_STR(kCpumMicroarch_Intel_Core7_SandyBridge);
+        CASE_RET_STR(kCpumMicroarch_Intel_Core7_IvyBridge);
+        CASE_RET_STR(kCpumMicroarch_Intel_Core7_Haswell);
+        CASE_RET_STR(kCpumMicroarch_Intel_Core7_Broadwell);
+        CASE_RET_STR(kCpumMicroarch_Intel_Core7_Skylake);
+        CASE_RET_STR(kCpumMicroarch_Intel_Core7_Cannonlake);
+
+        CASE_RET_STR(kCpumMicroarch_Intel_Atom_Bonnell);
+        CASE_RET_STR(kCpumMicroarch_Intel_Atom_Lincroft);
+        CASE_RET_STR(kCpumMicroarch_Intel_Atom_Saltwell);
+        CASE_RET_STR(kCpumMicroarch_Intel_Atom_Silvermont);
+        CASE_RET_STR(kCpumMicroarch_Intel_Atom_Airmount);
+        CASE_RET_STR(kCpumMicroarch_Intel_Atom_Goldmont);
+        CASE_RET_STR(kCpumMicroarch_Intel_Atom_Unknown);
+
+        CASE_RET_STR(kCpumMicroarch_Intel_NB_Willamette);
+        CASE_RET_STR(kCpumMicroarch_Intel_NB_Northwood);
+        CASE_RET_STR(kCpumMicroarch_Intel_NB_Prescott);
+        CASE_RET_STR(kCpumMicroarch_Intel_NB_Prescott2M);
+        CASE_RET_STR(kCpumMicroarch_Intel_NB_CedarMill);
+        CASE_RET_STR(kCpumMicroarch_Intel_NB_Gallatin);
+        CASE_RET_STR(kCpumMicroarch_Intel_NB_Unknown);
+
+        CASE_RET_STR(kCpumMicroarch_Intel_Unknown);
+
+        CASE_RET_STR(kCpumMicroarch_AMD_Am286);
+        CASE_RET_STR(kCpumMicroarch_AMD_Am386);
+        CASE_RET_STR(kCpumMicroarch_AMD_Am486);
+        CASE_RET_STR(kCpumMicroarch_AMD_Am486Enh);
+        CASE_RET_STR(kCpumMicroarch_AMD_K5);
+        CASE_RET_STR(kCpumMicroarch_AMD_K6);
+
+        CASE_RET_STR(kCpumMicroarch_AMD_K7_Palomino);
+        CASE_RET_STR(kCpumMicroarch_AMD_K7_Spitfire);
+        CASE_RET_STR(kCpumMicroarch_AMD_K7_Thunderbird);
+        CASE_RET_STR(kCpumMicroarch_AMD_K7_Morgan);
+        CASE_RET_STR(kCpumMicroarch_AMD_K7_Thoroughbred);
+        CASE_RET_STR(kCpumMicroarch_AMD_K7_Barton);
+        CASE_RET_STR(kCpumMicroarch_AMD_K7_Unknown);
+
+        CASE_RET_STR(kCpumMicroarch_AMD_K8_130nm);
+        CASE_RET_STR(kCpumMicroarch_AMD_K8_90nm);
+        CASE_RET_STR(kCpumMicroarch_AMD_K8_90nm_DualCore);
+        CASE_RET_STR(kCpumMicroarch_AMD_K8_90nm_AMDV);
+        CASE_RET_STR(kCpumMicroarch_AMD_K8_65nm);
+
+        CASE_RET_STR(kCpumMicroarch_AMD_K10);
+        CASE_RET_STR(kCpumMicroarch_AMD_K10_Lion);
+        CASE_RET_STR(kCpumMicroarch_AMD_K10_Llano);
+        CASE_RET_STR(kCpumMicroarch_AMD_Bobcat);
+        CASE_RET_STR(kCpumMicroarch_AMD_Jaguar);
+
+        CASE_RET_STR(kCpumMicroarch_AMD_15h_Bulldozer);
+        CASE_RET_STR(kCpumMicroarch_AMD_15h_Piledriver);
+        CASE_RET_STR(kCpumMicroarch_AMD_15h_Steamroller);
+        CASE_RET_STR(kCpumMicroarch_AMD_15h_Excavator);
+        CASE_RET_STR(kCpumMicroarch_AMD_15h_Unknown);
+
+        CASE_RET_STR(kCpumMicroarch_AMD_16h_First);
+
+        CASE_RET_STR(kCpumMicroarch_AMD_Unknown);
+
+        CASE_RET_STR(kCpumMicroarch_Centaur_C6);
+        CASE_RET_STR(kCpumMicroarch_Centaur_C2);
+        CASE_RET_STR(kCpumMicroarch_Centaur_C3);
+        CASE_RET_STR(kCpumMicroarch_VIA_C3_M2);
+        CASE_RET_STR(kCpumMicroarch_VIA_C3_C5A);
+        CASE_RET_STR(kCpumMicroarch_VIA_C3_C5B);
+        CASE_RET_STR(kCpumMicroarch_VIA_C3_C5C);
+        CASE_RET_STR(kCpumMicroarch_VIA_C3_C5N);
+        CASE_RET_STR(kCpumMicroarch_VIA_C3_C5XL);
+        CASE_RET_STR(kCpumMicroarch_VIA_C3_C5P);
+        CASE_RET_STR(kCpumMicroarch_VIA_C7_C5J);
+        CASE_RET_STR(kCpumMicroarch_VIA_Isaiah);
+        CASE_RET_STR(kCpumMicroarch_VIA_Unknown);
+
+        CASE_RET_STR(kCpumMicroarch_Cyrix_5x86);
+        CASE_RET_STR(kCpumMicroarch_Cyrix_M1);
+        CASE_RET_STR(kCpumMicroarch_Cyrix_MediaGX);
+        CASE_RET_STR(kCpumMicroarch_Cyrix_MediaGXm);
+        CASE_RET_STR(kCpumMicroarch_Cyrix_M2);
+        CASE_RET_STR(kCpumMicroarch_Cyrix_Unknown);
+
+        CASE_RET_STR(kCpumMicroarch_Unknown);
+
+#undef CASE_RET_STR
+        /* Range markers and the 32-bit size hack are not valid values;
+           they are enumerated (rather than defaulted) so the compiler can
+           warn when a new microarchitecture is added without a string. */
+        case kCpumMicroarch_Invalid:
+        case kCpumMicroarch_Intel_End:
+        case kCpumMicroarch_Intel_Core7_End:
+        case kCpumMicroarch_Intel_Atom_End:
+        case kCpumMicroarch_Intel_P6_Core_Atom_End:
+        case kCpumMicroarch_Intel_NB_End:
+        case kCpumMicroarch_AMD_K7_End:
+        case kCpumMicroarch_AMD_K8_End:
+        case kCpumMicroarch_AMD_15h_End:
+        case kCpumMicroarch_AMD_16h_End:
+        case kCpumMicroarch_AMD_End:
+        case kCpumMicroarch_VIA_End:
+        case kCpumMicroarch_Cyrix_End:
+        case kCpumMicroarch_32BitHack:
+            break;
+        /* no default! */
+    }
+
+    return NULL;
+}
+
+
+
+/**
+ * Looks up a leaf/sub-leaf pair in a CPUID leaf array.
+ *
+ * @returns Pointer to the matching leaf, or NULL if not found.
+ * @param   paLeaves            The CPUID leaves to search.  This is sorted.
+ * @param   cLeaves             The number of leaves in the array.
+ * @param   uLeaf               The leaf to locate.
+ * @param   uSubLeaf            The subleaf to locate.  Pass 0 if no subleaves.
+ */
+PCPUMCPUIDLEAF cpumR3CpuIdGetLeaf(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf)
+{
+    /* A linear scan is good enough despite the sorted input; this is only
+       used for the occasional CPUID override. */
+    PCPUMCPUIDLEAF const pEnd = &paLeaves[cLeaves];
+    for (PCPUMCPUIDLEAF pCur = paLeaves; pCur != pEnd; pCur++)
+        if (   pCur->uLeaf    == uLeaf
+            && pCur->uSubLeaf == (uSubLeaf & pCur->fSubLeafMask))
+            return pCur;
+    return NULL;
+}
+
+
+/**
+ * Gets a matching leaf in the CPUID leaf array, converted to a CPUMCPUID.
+ *
+ * @returns true if found, false if not.
+ * @param   paLeaves            The CPUID leaves to search.  This is sorted.
+ * @param   cLeaves             The number of leaves in the array.
+ * @param   uLeaf               The leaf to locate.
+ * @param   uSubLeaf            The subleaf to locate.  Pass 0 if no subleaves.
+ * @param   pLegacy             The legacy output leaf.
+ */
+bool cpumR3CpuIdGetLeafLegacy(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf, PCPUMCPUID pLegacy)
+{
+    PCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, uLeaf, uSubLeaf);
+    if (pLeaf)
+    {
+        /* Copy the four register values into the legacy layout. */
+        pLegacy->eax = pLeaf->uEax;
+        pLegacy->ebx = pLeaf->uEbx;
+        pLegacy->ecx = pLeaf->uEcx;
+        pLegacy->edx = pLeaf->uEdx;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Makes sure the CPUID leaf array has room for at least one more entry.
+ *
+ * @returns Pointer to the CPUID leaf array (*ppaLeaves) on success.  NULL on
+ *          failure, in which case the array has been freed and *ppaLeaves
+ *          set to NULL (the caller need not clean up).
+ * @param   ppaLeaves           Pointer to the variable holding the array
+ *                              pointer (input/output).
+ * @param   cLeaves             The current array size.
+ */
+static PCPUMCPUIDLEAF cpumR3CpuIdEnsureSpace(PCPUMCPUIDLEAF *ppaLeaves, uint32_t cLeaves)
+{
+    /* The allocation is always kept at a multiple of 16 entries, so the
+       current capacity can be recomputed from the element count. */
+    uint32_t const cCapacity = RT_ALIGN(cLeaves, 16);
+    if (cLeaves + 1 > cCapacity)
+    {
+        void * const pvResized = RTMemRealloc(*ppaLeaves, (cCapacity + 16) * sizeof(**ppaLeaves));
+        if (!pvResized)
+        {
+            /* Out of memory: release the old block and signal failure. */
+            RTMemFree(*ppaLeaves);
+            *ppaLeaves = NULL;
+            return NULL;
+        }
+        *ppaLeaves = (PCPUMCPUIDLEAF)pvResized;
+    }
+    return *ppaLeaves;
+}
+
+
+/**
+ * Appends a CPUID leaf or sub-leaf to the array.
+ *
+ * ASSUMES linear insertion order, so no searching or replacing is needed
+ * here.  Use cpumR3CpuIdInsert for the out-of-order cases.
+ *
+ * @returns VINF_SUCCESS or VERR_NO_MEMORY.  On error, *ppaLeaves is freed, so
+ *          the caller need do no more work.
+ * @param   ppaLeaves       Pointer to the pointer to the array of sorted
+ *                          CPUID leaves and sub-leaves.
+ * @param   pcLeaves        Where we keep the leaf count for *ppaLeaves.
+ * @param   uLeaf           The leaf we're adding.
+ * @param   uSubLeaf        The sub-leaf number.
+ * @param   fSubLeafMask    The sub-leaf mask.
+ * @param   uEax            The EAX value.
+ * @param   uEbx            The EBX value.
+ * @param   uEcx            The ECX value.
+ * @param   uEdx            The EDX value.
+ * @param   fFlags          The flags.
+ */
+static int cpumR3CollectCpuIdInfoAddOne(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves,
+                                        uint32_t uLeaf, uint32_t uSubLeaf, uint32_t fSubLeafMask,
+                                        uint32_t uEax, uint32_t uEbx, uint32_t uEcx, uint32_t uEdx, uint32_t fFlags)
+{
+    /* Grow the array if necessary; on failure it has already been freed. */
+    if (!cpumR3CpuIdEnsureSpace(ppaLeaves, *pcLeaves))
+        return VERR_NO_MEMORY;
+
+    /* Sanity check the linear insertion order assumption. */
+    uint32_t const iInsert = *pcLeaves;
+    PCPUMCPUIDLEAF pLeaf   = &(*ppaLeaves)[iInsert];
+    Assert(   iInsert == 0
+           || pLeaf[-1].uLeaf < uLeaf
+           || (pLeaf[-1].uLeaf == uLeaf && pLeaf[-1].uSubLeaf < uSubLeaf) );
+
+    pLeaf->uLeaf        = uLeaf;
+    pLeaf->uSubLeaf     = uSubLeaf;
+    pLeaf->fSubLeafMask = fSubLeafMask;
+    pLeaf->uEax         = uEax;
+    pLeaf->uEbx         = uEbx;
+    pLeaf->uEcx         = uEcx;
+    pLeaf->uEdx         = uEdx;
+    pLeaf->fFlags       = fFlags;
+
+    *pcLeaves = iInsert + 1;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Inserts a CPU ID leaf, replacing any existing ones.
+ *
+ * When inserting a simple leaf where we already got a series of sub-leaves
+ * with the same leaf number (eax), the simple leaf will replace the whole
+ * series.
+ *
+ * This ASSUMES that the leaf array is still on the normal heap and has only
+ * been allocated/reallocated by the cpumR3CpuIdEnsureSpace function.
+ *
+ * @returns VBox status code.
+ * @param   ppaLeaves       Pointer to the pointer to the array of sorted
+ *                          CPUID leaves and sub-leaves.
+ * @param   pcLeaves        Where we keep the leaf count for *ppaLeaves.
+ * @param   pNewLeaf        Pointer to the data of the new leaf we're about to
+ *                          insert.
+ */
+int cpumR3CpuIdInsert(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCPUMCPUIDLEAF pNewLeaf)
+{
+    PCPUMCPUIDLEAF  paLeaves = *ppaLeaves;
+    uint32_t        cLeaves  = *pcLeaves;
+
+    /*
+     * Validate the new leaf a little.
+     */
+    AssertReturn(!(pNewLeaf->fFlags & ~CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED), VERR_INVALID_FLAGS);
+    AssertReturn(pNewLeaf->fSubLeafMask != 0 || pNewLeaf->uSubLeaf == 0, VERR_INVALID_PARAMETER);
+    AssertReturn(RT_IS_POWER_OF_TWO(pNewLeaf->fSubLeafMask + 1), VERR_INVALID_PARAMETER);
+    AssertReturn((pNewLeaf->fSubLeafMask & pNewLeaf->uSubLeaf) == pNewLeaf->uSubLeaf, VERR_INVALID_PARAMETER);
+
+
+    /*
+     * Find insertion point. The lazy bird uses the same excuse as in
+     * cpumR3CpuIdGetLeaf().
+     */
+    uint32_t i = 0;
+    while (   i < cLeaves
+           && paLeaves[i].uLeaf < pNewLeaf->uLeaf)
+        i++;
+    if (   i < cLeaves
+        && paLeaves[i].uLeaf == pNewLeaf->uLeaf)
+    {
+        if (paLeaves[i].fSubLeafMask != pNewLeaf->fSubLeafMask)
+        {
+            /*
+             * The subleaf mask differs, replace all existing leaves with the
+             * same leaf number.
+             */
+            uint32_t c = 1;
+            while (   i + c < cLeaves
+                   && paLeaves[i + c].uLeaf == pNewLeaf->uLeaf) /* was: .uSubLeaf == ->uLeaf */
+                c++;
+            if (c > 1)
+            {
+                /* Keep slot 'i' for the new leaf and shift any tail entries
+                   down over the remaining c - 1 entries of the series.  Note
+                   that the count must shrink even when the series sits at the
+                   very end of the array (no tail to move). */
+                if (i + c < cLeaves)
+                    memmove(&paLeaves[i + 1], &paLeaves[i + c], (cLeaves - i - c) * sizeof(paLeaves[0]));
+                *pcLeaves = cLeaves -= c - 1;
+            }
+
+            paLeaves[i] = *pNewLeaf;
+            return VINF_SUCCESS;
+        }
+
+        /* Find subleaf insertion point, staying within the current leaf's
+           series and within the array bounds. */
+        while (   i < cLeaves
+               && paLeaves[i].uLeaf    == pNewLeaf->uLeaf
+               && paLeaves[i].uSubLeaf <  pNewLeaf->uSubLeaf)
+            i++;
+
+        /*
+         * If we've got an exactly matching leaf, replace it.
+         */
+        if (   i < cLeaves
+            && paLeaves[i].uLeaf    == pNewLeaf->uLeaf
+            && paLeaves[i].uSubLeaf == pNewLeaf->uSubLeaf)
+        {
+            paLeaves[i] = *pNewLeaf;
+            return VINF_SUCCESS;
+        }
+    }
+
+    /*
+     * Adding a new leaf at 'i'.
+     */
+    paLeaves = cpumR3CpuIdEnsureSpace(ppaLeaves, cLeaves);
+    if (!paLeaves)
+        return VERR_NO_MEMORY;
+
+    if (i < cLeaves)
+        memmove(&paLeaves[i + 1], &paLeaves[i], (cLeaves - i) * sizeof(paLeaves[0]));
+    *pcLeaves += 1;
+    paLeaves[i] = *pNewLeaf;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Removes a range of CPUID leaves.
+ *
+ * This will not reallocate the array.
+ *
+ * @param   paLeaves        The array of sorted CPUID leaves and sub-leaves.
+ * @param   pcLeaves        Where we keep the leaf count for @a paLeaves.
+ * @param   uFirst          The first leaf.
+ * @param   uLast           The last leaf.
+ */
+void cpumR3CpuIdRemoveRange(PCPUMCPUIDLEAF paLeaves, uint32_t *pcLeaves, uint32_t uFirst, uint32_t uLast)
+{
+    uint32_t const cLeaves = *pcLeaves;
+
+    Assert(uFirst <= uLast);
+
+    /* Locate the first entry at or above uFirst... */
+    uint32_t idxLo = 0;
+    while (   idxLo < cLeaves
+           && paLeaves[idxLo].uLeaf < uFirst)
+        idxLo++;
+
+    /* ...and the first entry beyond uLast. */
+    uint32_t idxHi = idxLo;
+    while (   idxHi < cLeaves
+           && paLeaves[idxHi].uLeaf <= uLast)
+        idxHi++;
+
+    /* If anything fell in the range, shift the tail down over it and
+       shrink the count. */
+    uint32_t const cRemoved = idxHi - idxLo;
+    if (cRemoved > 0)
+    {
+        if (idxHi < cLeaves)
+            memmove(&paLeaves[idxLo], &paLeaves[idxHi], (cLeaves - idxHi) * sizeof(paLeaves[0]));
+        *pcLeaves = cLeaves - cRemoved;
+    }
+}
+
+
+
+/**
+ * Checks if ECX makes a difference when reading a given CPUID leaf.
+ *
+ * Probes the leaf on the host CPU with varying ECX inputs and compares the
+ * register outputs.
+ *
+ * @returns @c true if it does, @c false if it doesn't.
+ * @param   uLeaf               The leaf we're reading.
+ * @param   pcSubLeaves         Number of sub-leaves accessible via ECX.
+ * @param   pfFinalEcxUnchanged Whether ECX is passed thru when going beyond the
+ *                              final sub-leaf.
+ */
+static bool cpumR3IsEcxRelevantForCpuIdLeaf(uint32_t uLeaf, uint32_t *pcSubLeaves, bool *pfFinalEcxUnchanged)
+{
+    *pfFinalEcxUnchanged = false;
+
+    /* Baseline output: the leaf queried with ECX=0. */
+    uint32_t auPrev[4];
+    ASMCpuIdExSlow(uLeaf, 0, 0, 0, &auPrev[0], &auPrev[1], &auPrev[2], &auPrev[3]);
+
+    /* Look for sub-leaves: scan ECX=1..63 for any output differing from the
+       ECX=0 baseline. */
+    uint32_t uSubLeaf = 1;
+    for (;;)
+    {
+        uint32_t auCur[4];
+        ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
+        if (memcmp(auCur, auPrev, sizeof(auCur)))
+            break;
+
+        /* Advance / give up. */
+        uSubLeaf++;
+        if (uSubLeaf >= 64)
+        {
+            /* 64 identical probes: ECX is irrelevant for this leaf. */
+            *pcSubLeaves = 1;
+            return false;
+        }
+    }
+
+    /* Count sub-leaves: scan from ECX=0 until the all-zero(ish) terminator. */
+    uSubLeaf = 0;
+    for (;;)
+    {
+        uint32_t auCur[4];
+        ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
+
+        /* Exactly when this terminates isn't quite consistent.  When working
+           0xb, we should probably only check if ebx == 0... */
+        if (   auCur[0] == 0
+            && auCur[1] == 0
+            && (auCur[2] == 0 || auCur[2] == uSubLeaf) /* ECX may be passed thru */
+            && (auCur[3] == 0 || uLeaf == 0xb) )
+        {
+            if (auCur[2] == uSubLeaf)
+                *pfFinalEcxUnchanged = true;
+            *pcSubLeaves = uSubLeaf + 1;
+            return true;
+        }
+
+        /* Advance / give up. */
+        uSubLeaf++;
+        if (uSubLeaf >= 128)
+        {
+            /* No terminator found within 128 probes; the caller treats large
+               counts as VERR_CPUM_TOO_MANY_CPUID_SUBLEAVES. */
+            *pcSubLeaves = UINT32_MAX;
+            return true;
+        }
+    }
+}
+
+
+/**
+ * Collects CPUID leaves and sub-leaves, returning a sorted array of them.
+ *
+ * On failure the partially collected array is freed and the outputs reset, so
+ * nothing is leaked to the caller.
+ *
+ * @returns VBox status code.
+ * @param   ppaLeaves           Where to return the array pointer on success.
+ *                              Use RTMemFree to release.
+ * @param   pcLeaves            Where to return the size of the array on
+ *                              success.
+ */
+VMMR3DECL(int) CPUMR3CpuIdCollectLeaves(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
+{
+    *ppaLeaves = NULL;
+    *pcLeaves = 0;
+
+    /*
+     * Try out various candidates. This must be sorted!
+     */
+    static struct { uint32_t uLeaf; bool fSpecial; } const s_aCandidates[] =
+    {
+        { UINT32_C(0x00000000), false },
+        { UINT32_C(0x10000000), false },
+        { UINT32_C(0x20000000), false },
+        { UINT32_C(0x30000000), false },
+        { UINT32_C(0x40000000), false },
+        { UINT32_C(0x50000000), false },
+        { UINT32_C(0x60000000), false },
+        { UINT32_C(0x70000000), false },
+        { UINT32_C(0x80000000), false },
+        { UINT32_C(0x80860000), false },
+        { UINT32_C(0x8ffffffe), true  },
+        { UINT32_C(0x8fffffff), true  },
+        { UINT32_C(0x90000000), false },
+        { UINT32_C(0xa0000000), false },
+        { UINT32_C(0xb0000000), false },
+        { UINT32_C(0xc0000000), false },
+        { UINT32_C(0xd0000000), false },
+        { UINT32_C(0xe0000000), false },
+        { UINT32_C(0xf0000000), false },
+    };
+
+    for (uint32_t iOuter = 0; iOuter < RT_ELEMENTS(s_aCandidates); iOuter++)
+    {
+        uint32_t uLeaf = s_aCandidates[iOuter].uLeaf;
+        uint32_t uEax, uEbx, uEcx, uEdx;
+        ASMCpuIdExSlow(uLeaf, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
+
+        /*
+         * Does EAX look like a typical leaf count value?
+         */
+        if (   uEax         > uLeaf
+            && uEax - uLeaf < UINT32_C(0xff)) /* Adjust 0xff limit when exceeded by real HW. */
+        {
+            /* Yes, dump them. */
+            uint32_t cLeaves = uEax - uLeaf + 1;
+            while (cLeaves-- > 0)
+            {
+                /* Check three times here to reduce the chance of CPU migration
+                   resulting in false positives with things like the APIC ID. */
+                uint32_t cSubLeaves;
+                bool fFinalEcxUnchanged;
+                if (   cpumR3IsEcxRelevantForCpuIdLeaf(uLeaf, &cSubLeaves, &fFinalEcxUnchanged)
+                    && cpumR3IsEcxRelevantForCpuIdLeaf(uLeaf, &cSubLeaves, &fFinalEcxUnchanged)
+                    && cpumR3IsEcxRelevantForCpuIdLeaf(uLeaf, &cSubLeaves, &fFinalEcxUnchanged))
+                {
+                    if (cSubLeaves > 16)
+                    {
+                        /* Bail out without leaking what we've collected so far. */
+                        RTMemFree(*ppaLeaves);
+                        *ppaLeaves = NULL;
+                        *pcLeaves  = 0;
+                        return VERR_CPUM_TOO_MANY_CPUID_SUBLEAVES;
+                    }
+                    for (uint32_t uSubLeaf = 0; uSubLeaf < cSubLeaves; uSubLeaf++)
+                    {
+                        ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &uEax, &uEbx, &uEcx, &uEdx);
+                        int rc = cpumR3CollectCpuIdInfoAddOne(ppaLeaves, pcLeaves,
+                                                              uLeaf, uSubLeaf, UINT32_MAX, uEax, uEbx, uEcx, uEdx,
+                                                              uSubLeaf + 1 == cSubLeaves && fFinalEcxUnchanged
+                                                              ? CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED : 0);
+                        if (RT_FAILURE(rc))
+                        {
+                            RTMemFree(*ppaLeaves);
+                            *ppaLeaves = NULL;
+                            *pcLeaves  = 0;
+                            return rc;
+                        }
+                    }
+                }
+                else
+                {
+                    ASMCpuIdExSlow(uLeaf, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
+                    int rc = cpumR3CollectCpuIdInfoAddOne(ppaLeaves, pcLeaves,
+                                                          uLeaf, 0, 0, uEax, uEbx, uEcx, uEdx, 0);
+                    if (RT_FAILURE(rc))
+                    {
+                        RTMemFree(*ppaLeaves);
+                        *ppaLeaves = NULL;
+                        *pcLeaves  = 0;
+                        return rc;
+                    }
+                }
+
+                /* next */
+                uLeaf++;
+            }
+        }
+        /*
+         * Special CPUIDs need special handling as they don't follow the
+         * leaf count principle used above.
+         */
+        else if (s_aCandidates[iOuter].fSpecial)
+        {
+            bool fKeep = false;
+            if (uLeaf == 0x8ffffffe && uEax == UINT32_C(0x00494544))
+                fKeep = true;
+            else if (   uLeaf == 0x8fffffff
+                     && RT_C_IS_PRINT(RT_BYTE1(uEax))
+                     && RT_C_IS_PRINT(RT_BYTE2(uEax))
+                     && RT_C_IS_PRINT(RT_BYTE3(uEax))
+                     && RT_C_IS_PRINT(RT_BYTE4(uEax))
+                     && RT_C_IS_PRINT(RT_BYTE1(uEbx))
+                     && RT_C_IS_PRINT(RT_BYTE2(uEbx))
+                     && RT_C_IS_PRINT(RT_BYTE3(uEbx))
+                     && RT_C_IS_PRINT(RT_BYTE4(uEbx))
+                     && RT_C_IS_PRINT(RT_BYTE1(uEcx))
+                     && RT_C_IS_PRINT(RT_BYTE2(uEcx))
+                     && RT_C_IS_PRINT(RT_BYTE3(uEcx))
+                     && RT_C_IS_PRINT(RT_BYTE4(uEcx))
+                     && RT_C_IS_PRINT(RT_BYTE1(uEdx))
+                     && RT_C_IS_PRINT(RT_BYTE2(uEdx))
+                     && RT_C_IS_PRINT(RT_BYTE3(uEdx))
+                     && RT_C_IS_PRINT(RT_BYTE4(uEdx)) )
+                fKeep = true;
+            if (fKeep)
+            {
+                int rc = cpumR3CollectCpuIdInfoAddOne(ppaLeaves, pcLeaves,
+                                                      uLeaf, 0, 0, uEax, uEbx, uEcx, uEdx, 0);
+                if (RT_FAILURE(rc))
+                {
+                    RTMemFree(*ppaLeaves);
+                    *ppaLeaves = NULL;
+                    *pcLeaves  = 0;
+                    return rc;
+                }
+            }
+        }
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Determines the method the CPU uses to handle unknown CPUID leaves.
+ *
+ * Probes the host CPU with out-of-range leaves and varying register inputs
+ * and classifies what comes back.
+ *
+ * @returns VBox status code.
+ * @param   penmUnknownMethod   Where to return the method.
+ * @param   pDefUnknown         Where to return default unknown values.  This
+ *                              will be set, even if the resulting method
+ *                              doesn't actually need it.
+ */
+VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown)
+{
+    uint32_t uLastStd = ASMCpuId_EAX(0);
+    uint32_t uLastExt = ASMCpuId_EAX(0x80000000);
+    if (!ASMIsValidExtRange(uLastExt))
+        uLastExt = 0x80000000;
+
+    /* Leaves to probe: just past the last valid standard/extended leaves,
+       plus some arbitrary high values.  (Not static: depends on uLastStd and
+       uLastExt read above.) */
+    uint32_t auChecks[] =
+    {
+        uLastStd + 1,
+        uLastStd + 5,
+        uLastStd + 8,
+        uLastStd + 32,
+        uLastStd + 251,
+        uLastExt + 1,
+        uLastExt + 8,
+        uLastExt + 15,
+        uLastExt + 63,
+        uLastExt + 255,
+        0x7fbbffcc,
+        0x833f7872,
+        0xefff2353,
+        0x35779456,
+        0x1ef6d33e,
+    };
+
+    /* Arbitrary input values fed via EBX/ECX/EDX to see whether they are
+       passed thru to the outputs. */
+    static const uint32_t s_auValues[] =
+    {
+        0xa95d2156,
+        0x00000001,
+        0x00000002,
+        0x00000008,
+        0x00000000,
+        0x55773399,
+        0x93401769,
+        0x12039587,
+    };
+
+    /*
+     * Simple method, all zeros.
+     */
+    *penmUnknownMethod = CPUMUKNOWNCPUID_DEFAULTS;
+    pDefUnknown->eax = 0;
+    pDefUnknown->ebx = 0;
+    pDefUnknown->ecx = 0;
+    pDefUnknown->edx = 0;
+
+    /*
+     * Intel has been observed returning the last standard leaf.
+     */
+    uint32_t auLast[4];
+    ASMCpuIdExSlow(uLastStd, 0, 0, 0, &auLast[0], &auLast[1], &auLast[2], &auLast[3]);
+
+    /* Do all probe leaves return the same values as the last standard leaf? */
+    uint32_t cChecks = RT_ELEMENTS(auChecks);
+    while (cChecks > 0)
+    {
+        uint32_t auCur[4];
+        ASMCpuIdExSlow(auChecks[cChecks - 1], 0, 0, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
+        if (memcmp(auCur, auLast, sizeof(auCur)))
+            break;
+        cChecks--;
+    }
+    if (cChecks == 0)
+    {
+        /* Now, what happens when the input changes? Esp. ECX. */
+        uint32_t cTotal       = 0;
+        uint32_t cSame        = 0;
+        uint32_t cLastWithEcx = 0;
+        uint32_t cNeither     = 0;
+        uint32_t cValues = RT_ELEMENTS(s_auValues);
+        while (cValues > 0)
+        {
+            uint32_t uValue = s_auValues[cValues - 1];
+            uint32_t auLastWithEcx[4];
+            ASMCpuIdExSlow(uLastStd, uValue, uValue, uValue,
+                           &auLastWithEcx[0], &auLastWithEcx[1], &auLastWithEcx[2], &auLastWithEcx[3]);
+
+            cChecks = RT_ELEMENTS(auChecks);
+            while (cChecks > 0)
+            {
+                uint32_t auCur[4];
+                ASMCpuIdExSlow(auChecks[cChecks - 1], uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
+                if (!memcmp(auCur, auLast, sizeof(auCur)))
+                {
+                    cSame++;
+                    if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
+                        cLastWithEcx++;
+                }
+                else if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
+                    cLastWithEcx++;
+                else
+                    cNeither++;
+                cTotal++;
+                cChecks--;
+            }
+            cValues--;
+        }
+
+        /* NOTE(review): diagnostic output straight to stderr in a public API
+           function -- looks like debug leftover; consider Log() instead.
+           Confirm before removing, callers may rely on it during bring-up. */
+        RTStrmPrintf(g_pStdErr, "cNeither=%d cSame=%d cLastWithEcx=%d cTotal=%d\n", cNeither, cSame, cLastWithEcx, cTotal);
+        /* NOTE(review): the final else picks LAST_STD_LEAF just like the
+           cSame==cTotal case, i.e. mixed results default to LAST_STD_LEAF --
+           confirm this is intended. */
+        if (cSame == cTotal)
+            *penmUnknownMethod = CPUMUKNOWNCPUID_LAST_STD_LEAF;
+        else if (cLastWithEcx == cTotal)
+            *penmUnknownMethod = CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX;
+        else
+            *penmUnknownMethod = CPUMUKNOWNCPUID_LAST_STD_LEAF;
+        pDefUnknown->eax = auLast[0];
+        pDefUnknown->ebx = auLast[1];
+        pDefUnknown->ecx = auLast[2];
+        pDefUnknown->edx = auLast[3];
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Unchanged register values?
+     */
+    cChecks = RT_ELEMENTS(auChecks);
+    while (cChecks > 0)
+    {
+        uint32_t const  uLeaf   = auChecks[cChecks - 1];
+        uint32_t        cValues = RT_ELEMENTS(s_auValues);
+        while (cValues > 0)
+        {
+            uint32_t uValue = s_auValues[cValues - 1];
+            uint32_t auCur[4];
+            ASMCpuIdExSlow(uLeaf, uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
+            /* Passthru means EAX returns the leaf and EBX/ECX/EDX the inputs. */
+            if (   auCur[0] != uLeaf
+                || auCur[1] != uValue
+                || auCur[2] != uValue
+                || auCur[3] != uValue)
+                break;
+            cValues--;
+        }
+        if (cValues != 0)
+            break;
+        cChecks--;
+    }
+    if (cChecks == 0)
+    {
+        *penmUnknownMethod = CPUMUKNOWNCPUID_PASSTHRU;
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Just go with the simple method.
+     */
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Translates an unknown CPUID leaf method into the constant name (sans prefix).
+ *
+ * @returns Read only name string.
+ * @param   enmUnknownMethod    The method to translate.
+ */
+VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUKNOWNCPUID enmUnknownMethod)
+{
+    switch (enmUnknownMethod)
+    {
+        case CPUMUKNOWNCPUID_DEFAULTS:                  return "DEFAULTS";
+        case CPUMUKNOWNCPUID_LAST_STD_LEAF:             return "LAST_STD_LEAF";
+        case CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX:    return "LAST_STD_LEAF_WITH_ECX";
+        case CPUMUKNOWNCPUID_PASSTHRU:                  return "PASSTHRU";
+
+        /* Not real methods; fall out to the error string below. */
+        case CPUMUKNOWNCPUID_INVALID:
+        case CPUMUKNOWNCPUID_END:
+        case CPUMUKNOWNCPUID_32BIT_HACK:
+            break;
+    }
+    return "Invalid-unknown-CPUID-method";
+}
+
+
+/**
+ * Detects the CPU vendor from the CPUID(0) register values.
+ *
+ * @returns The vendor, CPUMCPUVENDOR_UNKNOWN if not recognized.
+ * @param   uEAX                EAX from CPUID(0).
+ * @param   uEBX                EBX from CPUID(0).
+ * @param   uECX                ECX from CPUID(0).
+ * @param   uEDX                EDX from CPUID(0).
+ */
+VMMR3DECL(CPUMCPUVENDOR) CPUMR3CpuIdDetectVendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
+{
+    if (ASMIsValidStdRange(uEAX))
+    {
+        if (ASMIsAmdCpuEx(uEBX, uECX, uEDX))
+            return CPUMCPUVENDOR_AMD;
+
+        if (ASMIsIntelCpuEx(uEBX, uECX, uEDX))
+            return CPUMCPUVENDOR_INTEL;
+
+        if (ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX))
+            return CPUMCPUVENDOR_VIA;
+
+        /* The EBX/EDX/ECX bytes spell out the vendor string. */
+        if (   uEBX == UINT32_C(0x69727943) /* CyrixInstead */
+            && uECX == UINT32_C(0x64616574)
+            && uEDX == UINT32_C(0x736E4978))
+            return CPUMCPUVENDOR_CYRIX;
+
+        /* "Geode by NSC", example: family 5, model 9.  */
+
+        /** @todo detect the other buggers... */
+    }
+
+    return CPUMCPUVENDOR_UNKNOWN;
+}
+
+
+/**
+ * Translates a CPU vendor enum value into the corresponding string constant.
+ *
+ * The name can be prefixed with 'CPUMCPUVENDOR_' to construct a valid enum
+ * value name.  This can be useful when generating code.
+ *
+ * @returns Read only name string.
+ * @param   enmVendor           The CPU vendor value.
+ */
+VMMR3DECL(const char *) CPUMR3CpuVendorName(CPUMCPUVENDOR enmVendor)
+{
+    switch (enmVendor)
+    {
+        case CPUMCPUVENDOR_INTEL:       return "INTEL";
+        case CPUMCPUVENDOR_AMD:         return "AMD";
+        case CPUMCPUVENDOR_VIA:         return "VIA";
+        case CPUMCPUVENDOR_CYRIX:       return "CYRIX";
+        case CPUMCPUVENDOR_UNKNOWN:     return "UNKNOWN";
+
+        /* Not real vendor values; fall out to the error string below. */
+        case CPUMCPUVENDOR_INVALID:
+        case CPUMCPUVENDOR_32BIT_HACK:
+            break;
+    }
+    return "Invalid-cpu-vendor";
+}
+
+
+/** Looks up the first entry with the given leaf number, returning NULL if
+ *  not present.  Linear scan; the arrays are small and binary search wasn't
+ *  considered worth it. */
+static PCCPUMCPUIDLEAF cpumR3CpuIdFindLeaf(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf)
+{
+    for (uint32_t i = 0; i < cLeaves; i++)
+        if (paLeaves[i].uLeaf == uLeaf)
+            return &paLeaves[i];
+    return NULL;
+}
+
+
+/**
+ * Explodes the collected CPUID leaves into the feature structure.
+ *
+ * Requires either an empty array or a sorted array whose first two entries
+ * are leaves 0 and 1 (asserted).
+ *
+ * @returns VBox status code, VERR_CPUM_IPE_1 if the leading leaves are not
+ *          as expected.
+ * @param   paLeaves    The sorted CPUID leaf/sub-leaf array.
+ * @param   cLeaves     Number of entries in @a paLeaves.
+ * @param   pFeatures   Where to store the features; zeroed first.
+ */
+int cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCPUMFEATURES pFeatures)
+{
+    RT_ZERO(*pFeatures);
+    if (cLeaves >= 2)
+    {
+        AssertLogRelReturn(paLeaves[0].uLeaf == 0, VERR_CPUM_IPE_1);
+        AssertLogRelReturn(paLeaves[1].uLeaf == 1, VERR_CPUM_IPE_1);
+
+        /* Vendor from leaf 0, family/model/stepping from leaf 1 EAX. */
+        pFeatures->enmCpuVendor = CPUMR3CpuIdDetectVendorEx(paLeaves[0].uEax,
+                                                            paLeaves[0].uEbx,
+                                                            paLeaves[0].uEcx,
+                                                            paLeaves[0].uEdx);
+        pFeatures->uFamily      = ASMGetCpuFamily(paLeaves[1].uEax);
+        pFeatures->uModel       = ASMGetCpuModel(paLeaves[1].uEax, pFeatures->enmCpuVendor == CPUMCPUVENDOR_INTEL);
+        pFeatures->uStepping    = ASMGetCpuStepping(paLeaves[1].uEax);
+        pFeatures->enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx((CPUMCPUVENDOR)pFeatures->enmCpuVendor,
+                                                                  pFeatures->uFamily,
+                                                                  pFeatures->uModel,
+                                                                  pFeatures->uStepping);
+
+        /* Physical address width: low 8 bits of leaf 0x80000008 EAX if
+           present, else 36 bits with PSE36, else 32 bits. */
+        PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000008);
+        if (pLeaf)
+            pFeatures->cMaxPhysAddrWidth = pLeaf->uEax & 0xff;
+        else if (paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PSE36)
+            pFeatures->cMaxPhysAddrWidth = 36;
+        else
+            pFeatures->cMaxPhysAddrWidth = 32;
+
+        /* Standard features. */
+        pFeatures->fMsr                 = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_MSR);
+        pFeatures->fApic                = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_APIC);
+        pFeatures->fX2Apic              = RT_BOOL(paLeaves[1].uEcx & X86_CPUID_FEATURE_ECX_X2APIC);
+        pFeatures->fPse                 = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PSE);
+        pFeatures->fPse36               = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PSE36);
+        pFeatures->fPae                 = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PAE);
+        pFeatures->fPat                 = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_PAT);
+        pFeatures->fFxSaveRstor         = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_FXSR);
+        pFeatures->fSysEnter            = RT_BOOL(paLeaves[1].uEdx & X86_CPUID_FEATURE_EDX_SEP);
+        pFeatures->fHypervisorPresent   = RT_BOOL(paLeaves[1].uEcx & X86_CPUID_FEATURE_ECX_HVP);
+        pFeatures->fMonitorMWait        = RT_BOOL(paLeaves[1].uEcx & X86_CPUID_FEATURE_ECX_MONITOR);
+
+        /* Extended features. */
+        PCCPUMCPUIDLEAF const pExtLeaf  = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000001);
+        if (pExtLeaf)
+        {
+            pFeatures->fLongMode        = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
+            pFeatures->fSysCall         = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL);
+            pFeatures->fNoExecute       = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_NX);
+            pFeatures->fLahfSahf        = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
+            pFeatures->fRdTscP          = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
+        }
+
+        if (   pExtLeaf
+            && pFeatures->enmCpuVendor == CPUMCPUVENDOR_AMD)
+        {
+            /* AMD features: OR in the AMD-specific mirrors of the standard
+               bits from the extended leaf. */
+            pFeatures->fMsr            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_MSR);
+            pFeatures->fApic           |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_APIC);
+            pFeatures->fPse            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PSE);
+            pFeatures->fPse36          |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PSE36);
+            pFeatures->fPae            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PAE);
+            pFeatures->fPat            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_PAT);
+            pFeatures->fFxSaveRstor    |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_FXSR);
+        }
+
+        /*
+         * Quirks.
+         */
+        /* Leaky FXSAVE/FXRSTOR: AMD family >= 6 (K7 and up) with FFXSR. */
+        pFeatures->fLeakyFxSR = pExtLeaf
+                             && (pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
+                             && pFeatures->enmCpuVendor == CPUMCPUVENDOR_AMD
+                             && pFeatures->uFamily >= 6 /* K7 and up */;
+    }
+    else
+        AssertLogRelReturn(cLeaves == 0, VERR_CPUM_IPE_1);
+    return VINF_SUCCESS;
+}
+
Index: /trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp	(revision 49893)
+++ /trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp	(revision 49893)
@@ -0,0 +1,654 @@
+/* $Id$ */
+/** @file
+ * CPUM - CPU database part.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+/*******************************************************************************
+*   Header Files                                                               *
+*******************************************************************************/
+#define LOG_GROUP LOG_GROUP_CPUM
+#include <VBox/vmm/cpum.h>
+#include "CPUMInternal.h"
+#include <VBox/vmm/vm.h>
+
+#include <VBox/err.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+
+
+/*******************************************************************************
+*   Structures and Typedefs                                                    *
+*******************************************************************************/
+typedef struct CPUMDBENTRY
+{
+    /** The CPU name. */
+    const char     *pszName;
+    /** The full CPU name. */
+    const char     *pszFullName;
+    /** The CPU vendor (CPUMCPUVENDOR). */
+    uint8_t         enmVendor;
+    /** The CPU family. */
+    uint8_t         uFamily;
+    /** The CPU model. */
+    uint8_t         uModel;
+    /** The CPU stepping. */
+    uint8_t         uStepping;
+    /** The microarchitecture. */
+    CPUMMICROARCH   enmMicroarch;
+    /** Flags (TBD). */
+    uint32_t        fFlags;
+    /** The maximum physical address width of the CPU.  This should correspond to
+     * the value in CPUID leaf 0x80000008 when present. */
+    uint8_t         cMaxPhysAddrWidth;
+    /** Pointer to an array of CPUID leaves.  */
+    PCCPUMCPUIDLEAF paCpuIdLeaves;
+    /** The number of CPUID leaves in the array paCpuIdLeaves points to. */
+    uint32_t        cCpuIdLeaves;
+    /** The method used to deal with unknown CPUID leaves. */
+    CPUMUKNOWNCPUID enmUnknownCpuId;
+    /** The default unknown CPUID value. */
+    CPUMCPUID       DefUnknownCpuId;
+
+    /** MSR mask.  Several microarchitectures ignore the higher bits of the MSR index. */
+    uint32_t        fMsrMask;
+
+    /** The number of ranges in the table pointed to by paMsrRanges. */
+    uint32_t        cMsrRanges;
+    /** MSR ranges for this CPU. */
+    PCCPUMMSRRANGE  paMsrRanges;
+} CPUMDBENTRY;
+
+
+/*******************************************************************************
+*   Defined Constants And Macros                                               *
+*******************************************************************************/
+
+/** @def NULL_ALONE
+ * For eliminating an unnecessary data dependency in standalone builds (for
+ * VBoxSVC). */
+/** @def ZERO_ALONE
+ * For eliminating an unnecessary data size dependency in standalone builds (for
+ * VBoxSVC). */
+#ifndef CPUM_DB_STANDALONE
+# define NULL_ALONE(a_aTable)    a_aTable
+# define ZERO_ALONE(a_cTable)    a_cTable
+#else
+# define NULL_ALONE(a_aTable)    NULL
+# define ZERO_ALONE(a_cTable)    0
+#endif
+
+
+/** @name Short macros for the MSR range entries.
+ *
+ * These are rather cryptic, but this is to reduce the attack on the right
+ * margin.
+ *
+ * @{ */
+/** Alias one MSR onto another (a_uTarget). */
+#define MAL(a_uMsr, a_szName, a_uTarget) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_MsrAlias, kCpumMsrWrFn_MsrAlias, 0, a_uTarget, 0, 0, a_szName)
+/** Function handles everything. */
+#define MFN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
+/** Function handles everything, with GP mask. */
+#define MFG(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrGpMask) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, a_fWrGpMask, a_szName)
+/** Function handlers, read-only. */
+#define MFO(a_uMsr, a_szName, a_enmRdFnSuff) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_ReadOnly, 0, 0, 0, UINT64_MAX, a_szName)
+/** Function handlers, ignore all writes. */
+#define MFI(a_uMsr, a_szName, a_enmRdFnSuff) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_IgnoreWrite, 0, 0, UINT64_MAX, 0, a_szName)
+/** Function handlers, with value. */
+#define MFV(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, 0, 0, a_szName)
+/** Function handlers, with write ignore mask. */
+#define MFW(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_fWrIgnMask) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, a_fWrIgnMask, 0, a_szName)
+/** Function handlers, extended version. */
+#define MFX(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
+/** Function handlers, with CPUMCPU storage variable. */
+#define MFS(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
+         RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, 0, 0, a_szName)
+/** Function handlers, with CPUMCPU storage variable, ignore mask and GP mask. */
+#define MFZ(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember, a_fWrIgnMask, a_fWrGpMask) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
+         RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, a_fWrIgnMask, a_fWrGpMask, a_szName)
+/** Read-only fixed value. */
+#define MVO(a_uMsr, a_szName, a_uValue) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
+/** Read-only fixed value, ignores all writes. */
+#define MVI(a_uMsr, a_szName, a_uValue) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
+/** Read fixed value, ignore writes outside GP mask. */
+#define MVG(a_uMsr, a_szName, a_uValue, a_fWrGpMask) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, 0, a_fWrGpMask, a_szName)
+/** Read fixed value, extended version with both GP and ignore masks. */
+#define MVX(a_uMsr, a_szName, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
+/** The short form, no CPUM backing. */
+#define MSN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
+    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
+         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
+
+/** Range: Function handles everything. */
+#define RFN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
+    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
+/** Range: Read fixed value, read-only. */
+#define RVO(a_uFirst, a_uLast, a_szName, a_uValue) \
+    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
+/** Range: Read fixed value, ignore writes. */
+#define RVI(a_uFirst, a_uLast, a_szName, a_uValue) \
+    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
+/** Range: The short form, no CPUM backing. */
+#define RSN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
+    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, \
+         a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
+
+/** Internal form used by the macros. */
+#ifdef VBOX_WITH_STATISTICS
+# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
+    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName, \
+      { 0 }, { 0 }, { 0 }, { 0 } }
+#else
+# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
+    { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName }
+#endif
+/** @} */
+
+
+#include "cpus/Intel_Pentium_M_processor_2_00GHz.h"
+#include "cpus/Intel_Core_i7_3960X.h"
+#include "cpus/AMD_FX_8150_Eight_Core.h"
+#include "cpus/Quad_Core_AMD_Opteron_2384.h"
+
+
+
+/**
+ * The database entries.
+ *
+ * Warning! The first entry is special.  It is the fallback for unknown
+ *          processors.  Thus, it better be pretty representative.
+ */
+static CPUMDBENTRY const * const g_apCpumDbEntries[] =
+{
+#ifdef VBOX_CPUDB_Intel_Core_i7_3960X
+    &g_Entry_Intel_Core_i7_3960X,
+#endif
+#ifdef VBOX_CPUDB_Intel_Pentium_M_processor_2_00GHz
+    &g_Entry_Intel_Pentium_M_processor_2_00GHz,
+#endif
+#ifdef VBOX_CPUDB_AMD_FX_8150_Eight_Core
+    &g_Entry_AMD_FX_8150_Eight_Core,
+#endif
+#ifdef VBOX_CPUDB_AMD_Phenom_II_X6_1100T
+    &g_Entry_AMD_Phenom_II_X6_1100T,
+#endif
+#ifdef VBOX_CPUDB_Quad_Core_AMD_Opteron_2384
+    &g_Entry_Quad_Core_AMD_Opteron_2384,
+#endif
+};
+
+
+#ifndef CPUM_DB_STANDALONE
+
+/**
+ * Binary search used by cpumR3MsrRangesInsert; it has some special properties
+ * with respect to mismatches.
+ *
+ * @returns Insert location.
+ * @param   paMsrRanges         The MSR ranges to search.
+ * @param   cMsrRanges          The number of MSR ranges.
+ * @param   uMsr                What to search for.
+ */
+static uint32_t cpumR3MsrRangesBinSearch(PCCPUMMSRRANGE paMsrRanges, uint32_t cMsrRanges, uint32_t uMsr)
+{
+    if (!cMsrRanges)
+        return 0;
+
+    uint32_t iStart = 0;
+    uint32_t iLast  = cMsrRanges - 1;
+    for (;;)
+    {
+        uint32_t i = iStart + (iLast - iStart + 1) / 2;
+        if (   uMsr >= paMsrRanges[i].uFirst
+            && uMsr <= paMsrRanges[i].uLast)
+            return i;
+        if (uMsr < paMsrRanges[i].uFirst)
+        {
+            if (i <= iStart)
+                return i;
+            iLast = i - 1;
+        }
+        else
+        {
+            if (i >= iLast)
+            {
+                if (i < cMsrRanges)
+                    i++;
+                return i;
+            }
+            iStart = i + 1;
+        }
+    }
+}
+
+
+/**
+ * Ensures that there is space for at least @a cNewRanges in the table,
+ * reallocating the table if necessary.
+ *
+ * @returns Pointer to the MSR ranges on success, NULL on failure.  On failure
+ *          @a *ppaMsrRanges is freed and set to NULL.
+ * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
+ * @param   cMsrRanges      The current number of ranges.
+ * @param   cNewRanges      The number of ranges to be added.
+ */
+static PCPUMMSRRANGE cpumR3MsrRangesEnsureSpace(PCPUMMSRRANGE *ppaMsrRanges, uint32_t cMsrRanges, uint32_t cNewRanges)
+{
+    uint32_t cMsrRangesAllocated = RT_ALIGN_32(cMsrRanges, 16);
+    if (cMsrRangesAllocated < cMsrRanges + cNewRanges)
+    {
+        uint32_t cNew = RT_ALIGN_32(cMsrRanges + cNewRanges, 16);
+        void *pvNew = RTMemRealloc(*ppaMsrRanges, cNew * sizeof(**ppaMsrRanges));
+        if (!pvNew)
+        {
+            RTMemFree(*ppaMsrRanges);
+            *ppaMsrRanges = NULL;
+            return NULL;
+        }
+        *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
+    }
+    return *ppaMsrRanges;
+}
+
+
+/**
+ * Inserts a new MSR range into a sorted MSR range array.
+ *
+ * If the new MSR range overlaps existing ranges, the existing ones will be
+ * adjusted/removed to fit in the new one.
+ *
+ * @returns VBox status code.
+ * @retval  VINF_SUCCESS
+ * @retval  VERR_NO_MEMORY
+ *
+ * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
+ * @param   pcMsrRanges     The variable holding number of ranges.
+ * @param   pNewRange       The new range.
+ */
+int cpumR3MsrRangesInsert(PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange)
+{
+    uint32_t        cMsrRanges  = *pcMsrRanges;
+    PCPUMMSRRANGE   paMsrRanges = *ppaMsrRanges;
+
+    Assert(pNewRange->uLast >= pNewRange->uFirst);
+    Assert(pNewRange->enmRdFn > kCpumMsrRdFn_Invalid && pNewRange->enmRdFn < kCpumMsrRdFn_End);
+    Assert(pNewRange->enmWrFn > kCpumMsrWrFn_Invalid && pNewRange->enmWrFn < kCpumMsrWrFn_End);
+
+    /*
+     * Optimize the linear insertion case where we add new entries at the end.
+     */
+    if (   cMsrRanges > 0
+        && paMsrRanges[cMsrRanges - 1].uLast < pNewRange->uFirst)
+    {
+        paMsrRanges = cpumR3MsrRangesEnsureSpace(ppaMsrRanges, cMsrRanges, 1);
+        if (!paMsrRanges)
+            return VERR_NO_MEMORY;
+        paMsrRanges[cMsrRanges] = *pNewRange;
+        *pcMsrRanges += 1;
+    }
+    else
+    {
+        uint32_t i = cpumR3MsrRangesBinSearch(paMsrRanges, cMsrRanges, pNewRange->uFirst);
+        Assert(i == cMsrRanges || pNewRange->uFirst <= paMsrRanges[i].uLast);
+        Assert(i == 0 || pNewRange->uFirst > paMsrRanges[i - 1].uLast);
+
+        /*
+         * Adding an entirely new entry?
+         */
+        if (   i >= cMsrRanges
+            || pNewRange->uLast < paMsrRanges[i].uFirst)
+        {
+            paMsrRanges = cpumR3MsrRangesEnsureSpace(ppaMsrRanges, cMsrRanges, 1);
+            if (!paMsrRanges)
+                return VERR_NO_MEMORY;
+            if (i < cMsrRanges)
+                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
+            paMsrRanges[i] = *pNewRange;
+            *pcMsrRanges += 1;
+        }
+        /*
+         * Replace existing entry?
+         */
+        else if (   pNewRange->uFirst == paMsrRanges[i].uFirst
+                 && pNewRange->uLast  == paMsrRanges[i].uLast)
+            paMsrRanges[i] = *pNewRange;
+        /*
+         * Splitting an existing entry?
+         */
+        else if (   pNewRange->uFirst > paMsrRanges[i].uFirst
+                 && pNewRange->uLast  < paMsrRanges[i].uLast)
+        {
+            paMsrRanges = cpumR3MsrRangesEnsureSpace(ppaMsrRanges, cMsrRanges, 2);
+            if (!paMsrRanges)
+                return VERR_NO_MEMORY;
+            if (i < cMsrRanges)
+                memmove(&paMsrRanges[i + 2], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
+            paMsrRanges[i + 1] = *pNewRange;
+            paMsrRanges[i + 2] = paMsrRanges[i];
+            paMsrRanges[i    ].uLast  = pNewRange->uFirst - 1;
+            paMsrRanges[i + 2].uFirst = pNewRange->uLast  + 1;
+            *pcMsrRanges += 2;
+        }
+        /*
+         * Complicated scenarios that can affect more than one range.
+         *
+         * The current code does not optimize memmove calls when replacing
+         * one or more existing ranges, because it's tedious to deal with and
+         * not expected to be a frequent usage scenario.
+         */
+        else
+        {
+            /* Adjust start of first match? */
+            if (   pNewRange->uFirst <= paMsrRanges[i].uFirst
+                && pNewRange->uLast  <  paMsrRanges[i].uLast)
+                paMsrRanges[i].uFirst = pNewRange->uLast + 1;
+            else
+            {
+                /* Adjust end of first match? */
+                if (pNewRange->uFirst > paMsrRanges[i].uFirst)
+                {
+                    Assert(paMsrRanges[i].uLast >= pNewRange->uFirst);
+                    paMsrRanges[i].uLast = pNewRange->uFirst - 1;
+                    i++;
+                }
+                /* Replace the whole first match (lazy bird). */
+                else
+                {
+                    if (i + 1 < cMsrRanges)
+                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
+                    cMsrRanges = *pcMsrRanges -= 1;
+                }
+
+                /* Do the new range affect more ranges? */
+                while (   i < cMsrRanges
+                       && pNewRange->uLast >= paMsrRanges[i].uFirst)
+                {
+                    if (pNewRange->uLast < paMsrRanges[i].uLast)
+                    {
+                        /* Adjust the start of it, then we're done. */
+                        paMsrRanges[i].uFirst = pNewRange->uLast + 1;
+                        break;
+                    }
+
+                    /* Remove it entirely. */
+                    if (i + 1 < cMsrRanges)
+                        memmove(&paMsrRanges[i], &paMsrRanges[i + 1], (cMsrRanges - i - 1) * sizeof(paMsrRanges[0]));
+                    cMsrRanges = *pcMsrRanges -= 1;
+                }
+            }
+
+            /* Now, perform a normal insertion. */
+            paMsrRanges = cpumR3MsrRangesEnsureSpace(ppaMsrRanges, cMsrRanges, 1);
+            if (!paMsrRanges)
+                return VERR_NO_MEMORY;
+            if (i < cMsrRanges)
+                memmove(&paMsrRanges[i + 1], &paMsrRanges[i], (cMsrRanges - i) * sizeof(paMsrRanges[0]));
+            paMsrRanges[i] = *pNewRange;
+            *pcMsrRanges += 1;
+        }
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo)
+{
+    CPUMDBENTRY const *pEntry = NULL;
+    int                rc;
+
+    if (!strcmp(pszName, "host"))
+    {
+        /*
+         * Create a CPU database entry for the host CPU.  This means getting
+         * the CPUID bits from the real CPU and grabbing the closest matching
+         * database entry for MSRs.
+         */
+        rc = CPUMR3CpuIdDetectUnknownLeafMethod(&pInfo->enmUnknownCpuIdMethod, &pInfo->DefCpuId);
+        if (RT_FAILURE(rc))
+            return rc;
+        rc = CPUMR3CpuIdCollectLeaves(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
+        if (RT_FAILURE(rc))
+            return rc;
+
+        /* Lookup database entry for MSRs. */
+        CPUMCPUVENDOR const enmVendor    = CPUMR3CpuIdDetectVendorEx(pInfo->paCpuIdLeavesR3[0].uEax,
+                                                                     pInfo->paCpuIdLeavesR3[0].uEbx,
+                                                                     pInfo->paCpuIdLeavesR3[0].uEcx,
+                                                                     pInfo->paCpuIdLeavesR3[0].uEdx);
+        uint32_t      const uStd1Eax     = pInfo->paCpuIdLeavesR3[1].uEax;
+        uint8_t       const uFamily      = ASMGetCpuFamily(uStd1Eax);
+        uint8_t       const uModel       = ASMGetCpuModel(uStd1Eax, enmVendor == CPUMCPUVENDOR_INTEL);
+        uint8_t       const uStepping    = ASMGetCpuStepping(uStd1Eax);
+        CPUMMICROARCH const enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx(enmVendor, uFamily, uModel, uStepping);
+
+        for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
+        {
+            CPUMDBENTRY const *pCur = g_apCpumDbEntries[i];
+            if ((CPUMCPUVENDOR)pCur->enmVendor == enmVendor)
+            {
+                /* Anything from the same vendor is better than nothing: */
+                if (!pEntry)
+                    pEntry = pCur;
+                /* Newer micro arch is better than an older one: */
+                else if (   pEntry->enmMicroarch < enmMicroarch
+                         && pCur->enmMicroarch   >= enmMicroarch)
+                    pEntry = pCur;
+                /* Prefer a micro arch match: */
+                else if (   pEntry->enmMicroarch != enmMicroarch
+                         && pCur->enmMicroarch   == enmMicroarch)
+                    pEntry = pCur;
+                /* If the micro arch matches, check model and stepping. Stop
+                   looping if we get an exact match. */
+                else if (   pEntry->enmMicroarch == enmMicroarch
+                         && pCur->enmMicroarch   == enmMicroarch)
+                {
+                    if (pCur->uModel == uModel)
+                    {
+                        /* Perfect match? */
+                        if (pCur->uStepping == uStepping)
+                        {
+                            pEntry = pCur;
+                            break;
+                        }
+
+                        /* Better model match? */
+                        if (pEntry->uModel != uModel)
+                            pEntry = pCur;
+                        /* The one with the closest stepping, preferring later ones over earlier ones. */
+                        else if (  pCur->uStepping > uStepping
+                                 ? pCur->uStepping < pEntry->uStepping || pEntry->uStepping < uStepping
+                                 : pCur->uStepping > pEntry->uStepping)
+                            pEntry = pCur;
+                    }
+                    /* The one with the closest model, preferring later ones over earlier ones. */
+                    else if (  pCur->uModel > uModel
+                             ? pCur->uModel < pEntry->uModel || pEntry->uModel < uModel
+                             : pCur->uModel > pEntry->uModel)
+                        pEntry = pCur;
+                }
+            }
+        }
+
+        if (pEntry)
+            LogRel(("CPUM: Matched host CPU %s %#x/%#x/%#x %s with CPU DB entry '%s' (%s %#x/%#x/%#x %s).\n",
+                    CPUMR3CpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMR3MicroarchName(enmMicroarch),
+                    pEntry->pszName,  CPUMR3CpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor), pEntry->uFamily, pEntry->uModel,
+                    pEntry->uStepping, CPUMR3MicroarchName(pEntry->enmMicroarch) ));
+        else
+        {
+            pEntry = g_apCpumDbEntries[0];
+            LogRel(("CPUM: No matching processor database entry %s %#x/%#x/%#x %s, falling back on '%s'.\n",
+                    CPUMR3CpuVendorName(enmVendor), uFamily, uModel, uStepping, CPUMR3MicroarchName(enmMicroarch),
+                    pEntry->pszName));
+        }
+    }
+    else
+    {
+        /*
+         * We're supposed to be emulating a specific CPU that is included in
+         * our CPU database.  The CPUID tables needs to be copied onto the
+         * heap so the caller can modify them and so they can be freed like
+         * in the host case above.
+         */
+        for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
+            if (!strcmp(pszName, g_apCpumDbEntries[i]->pszName))
+            {
+                pEntry = g_apCpumDbEntries[i];
+                break;
+            }
+        if (!pEntry)
+        {
+            LogRel(("CPUM: Cannot locate any CPU by the name '%s'\n", pszName));
+            return VERR_CPUM_DB_CPU_NOT_FOUND;
+        }
+
+        pInfo->cCpuIdLeaves = pEntry->cCpuIdLeaves;
+        if (pEntry->cCpuIdLeaves)
+        {
+            pInfo->paCpuIdLeavesR3 = (PCPUMCPUIDLEAF)RTMemDup(pEntry->paCpuIdLeaves,
+                                                              sizeof(pEntry->paCpuIdLeaves[0]) * pEntry->cCpuIdLeaves);
+            if (!pInfo->paCpuIdLeavesR3)
+                return VERR_NO_MEMORY;
+        }
+        else
+            pInfo->paCpuIdLeavesR3 = NULL;
+
+        pInfo->enmUnknownCpuIdMethod = pEntry->enmUnknownCpuId;
+        pInfo->DefCpuId         = pEntry->DefUnknownCpuId;
+
+        LogRel(("CPUM: Using CPU DB entry '%s' (%s %#x/%#x/%#x %s).\n",
+                pEntry->pszName, CPUMR3CpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor),
+                pEntry->uFamily, pEntry->uModel, pEntry->uStepping, CPUMR3MicroarchName(pEntry->enmMicroarch) ));
+    }
+
+    pInfo->fMsrMask             = pEntry->fMsrMask;
+    pInfo->iFirstExtCpuIdLeaf   = 0; /* Set by caller. */
+    pInfo->uPadding             = 0;
+    pInfo->paCpuIdLeavesR0      = NIL_RTR0PTR;
+    pInfo->paMsrRangesR0        = NIL_RTR0PTR;
+    pInfo->paCpuIdLeavesRC      = NIL_RTRCPTR;
+    pInfo->paMsrRangesRC        = NIL_RTRCPTR;
+
+    /*
+     * Copy the MSR range.
+     */
+    uint32_t        cMsrs   = 0;
+    PCPUMMSRRANGE   paMsrs  = NULL;
+
+    PCCPUMMSRRANGE  pCurMsr = pEntry->paMsrRanges;
+    uint32_t        cLeft   = pEntry->cMsrRanges;
+    while (cLeft-- > 0)
+    {
+        rc = cpumR3MsrRangesInsert(&paMsrs, &cMsrs, pCurMsr);
+        if (RT_FAILURE(rc))
+        {
+            Assert(!paMsrs); /* The above function frees this. */
+            RTMemFree(pInfo->paCpuIdLeavesR3);
+            pInfo->paCpuIdLeavesR3 = NULL;
+            return rc;
+        }
+        pCurMsr++;
+    }
+
+    pInfo->paMsrRangesR3   = paMsrs;
+    pInfo->cMsrRanges      = cMsrs;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Register statistics for the MSRs.
+ *
+ * This must not be called before the MSRs have been finalized and moved to the
+ * hyper heap.
+ *
+ * @returns VBox status code.
+ * @param   pVM                 Pointer to the cross context VM structure.
+ */
+int cpumR3MsrRegStats(PVM pVM)
+{
+    /*
+     * Global statistics.
+     */
+    PCPUM pCpum = &pVM->cpum.s;
+    STAM_REL_REG(pVM, &pCpum->cMsrReads,                STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/Reads",
+                 STAMUNIT_OCCURENCES, "All RDMSRs making it to CPUM.");
+    STAM_REL_REG(pVM, &pCpum->cMsrReadsRaiseGp,         STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/ReadsRaisingGP",
+                 STAMUNIT_OCCURENCES, "RDMSR raising #GPs, except unknown MSRs.");
+    STAM_REL_REG(pVM, &pCpum->cMsrReadsUnknown,         STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/ReadsUnknown",
+                 STAMUNIT_OCCURENCES, "RDMSR on unknown MSRs (raises #GP).");
+    STAM_REL_REG(pVM, &pCpum->cMsrWrites,               STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/Writes",
+                 STAMUNIT_OCCURENCES, "All WRMSRs making it to CPUM.");
+    STAM_REL_REG(pVM, &pCpum->cMsrWritesToIgnoredBits,  STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/WritesToIgnoredBits",
+                 STAMUNIT_OCCURENCES, "Writing of ignored bits.");
+    STAM_REL_REG(pVM, &pCpum->cMsrWritesRaiseGp,        STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/WritesRaisingGP",
+                 STAMUNIT_OCCURENCES, "WRMSR raising #GPs, except unknown MSRs.");
+    STAM_REL_REG(pVM, &pCpum->cMsrWritesUnknown,        STAMTYPE_COUNTER,   "/CPUM/MSR-Totals/WritesUnknown",
+                 STAMUNIT_OCCURENCES, "WRMSR on unknown MSRs (raises #GP).");
+
+
+# ifdef VBOX_WITH_STATISTICS
+    /*
+     * Per range.
+     */
+    PCPUMMSRRANGE   paRanges = pVM->cpum.s.GuestInfo.paMsrRangesR3;
+    uint32_t        cRanges  = pVM->cpum.s.GuestInfo.cMsrRanges;
+    for (uint32_t i = 0; i < cRanges; i++)
+    {
+        char    szName[160];
+        ssize_t cchName;
+
+        if (paRanges[i].uFirst == paRanges[i].uLast)
+            cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%s",
+                                  paRanges[i].uFirst, paRanges[i].szName);
+        else
+            cchName = RTStrPrintf(szName, sizeof(szName), "/CPUM/MSRs/%#010x-%#010x-%s",
+                                  paRanges[i].uFirst, paRanges[i].uLast, paRanges[i].szName);
+
+        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-reads");
+        STAMR3Register(pVM, &paRanges[i].cReads, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "RDMSR");
+
+        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-writes");
+        STAMR3Register(pVM, &paRanges[i].cWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR");
+
+        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-GPs");
+        STAMR3Register(pVM, &paRanges[i].cGps, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "#GPs");
+
+        RTStrCopy(&szName[cchName], sizeof(szName) - cchName, "-ign-bits-writes");
+        STAMR3Register(pVM, &paRanges[i].cIgnoredBits, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR w/ ignored bits");
+    }
+# endif /* VBOX_WITH_STATISTICS */
+
+    return VINF_SUCCESS;
+}
+
+#endif /* !CPUM_DB_STANDALONE */
+
Index: /trunk/src/VBox/VMM/VMMR3/EM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/EM.cpp	(revision 49892)
+++ /trunk/src/VBox/VMM/VMMR3/EM.cpp	(revision 49893)
@@ -2398,5 +2398,5 @@
                         PGMR3ResetCpu(pVM, pVCpu);
                         TRPMR3ResetCpu(pVCpu);
-                        CPUMR3ResetCpu(pVCpu);
+                        CPUMR3ResetCpu(pVM, pVCpu);
                         EMR3ResetCpu(pVCpu);
                         HMR3ResetCpu(pVCpu);
Index: /trunk/src/VBox/VMM/VMMR3/PGM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGM.cpp	(revision 49892)
+++ /trunk/src/VBox/VMM/VMMR3/PGM.cpp	(revision 49893)
@@ -2168,4 +2168,5 @@
     }
 
+    /** @todo query from CPUM. */
     pVM->pgm.s.GCPhysInvAddrMask = 0;
     for (uint32_t iBit = cMaxPhysAddrWidth; iBit < 64; iBit++)
Index: /trunk/src/VBox/VMM/VMMR3/VM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/VM.cpp	(revision 49892)
+++ /trunk/src/VBox/VMM/VMMR3/VM.cpp	(revision 49893)
@@ -4380,5 +4380,5 @@
     PDMR3ResetCpu(pVCpu);
     TRPMR3ResetCpu(pVCpu);
-    CPUMR3ResetCpu(pVCpu);
+    CPUMR3ResetCpu(pVM, pVCpu);
     EMR3ResetCpu(pVCpu);
     HMR3ResetCpu(pVCpu);
Index: /trunk/src/VBox/VMM/VMMR3/VMM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 49892)
+++ /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 49893)
@@ -1420,5 +1420,5 @@
 
     PGMR3ResetCpu(pVM, pVCpu);
-    CPUMR3ResetCpu(pVCpu);
+    CPUMR3ResetCpu(pVM, pVCpu);
 
     return VINF_EM_WAIT_SIPI;
Index: /trunk/src/VBox/VMM/VMMR3/VMMTests.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/VMMTests.cpp	(revision 49892)
+++ /trunk/src/VBox/VMM/VMMR3/VMMTests.cpp	(revision 49893)
@@ -872,19 +872,50 @@
      * Do the experiments.
      */
-    uint32_t uMsr   = 0xc0011011;
-    uint64_t uValue = 0x10000;
+    uint32_t uMsr   = 0x00000277;
+    uint64_t uValue = UINT64_C(0x0007010600070106);
 #if 0
+    uValue &= ~(RT_BIT_64(17) | RT_BIT_64(16) | RT_BIT_64(15) | RT_BIT_64(14) | RT_BIT_64(13));
+    uValue |= RT_BIT_64(13);
     rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
                      RCPtrValues, RCPtrValues + sizeof(uint64_t));
     RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
              uMsr, pauValues[0], uValue, pauValues[1], rc);
-#endif
+#elif 1
+    const uint64_t uOrgValue = uValue;
+    uint32_t       cChanges = 0;
+    for (int iBit = 63; iBit >= 58; iBit--)
+    {
+        uValue = uOrgValue & ~RT_BIT_64(iBit);
+        rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
+                         RCPtrValues, RCPtrValues + sizeof(uint64_t));
+        RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nclear bit=%u -> %s\n",
+                 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
+                 (pauValues[0] ^  pauValues[1]) & RT_BIT_64(iBit) ?  "changed" : "unchanged");
+        cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);
+
+        uValue = uOrgValue | RT_BIT_64(iBit);
+        rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
+                         RCPtrValues, RCPtrValues + sizeof(uint64_t));
+        RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nset   bit=%u -> %s\n",
+                 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
+                 (pauValues[0] ^  pauValues[1]) & RT_BIT_64(iBit) ?  "changed" : "unchanged");
+        cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);
+    }
+    RTPrintf("%u change(s)\n", cChanges);
+#else
+    uint64_t fWriteable = 0;
     for (uint32_t i = 0; i <= 63; i++)
     {
         uValue = RT_BIT_64(i);
+# if 0
+        if (uValue & (0x7))
+            continue;
+# endif
         rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
                          RCPtrValues, RCPtrValues + sizeof(uint64_t));
         RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
                  uMsr, pauValues[0], uValue, pauValues[1], rc);
+        if (RT_SUCCESS(rc))
+            fWriteable |= RT_BIT_64(i);
     }
 
@@ -900,4 +931,12 @@
     RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
              uMsr, pauValues[0], uValue, pauValues[1], rc);
+
+    uValue = fWriteable;
+    rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
+                     RCPtrValues, RCPtrValues + sizeof(uint64_t));
+    RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc [fWriteable]\n",
+             uMsr, pauValues[0], uValue, pauValues[1], rc);
+
+#endif
 
     /*
Index: /trunk/src/VBox/VMM/VMMRC/VMMRC.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMRC/VMMRC.cpp	(revision 49892)
+++ /trunk/src/VBox/VMM/VMMRC/VMMRC.cpp	(revision 49893)
@@ -5,5 +5,5 @@
 
 /*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ -49,4 +49,5 @@
 static DECLCALLBACK(int) vmmGCTestTmpPFHandlerCorruptFS(PVM pVM, PCPUMCTXCORE pRegFrame);
 DECLASM(bool)   vmmRCSafeMsrRead(uint32_t uMsr, uint64_t *pu64Value);
+DECLASM(bool)   vmmRCSafeMsrWrite(uint32_t uMsr, uint64_t u64Value);
 
 
@@ -378,4 +379,46 @@
 
 
+/**
+ * Tries to write the given value to an MSR, returns the effect and restores the
+ * original value.
+ *
+ * This is called directly via VMMR3CallRC.
+ *
+ * @returns VBox status code.
+ * @param   pVM             The VM handle.
+ * @param   uMsr            The MSR to write to.
+ * @param   u32ValueLow     The low part of the value to write.
+ * @param   u32ValueHi      The high part of the value to write.
+ * @param   puValueBefore   The value before writing.
+ * @param   puValueAfter    The value read back after writing.
+ */
+extern "C" VMMRCDECL(int)
+VMMRCTestTestWriteMsr(PVM pVM, uint32_t uMsr, uint32_t u32ValueLow, uint32_t u32ValueHi,
+                      uint64_t *puValueBefore, uint64_t *puValueAfter)
+{
+    AssertPtrReturn(puValueBefore, VERR_INVALID_POINTER);
+    AssertPtrReturn(puValueAfter, VERR_INVALID_POINTER);
+    ASMIntDisable();
+
+    int      rc           = VINF_SUCCESS;
+    uint64_t uValueBefore = UINT64_MAX;
+    uint64_t uValueAfter  = UINT64_MAX;
+    if (vmmRCSafeMsrRead(uMsr, &uValueBefore))
+    {
+        if (!vmmRCSafeMsrWrite(uMsr, RT_MAKE_U64(u32ValueLow, u32ValueHi)))
+            rc = VERR_WRITE_PROTECT;
+        if (!vmmRCSafeMsrRead(uMsr, &uValueAfter) && RT_SUCCESS(rc))
+            rc = VERR_READ_ERROR;
+        vmmRCSafeMsrWrite(uMsr, uValueBefore);
+    }
+    else
+        rc = VERR_ACCESS_DENIED;
+
+    *puValueBefore = uValueBefore;
+    *puValueAfter  = uValueAfter;
+    return rc;
+}
+
+
 
 /**
Index: /trunk/src/VBox/VMM/VMMRC/VMMRCA.asm
===================================================================
--- /trunk/src/VBox/VMM/VMMRC/VMMRCA.asm	(revision 49892)
+++ /trunk/src/VBox/VMM/VMMRC/VMMRCA.asm	(revision 49893)
@@ -235,4 +235,8 @@
     pushf
     cli
+    push    esi
+    push    edi
+    push    ebx
+    push    ebp
 
     mov     ecx, [ebp + 8]              ; The MSR to read.
@@ -247,14 +251,17 @@
     mov     [ecx + 4], edx
 
+    mov     eax, 1
+.return:
+    pop     ebp
+    pop     ebx
+    pop     edi
+    pop     esi
     popf
-    mov     eax, 1
     leave
     ret
 
 .trapped:
-    popf
     mov     eax, 0
-    leave
-    ret
+    jmp     .return
 ENDPROC vmmRCSafeMsrRead
 
@@ -271,4 +278,8 @@
     pushf
     cli
+    push    esi
+    push    edi
+    push    ebx
+    push    ebp
 
     mov     ecx, [ebp + 8]              ; The MSR to write to.
@@ -279,14 +290,17 @@
     wrmsr
 
+    mov     eax, 1
+.return:
+    pop     ebp
+    pop     ebx
+    pop     edi
+    pop     esi
     popf
-    mov     eax, 1
     leave
     ret
 
 .trapped:
-    popf
     mov     eax, 0
-    leave
-    ret
+    jmp     .return
 ENDPROC vmmRCSafeMsrWrite
 
Index: /trunk/src/VBox/VMM/include/CPUMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/CPUMInternal.h	(revision 49892)
+++ /trunk/src/VBox/VMM/include/CPUMInternal.h	(revision 49893)
@@ -22,4 +22,5 @@
 # include <VBox/cdefs.h>
 # include <VBox/types.h>
+# include <VBox/vmm/stam.h>
 # include <iprt/x86.h>
 #else
@@ -108,4 +109,616 @@
 #endif
 #endif
+
+
+/**
+ * MSR read functions.
+ */
+typedef enum CPUMMSRRDFN
+{
+    /** Invalid zero value. */
+    kCpumMsrRdFn_Invalid = 0,
+    /** Return the CPUMMSRRANGE::uInitOrReadValue. */
+    kCpumMsrRdFn_FixedValue,
+    /** Alias to the MSR range starting at the MSR given by
+     * CPUMMSRRANGE::uInitOrReadValue.  Must be used in pair with
+     * kCpumMsrWrFn_MsrAlias. */
+    kCpumMsrRdFn_MsrAlias,
+    /** Write only register, GP all read attempts. */
+    kCpumMsrRdFn_WriteOnly,
+
+    kCpumMsrRdFn_Ia32P5McAddr,
+    kCpumMsrRdFn_Ia32P5McType,
+    kCpumMsrRdFn_Ia32TimestampCounter,
+    kCpumMsrRdFn_Ia32ApicBase,
+    kCpumMsrRdFn_Ia32FeatureControl,
+    kCpumMsrRdFn_Ia32SmmMonitorCtl,
+    kCpumMsrRdFn_Ia32PmcN,
+    kCpumMsrRdFn_Ia32MonitorFilterLineSize,
+    kCpumMsrRdFn_Ia32MPerf,
+    kCpumMsrRdFn_Ia32APerf,
+    kCpumMsrRdFn_Ia32MtrrCap,               /**< Takes real CPU value for reference.  */
+    kCpumMsrRdFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
+    kCpumMsrRdFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
+    kCpumMsrRdFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
+    kCpumMsrRdFn_Ia32MtrrDefType,
+    kCpumMsrRdFn_Ia32Pat,
+    kCpumMsrRdFn_Ia32SysEnterCs,
+    kCpumMsrRdFn_Ia32SysEnterEsp,
+    kCpumMsrRdFn_Ia32SysEnterEip,
+    kCpumMsrRdFn_Ia32McgCap,
+    kCpumMsrRdFn_Ia32McgStatus,
+    kCpumMsrRdFn_Ia32McgCtl,
+    kCpumMsrRdFn_Ia32DebugCtl,
+    kCpumMsrRdFn_Ia32SmrrPhysBase,
+    kCpumMsrRdFn_Ia32SmrrPhysMask,
+    kCpumMsrRdFn_Ia32PlatformDcaCap,
+    kCpumMsrRdFn_Ia32CpuDcaCap,
+    kCpumMsrRdFn_Ia32Dca0Cap,
+    kCpumMsrRdFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
+    kCpumMsrRdFn_Ia32PerfStatus,            /**< Range value returned. */
+    kCpumMsrRdFn_Ia32PerfCtl,               /**< Range value returned. */
+    kCpumMsrRdFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
+    kCpumMsrRdFn_Ia32PerfCapabilities,      /**< Takes reference value. */
+    kCpumMsrRdFn_Ia32FixedCtrCtrl,
+    kCpumMsrRdFn_Ia32PerfGlobalStatus,      /**< Takes reference value. */
+    kCpumMsrRdFn_Ia32PerfGlobalCtrl,
+    kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl,
+    kCpumMsrRdFn_Ia32PebsEnable,
+    kCpumMsrRdFn_Ia32ClockModulation,       /**< Range value returned. */
+    kCpumMsrRdFn_Ia32ThermInterrupt,        /**< Range value returned. */
+    kCpumMsrRdFn_Ia32ThermStatus,           /**< Range value returned. */
+    kCpumMsrRdFn_Ia32Therm2Ctl,             /**< Range value returned. */
+    kCpumMsrRdFn_Ia32MiscEnable,            /**< Range value returned. */
+    kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
+    kCpumMsrRdFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
+    kCpumMsrRdFn_Ia32DsArea,
+    kCpumMsrRdFn_Ia32TscDeadline,
+    kCpumMsrRdFn_Ia32X2ApicN,
+    kCpumMsrRdFn_Ia32VmxBase,               /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxPinbasedCtls,       /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxProcbasedCtls,      /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxExitCtls,           /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxEntryCtls,          /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxMisc,               /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxCr0Fixed0,          /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxCr0Fixed1,          /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxCr4Fixed0,          /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxCr4Fixed1,          /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxVmcsEnum,           /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxProcBasedCtls2,     /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxEptVpidCap,         /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxTruePinbasedCtls,   /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls,  /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxTrueExitCtls,       /**< Takes real value as reference. */
+    kCpumMsrRdFn_Ia32VmxTrueEntryCtls,      /**< Takes real value as reference. */
+
+    kCpumMsrRdFn_Amd64Efer,
+    kCpumMsrRdFn_Amd64SyscallTarget,
+    kCpumMsrRdFn_Amd64LongSyscallTarget,
+    kCpumMsrRdFn_Amd64CompSyscallTarget,
+    kCpumMsrRdFn_Amd64SyscallFlagMask,
+    kCpumMsrRdFn_Amd64FsBase,
+    kCpumMsrRdFn_Amd64GsBase,
+    kCpumMsrRdFn_Amd64KernelGsBase,
+    kCpumMsrRdFn_Amd64TscAux,
+
+    kCpumMsrRdFn_IntelEblCrPowerOn,
+    kCpumMsrRdFn_IntelPlatformInfo100MHz,
+    kCpumMsrRdFn_IntelPlatformInfo133MHz,
+    kCpumMsrRdFn_IntelPkgCStConfigControl,
+    kCpumMsrRdFn_IntelPmgIoCaptureBase,
+    kCpumMsrRdFn_IntelLastBranchFromToN,
+    kCpumMsrRdFn_IntelLastBranchFromN,
+    kCpumMsrRdFn_IntelLastBranchToN,
+    kCpumMsrRdFn_IntelLastBranchTos,
+    kCpumMsrRdFn_IntelBblCrCtl,
+    kCpumMsrRdFn_IntelBblCrCtl3,
+    kCpumMsrRdFn_IntelI7TemperatureTarget,  /**< Range value returned. */
+    kCpumMsrRdFn_IntelI7MsrOffCoreResponseN,/**< Takes register number. */
+    kCpumMsrRdFn_IntelI7MiscPwrMgmt,
+    kCpumMsrRdFn_IntelP6CrN,
+    kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx,
+    kCpumMsrRdFn_IntelCpuId1FeatureMaskEax,
+    kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx,
+    kCpumMsrRdFn_IntelI7SandyAesNiCtl,
+    kCpumMsrRdFn_IntelI7TurboRatioLimit,    /**< Returns range value. */
+    kCpumMsrRdFn_IntelI7LbrSelect,
+    kCpumMsrRdFn_IntelI7SandyErrorControl,
+    kCpumMsrRdFn_IntelI7VirtualLegacyWireCap,/**< Returns range value. */
+    kCpumMsrRdFn_IntelI7PowerCtl,
+    kCpumMsrRdFn_IntelI7SandyPebsNumAlt,
+    kCpumMsrRdFn_IntelI7PebsLdLat,
+    kCpumMsrRdFn_IntelI7PkgCnResidencyN,     /**< Takes C-state number. */
+    kCpumMsrRdFn_IntelI7CoreCnResidencyN,    /**< Takes C-state number. */
+    kCpumMsrRdFn_IntelI7SandyVrCurrentConfig,/**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7SandyVrMiscConfig,   /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7SandyRaplPowerUnit,  /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN,     /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7SandyPkgC2Residency, /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplPkgPowerLimit,   /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus, /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplPkgPerfStatus,   /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplPkgPowerInfo,    /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplDramPowerLimit,  /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplDramEnergyStatus,/**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplDramPerfStatus,  /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplDramPowerInfo,   /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplPp0PowerLimit,   /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus, /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplPp0Policy,       /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplPp0PerfStatus,   /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplPp1PowerLimit,   /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus, /**< Takes real value as reference. */
+    kCpumMsrRdFn_IntelI7RaplPp1Policy,       /**< Takes real value as reference. */
+
+    kCpumMsrRdFn_P6LastBranchFromIp,
+    kCpumMsrRdFn_P6LastBranchToIp,
+    kCpumMsrRdFn_P6LastIntFromIp,
+    kCpumMsrRdFn_P6LastIntToIp,
+
+    kCpumMsrRdFn_AmdFam15hTscRate,
+    kCpumMsrRdFn_AmdFam15hLwpCfg,
+    kCpumMsrRdFn_AmdFam15hLwpCbAddr,
+    kCpumMsrRdFn_AmdFam10hMc4MiscN,
+    kCpumMsrRdFn_AmdK8PerfCtlN,
+    kCpumMsrRdFn_AmdK8PerfCtrN,
+    kCpumMsrRdFn_AmdK8SysCfg,               /**< Range value returned. */
+    kCpumMsrRdFn_AmdK8HwCr,
+    kCpumMsrRdFn_AmdK8IorrBaseN,
+    kCpumMsrRdFn_AmdK8IorrMaskN,
+    kCpumMsrRdFn_AmdK8TopOfMemN,
+    kCpumMsrRdFn_AmdK8NbCfg1,
+    kCpumMsrRdFn_AmdK8McXcptRedir,
+    kCpumMsrRdFn_AmdK8CpuNameN,
+    kCpumMsrRdFn_AmdK8HwThermalCtrl,        /**< Range value returned. */
+    kCpumMsrRdFn_AmdK8SwThermalCtrl,
+    kCpumMsrRdFn_AmdK8McCtlMaskN,
+    kCpumMsrRdFn_AmdK8SmiOnIoTrapN,
+    kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts,
+    kCpumMsrRdFn_AmdK8IntPendingMessage,
+    kCpumMsrRdFn_AmdK8SmiTriggerIoCycle,
+    kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr,
+    kCpumMsrRdFn_AmdFam10hTrapCtlMaybe,
+    kCpumMsrRdFn_AmdFam10hPStateCurLimit,   /**< Returns range value. */
+    kCpumMsrRdFn_AmdFam10hPStateControl,    /**< Returns range value. */
+    kCpumMsrRdFn_AmdFam10hPStateStatus,     /**< Returns range value. */
+    kCpumMsrRdFn_AmdFam10hPStateN,          /**< Returns range value. This isn't a register index! */
+    kCpumMsrRdFn_AmdFam10hCofVidControl,    /**< Returns range value. */
+    kCpumMsrRdFn_AmdFam10hCofVidStatus,     /**< Returns range value. */
+    kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr,
+    kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer,
+    kCpumMsrRdFn_AmdK8SmmBase,
+    kCpumMsrRdFn_AmdK8SmmAddr,
+    kCpumMsrRdFn_AmdK8SmmMask,
+    kCpumMsrRdFn_AmdK8VmCr,
+    kCpumMsrRdFn_AmdK8IgnNe,
+    kCpumMsrRdFn_AmdK8SmmCtl,
+    kCpumMsrRdFn_AmdK8VmHSavePa,
+    kCpumMsrRdFn_AmdFam10hVmLockKey,
+    kCpumMsrRdFn_AmdFam10hSmmLockKey,
+    kCpumMsrRdFn_AmdFam10hLocalSmiStatus,
+    kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength,
+    kCpumMsrRdFn_AmdFam10hOsVisWrkStatus,
+    kCpumMsrRdFn_AmdFam16hL2IPerfCtlN,
+    kCpumMsrRdFn_AmdFam16hL2IPerfCtrN,
+    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN,
+    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN,
+    kCpumMsrRdFn_AmdK7MicrocodeCtl,         /**< Returns range value. */
+    kCpumMsrRdFn_AmdK7ClusterIdMaybe,       /**< Returns range value. */
+    kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax,
+    kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx,
+    kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx,
+    kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx,
+    kCpumMsrRdFn_AmdK7DebugStatusMaybe,
+    kCpumMsrRdFn_AmdK7BHTraceBaseMaybe,
+    kCpumMsrRdFn_AmdK7BHTracePtrMaybe,
+    kCpumMsrRdFn_AmdK7BHTraceLimitMaybe,
+    kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe,
+    kCpumMsrRdFn_AmdK7FastFlushCountMaybe,
+    kCpumMsrRdFn_AmdK7NodeId,
+    kCpumMsrRdFn_AmdK7DrXAddrMaskN,      /**< Takes register index. */
+    kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe,
+    kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe,
+    kCpumMsrRdFn_AmdK7LoadStoreCfg,
+    kCpumMsrRdFn_AmdK7InstrCacheCfg,
+    kCpumMsrRdFn_AmdK7DataCacheCfg,
+    kCpumMsrRdFn_AmdK7BusUnitCfg,
+    kCpumMsrRdFn_AmdK7DebugCtl2Maybe,
+    kCpumMsrRdFn_AmdFam15hFpuCfg,
+    kCpumMsrRdFn_AmdFam15hDecoderCfg,
+    kCpumMsrRdFn_AmdFam10hBusUnitCfg2,
+    kCpumMsrRdFn_AmdFam15hCombUnitCfg,
+    kCpumMsrRdFn_AmdFam15hCombUnitCfg2,
+    kCpumMsrRdFn_AmdFam15hCombUnitCfg3,
+    kCpumMsrRdFn_AmdFam15hExecUnitCfg,
+    kCpumMsrRdFn_AmdFam15hLoadStoreCfg2,
+    kCpumMsrRdFn_AmdFam10hIbsFetchCtl,
+    kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr,
+    kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr,
+    kCpumMsrRdFn_AmdFam10hIbsOpExecCtl,
+    kCpumMsrRdFn_AmdFam10hIbsOpRip,
+    kCpumMsrRdFn_AmdFam10hIbsOpData,
+    kCpumMsrRdFn_AmdFam10hIbsOpData2,
+    kCpumMsrRdFn_AmdFam10hIbsOpData3,
+    kCpumMsrRdFn_AmdFam10hIbsDcLinAddr,
+    kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr,
+    kCpumMsrRdFn_AmdFam10hIbsCtl,
+    kCpumMsrRdFn_AmdFam14hIbsBrTarget,
+
+    /** End of valid MSR read function indexes. */
+    kCpumMsrRdFn_End
+} CPUMMSRRDFN;
+
+/**
+ * MSR write functions.
+ */
+typedef enum CPUMMSRWRFN
+{
+    /** Invalid zero value. */
+    kCpumMsrWrFn_Invalid = 0,
+    /** Writes are ignored, the fWrGpMask is observed though. */
+    kCpumMsrWrFn_IgnoreWrite,
+    /** Writes cause GP(0) to be raised, the fWrGpMask should be UINT64_MAX. */
+    kCpumMsrWrFn_ReadOnly,
+    /** Alias to the MSR range starting at the MSR given by
+     * CPUMMSRRANGE::uInitOrReadValue.  Must be used in pair with
+     * kCpumMsrRdFn_MsrAlias. */
+    kCpumMsrWrFn_MsrAlias,
+
+    kCpumMsrWrFn_Ia32P5McAddr,
+    kCpumMsrWrFn_Ia32P5McType,
+    kCpumMsrWrFn_Ia32TimestampCounter,
+    kCpumMsrWrFn_Ia32ApicBase,
+    kCpumMsrWrFn_Ia32FeatureControl,
+    kCpumMsrWrFn_Ia32BiosUpdateTrigger,
+    kCpumMsrWrFn_Ia32SmmMonitorCtl,
+    kCpumMsrWrFn_Ia32PmcN,
+    kCpumMsrWrFn_Ia32MonitorFilterLineSize,
+    kCpumMsrWrFn_Ia32MPerf,
+    kCpumMsrWrFn_Ia32APerf,
+    kCpumMsrWrFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
+    kCpumMsrWrFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
+    kCpumMsrWrFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
+    kCpumMsrWrFn_Ia32MtrrDefType,
+    kCpumMsrWrFn_Ia32Pat,
+    kCpumMsrWrFn_Ia32SysEnterCs,
+    kCpumMsrWrFn_Ia32SysEnterEsp,
+    kCpumMsrWrFn_Ia32SysEnterEip,
+    kCpumMsrWrFn_Ia32McgStatus,
+    kCpumMsrWrFn_Ia32McgCtl,
+    kCpumMsrWrFn_Ia32DebugCtl,
+    kCpumMsrWrFn_Ia32SmrrPhysBase,
+    kCpumMsrWrFn_Ia32SmrrPhysMask,
+    kCpumMsrWrFn_Ia32PlatformDcaCap,
+    kCpumMsrWrFn_Ia32Dca0Cap,
+    kCpumMsrWrFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
+    kCpumMsrWrFn_Ia32PerfCtl,
+    kCpumMsrWrFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
+    kCpumMsrWrFn_Ia32PerfCapabilities,
+    kCpumMsrWrFn_Ia32FixedCtrCtrl,
+    kCpumMsrWrFn_Ia32PerfGlobalStatus,
+    kCpumMsrWrFn_Ia32PerfGlobalCtrl,
+    kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl,
+    kCpumMsrWrFn_Ia32PebsEnable,
+    kCpumMsrWrFn_Ia32ClockModulation,
+    kCpumMsrWrFn_Ia32ThermInterrupt,
+    kCpumMsrWrFn_Ia32ThermStatus,
+    kCpumMsrWrFn_Ia32Therm2Ctl,
+    kCpumMsrWrFn_Ia32MiscEnable,
+    kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
+    kCpumMsrWrFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
+    kCpumMsrWrFn_Ia32DsArea,
+    kCpumMsrWrFn_Ia32TscDeadline,
+    kCpumMsrWrFn_Ia32X2ApicN,
+
+    kCpumMsrWrFn_Amd64Efer,
+    kCpumMsrWrFn_Amd64SyscallTarget,
+    kCpumMsrWrFn_Amd64LongSyscallTarget,
+    kCpumMsrWrFn_Amd64CompSyscallTarget,
+    kCpumMsrWrFn_Amd64SyscallFlagMask,
+    kCpumMsrWrFn_Amd64FsBase,
+    kCpumMsrWrFn_Amd64GsBase,
+    kCpumMsrWrFn_Amd64KernelGsBase,
+    kCpumMsrWrFn_Amd64TscAux,
+    kCpumMsrWrFn_IntelEblCrPowerOn,
+    kCpumMsrWrFn_IntelPkgCStConfigControl,
+    kCpumMsrWrFn_IntelPmgIoCaptureBase,
+    kCpumMsrWrFn_IntelLastBranchFromToN,
+    kCpumMsrWrFn_IntelLastBranchFromN,
+    kCpumMsrWrFn_IntelLastBranchToN,
+    kCpumMsrWrFn_IntelLastBranchTos,
+    kCpumMsrWrFn_IntelBblCrCtl,
+    kCpumMsrWrFn_IntelBblCrCtl3,
+    kCpumMsrWrFn_IntelI7TemperatureTarget,
+    kCpumMsrWrFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
+    kCpumMsrWrFn_IntelI7MiscPwrMgmt,
+    kCpumMsrWrFn_IntelP6CrN,
+    kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
+    kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
+    kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
+    kCpumMsrWrFn_IntelI7SandyAesNiCtl,
+    kCpumMsrWrFn_IntelI7TurboRatioLimit,
+    kCpumMsrWrFn_IntelI7LbrSelect,
+    kCpumMsrWrFn_IntelI7SandyErrorControl,
+    kCpumMsrWrFn_IntelI7PowerCtl,
+    kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
+    kCpumMsrWrFn_IntelI7PebsLdLat,
+    kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
+    kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
+    kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
+    kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
+    kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
+    kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
+    kCpumMsrWrFn_IntelI7RaplPp0Policy,
+    kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
+    kCpumMsrWrFn_IntelI7RaplPp1Policy,
+
+    kCpumMsrWrFn_P6LastIntFromIp,
+    kCpumMsrWrFn_P6LastIntToIp,
+
+    kCpumMsrWrFn_AmdFam15hTscRate,
+    kCpumMsrWrFn_AmdFam15hLwpCfg,
+    kCpumMsrWrFn_AmdFam15hLwpCbAddr,
+    kCpumMsrWrFn_AmdFam10hMc4MiscN,
+    kCpumMsrWrFn_AmdK8PerfCtlN,
+    kCpumMsrWrFn_AmdK8PerfCtrN,
+    kCpumMsrWrFn_AmdK8SysCfg,
+    kCpumMsrWrFn_AmdK8HwCr,
+    kCpumMsrWrFn_AmdK8IorrBaseN,
+    kCpumMsrWrFn_AmdK8IorrMaskN,
+    kCpumMsrWrFn_AmdK8TopOfMemN,
+    kCpumMsrWrFn_AmdK8NbCfg1,
+    kCpumMsrWrFn_AmdK8McXcptRedir,
+    kCpumMsrWrFn_AmdK8CpuNameN,
+    kCpumMsrWrFn_AmdK8HwThermalCtrl,
+    kCpumMsrWrFn_AmdK8SwThermalCtrl,
+    kCpumMsrWrFn_AmdK8McCtlMaskN,
+    kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
+    kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
+    kCpumMsrWrFn_AmdK8IntPendingMessage,
+    kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
+    kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
+    kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
+    kCpumMsrWrFn_AmdFam10hPStateControl,
+    kCpumMsrWrFn_AmdFam10hPStateStatus,
+    kCpumMsrWrFn_AmdFam10hPStateN,
+    kCpumMsrWrFn_AmdFam10hCofVidControl,
+    kCpumMsrWrFn_AmdFam10hCofVidStatus,
+    kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
+    kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
+    kCpumMsrWrFn_AmdK8SmmBase,
+    kCpumMsrWrFn_AmdK8SmmAddr,
+    kCpumMsrWrFn_AmdK8SmmMask,
+    kCpumMsrWrFn_AmdK8VmCr,
+    kCpumMsrWrFn_AmdK8IgnNe,
+    kCpumMsrWrFn_AmdK8SmmCtl,
+    kCpumMsrWrFn_AmdK8VmHSavePa,
+    kCpumMsrWrFn_AmdFam10hVmLockKey,
+    kCpumMsrWrFn_AmdFam10hSmmLockKey,
+    kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
+    kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
+    kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
+    kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
+    kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
+    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
+    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
+    kCpumMsrWrFn_AmdK7MicrocodeCtl,
+    kCpumMsrWrFn_AmdK7ClusterIdMaybe,
+    kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
+    kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
+    kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
+    kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
+    kCpumMsrWrFn_AmdK7DebugStatusMaybe,
+    kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
+    kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
+    kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
+    kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
+    kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
+    kCpumMsrWrFn_AmdK7NodeId,
+    kCpumMsrWrFn_AmdK7DrXAddrMaskN,      /**< Takes register index. */
+    kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
+    kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
+    kCpumMsrWrFn_AmdK7LoadStoreCfg,
+    kCpumMsrWrFn_AmdK7InstrCacheCfg,
+    kCpumMsrWrFn_AmdK7DataCacheCfg,
+    kCpumMsrWrFn_AmdK7BusUnitCfg,
+    kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
+    kCpumMsrWrFn_AmdFam15hFpuCfg,
+    kCpumMsrWrFn_AmdFam15hDecoderCfg,
+    kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
+    kCpumMsrWrFn_AmdFam15hCombUnitCfg,
+    kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
+    kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
+    kCpumMsrWrFn_AmdFam15hExecUnitCfg,
+    kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
+    kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
+    kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
+    kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
+    kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
+    kCpumMsrWrFn_AmdFam10hIbsOpRip,
+    kCpumMsrWrFn_AmdFam10hIbsOpData,
+    kCpumMsrWrFn_AmdFam10hIbsOpData2,
+    kCpumMsrWrFn_AmdFam10hIbsOpData3,
+    kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
+    kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
+    kCpumMsrWrFn_AmdFam10hIbsCtl,
+    kCpumMsrWrFn_AmdFam14hIbsBrTarget,
+
+    /** End of valid MSR write function indexes. */
+    kCpumMsrWrFn_End
+} CPUMMSRWRFN;
+
+/**
+ * MSR range.
+ */
+typedef struct CPUMMSRRANGE
+{
+    /** The first MSR. [0] */
+    uint32_t    uFirst;
+    /** The last MSR. [4] */
+    uint32_t    uLast;
+    /** The read function (CPUMMSRRDFN). [8] */
+    uint16_t    enmRdFn;
+    /** The write function (CPUMMSRWRFN). [10] */
+    uint16_t    enmWrFn;
+    /** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
+     * UINT16_MAX if not used by the read and write functions.  [12] */
+    uint16_t    offCpumCpu;
+    /** Reserved for future hacks. [14] */
+    uint16_t    fReserved;
+    /** The init/read value. [16]
+     * When enmRdFn is kCpumMsrRdFn_FixedValue, this is the value returned on RDMSR.
+     * offCpumCpu must be UINT16_MAX in that case, otherwise it must be a valid
+     * offset into CPUMCPU. */
+    uint64_t    uInitOrReadValue;
+    /** The bits to ignore when writing. [24]   */
+    uint64_t    fWrIgnMask;
+    /** The bits that will cause a GP(0) when writing. [32]
+     * This is always checked prior to calling the write function.  Using
+     * UINT64_MAX effectively marks the MSR as read-only. */
+    uint64_t    fWrGpMask;
+    /** The register name, if applicable. [40] */
+    char        szName[56];
+
+#ifdef VBOX_WITH_STATISTICS
+    /** The number of reads. */
+    STAMCOUNTER cReads;
+    /** The number of writes. */
+    STAMCOUNTER cWrites;
+    /** The number of times ignored bits were written. */
+    STAMCOUNTER cIgnoredBits;
+    /** The number of GPs generated. */
+    STAMCOUNTER cGps;
+#endif
+} CPUMMSRRANGE;
+#ifdef VBOX_WITH_STATISTICS
+AssertCompileSize(CPUMMSRRANGE, 128);
+#else
+AssertCompileSize(CPUMMSRRANGE, 96);
+#endif
+/** Pointer to an MSR range. */
+typedef CPUMMSRRANGE *PCPUMMSRRANGE;
+/** Pointer to a const MSR range. */
+typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;
+
+
+
+
+/**
+ * CPU features and quirks.
+ * This is mostly exploded CPUID info.
+ */
+typedef struct CPUMFEATURES
+{
+    /** The CPU vendor (CPUMCPUVENDOR). */
+    uint8_t         enmCpuVendor;
+    /** The CPU family. */
+    uint8_t         uFamily;
+    /** The CPU model. */
+    uint8_t         uModel;
+    /** The CPU stepping. */
+    uint8_t         uStepping;
+    /** The microarchitecture. */
+    CPUMMICROARCH   enmMicroarch;
+    /** The maximum physical address width of the CPU. */
+    uint8_t         cMaxPhysAddrWidth;
+    /** Alignment padding.  */
+    uint8_t         abPadding[3];
+
+    /** Supports MSRs.  */
+    uint32_t        fMsr : 1;
+    /** Supports the page size extension (4/2 MB pages). */
+    uint32_t        fPse : 1;
+    /** Supports 36-bit page size extension (4 MB pages can map memory above
+     *  4GB). */
+    uint32_t        fPse36 : 1;
+    /** Supports physical address extension (PAE).  */
+    uint32_t        fPae : 1;
+    /** Page attribute table (PAT) support (page level cache control). */
+    uint32_t        fPat : 1;
+    /** Supports the FXSAVE and FXRSTOR instructions. */
+    uint32_t        fFxSaveRstor : 1;
+    /** Intel SYSENTER/SYSEXIT support */
+    uint32_t        fSysEnter : 1;
+    /** First generation APIC. */
+    uint32_t        fApic : 1;
+    /** Second generation APIC. */
+    uint32_t        fX2Apic : 1;
+    /** Hypervisor present. */
+    uint32_t        fHypervisorPresent : 1;
+    /** MWAIT & MONITOR instructions supported. */
+    uint32_t        fMonitorMWait : 1;
+
+    /** AMD64: Supports long mode.  */
+    uint32_t        fLongMode : 1;
+    /** AMD64: SYSCALL/SYSRET support. */
+    uint32_t        fSysCall : 1;
+    /** AMD64: No-execute page table bit. */
+    uint32_t        fNoExecute : 1;
+    /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
+    uint32_t        fLahfSahf : 1;
+    /** AMD64: Supports RDTSCP. */
+    uint32_t        fRdTscP : 1;
+
+    /** Indicates that FPU instruction and data pointers may leak.
+     * This generally applies to recent AMD CPUs, where the FPU IP and DP pointers
+     * are only saved and restored if an exception is pending.   */
+    uint32_t        fLeakyFxSR : 1;
+
+    /** Alignment padding.  */
+    uint32_t        fPadding : 9;
+
+    uint64_t        auPadding[2];
+} CPUMFEATURES;
+AssertCompileSize(CPUMFEATURES, 32);
+/** Pointer to a CPU feature structure. */
+typedef CPUMFEATURES *PCPUMFEATURES;
+/** Pointer to a const CPU feature structure. */
+typedef CPUMFEATURES const *PCCPUMFEATURES;
+
+
+/**
+ * CPU info
+ */
+typedef struct CPUMINFO
+{
+    /** The number of MSR ranges (CPUMMSRRANGE) in the array pointed to below. */
+    uint32_t                    cMsrRanges;
+    /** Mask applied to ECX before looking up the MSR for a RDMSR/WRMSR
+     * instruction.  Older hardware has been observed to ignore higher bits. */
+    uint32_t                    fMsrMask;
+
+    /** The number of CPUID leaves (CPUMCPUIDLEAF) in the array pointed to below. */
+    uint32_t                    cCpuIdLeaves;
+    /** The index of the first extended CPUID leaf in the array.
+     *  Set to cCpuIdLeaves if none present. */
+    uint32_t                    iFirstExtCpuIdLeaf;
+    /** How to handle unknown CPUID leaves. */
+    CPUMUKNOWNCPUID             enmUnknownCpuIdMethod;
+    /** For use with CPUMUKNOWNCPUID_DEFAULTS. */
+    CPUMCPUID                   DefCpuId;
+
+    /** Alignment padding.  */
+    uint32_t                    uPadding;
+
+    /** Pointer to the MSR ranges (ring-0 pointer). */
+    R0PTRTYPE(PCPUMMSRRANGE)    paMsrRangesR0;
+    /** Pointer to the CPUID leaves (ring-0 pointer). */
+    R0PTRTYPE(PCPUMCPUIDLEAF)   paCpuIdLeavesR0;
+
+    /** Pointer to the MSR ranges (ring-3 pointer). */
+    R3PTRTYPE(PCPUMMSRRANGE)    paMsrRangesR3;
+    /** Pointer to the CPUID leaves (ring-3 pointer). */
+    R3PTRTYPE(PCPUMCPUIDLEAF)   paCpuIdLeavesR3;
+
+    /** Pointer to the MSR ranges (raw-mode context pointer). */
+    RCPTRTYPE(PCPUMMSRRANGE)    paMsrRangesRC;
+    /** Pointer to the CPUID leaves (raw-mode context pointer). */
+    RCPTRTYPE(PCPUMCPUIDLEAF)   paCpuIdLeavesRC;
+} CPUMINFO;
+/** Pointer to a CPU info structure. */
+typedef CPUMINFO *PCPUMINFO;
+/** Pointer to a const CPU info structure. */
+typedef CPUMINFO const *CPCPUMINFO;
 
 
@@ -310,9 +923,4 @@
     } CPUFeaturesExt;
 
-    /** Host CPU manufacturer. */
-    CPUMCPUVENDOR           enmHostCpuVendor;
-    /** Guest CPU manufacturer. */
-    CPUMCPUVENDOR           enmGuestCpuVendor;
-
     /** CR4 mask */
     struct
@@ -322,6 +930,4 @@
     } CR4;
 
-    /** Synthetic CPU type? */
-    bool                    fSyntheticCpu;
     /** The (more) portable CPUID level.  */
     uint8_t                 u8PortableCpuIdLevel;
@@ -329,5 +935,5 @@
      * This is used to verify load order dependencies (PGM). */
     bool                    fPendingRestore;
-    uint8_t                 abPadding[HC_ARCH_BITS == 64 ? 5 : 1];
+    uint8_t                 abPadding[HC_ARCH_BITS == 64 ? 6 : 2];
 
     /** The standard set of CpuId leaves. */
@@ -345,4 +951,22 @@
     uint8_t                 abPadding2[4];
 #endif
+
+    /** Guest CPU info. */
+    CPUMINFO                GuestInfo;
+    /** Guest CPU feature information. */
+    CPUMFEATURES            GuestFeatures;
+    /** Host CPU feature information. */
+    CPUMFEATURES            HostFeatures;
+
+    /** @name MSR statistics.
+     * @{ */
+    STAMCOUNTER             cMsrWrites;
+    STAMCOUNTER             cMsrWritesToIgnoredBits;
+    STAMCOUNTER             cMsrWritesRaiseGp;
+    STAMCOUNTER             cMsrWritesUnknown;
+    STAMCOUNTER             cMsrReads;
+    STAMCOUNTER             cMsrReadsRaiseGp;
+    STAMCOUNTER             cMsrReadsUnknown;
+    /** @} */
 } CPUM;
 /** Pointer to the CPUM instance data residing in the shared VM structure. */
@@ -430,6 +1054,19 @@
 RT_C_DECLS_BEGIN
 
+PCPUMCPUIDLEAF      cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf);
+
 #ifdef IN_RING3
 int                 cpumR3DbgInit(PVM pVM);
+PCPUMCPUIDLEAF      cpumR3CpuIdGetLeaf(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf);
+bool                cpumR3CpuIdGetLeafLegacy(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf,
+                                             PCPUMCPUID pLeagcy);
+int                 cpumR3CpuIdInsert(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCPUMCPUIDLEAF pNewLeaf);
+void                cpumR3CpuIdRemoveRange(PCPUMCPUIDLEAF paLeaves, uint32_t *pcLeaves, uint32_t uFirst, uint32_t uLast);
+int                 cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCPUMFEATURES pFeatures);
+int                 cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo);
+int                 cpumR3MsrRangesInsert(PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange);
+int                 cpumR3MsrStrictInitChecks(void);
+int                 cpumR3MsrRegStats(PVM pVM);
+PCPUMMSRRANGE       cpumLookupMsrRange(PVM pVM, uint32_t idMsr);
 #endif
 
Index: /trunk/src/VBox/VMM/include/CPUMInternal.mac
===================================================================
--- /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 49892)
+++ /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 49893)
@@ -67,18 +67,14 @@
     .CPUFeaturesExt.ecx   resd    1
 
-    .enmHostCpuVendor     resd    1
-    .enmGuestCpuVendor    resd    1
-
     ; CR4 masks
     .CR4.AndMask          resd    1
     .CR4.OrMask           resd    1
     ; entered rawmode?
-    .fSyntheticCpu        resb    1
     .u8PortableCpuIdLevel resb    1
     .fPendingRestore      resb    1
 %if RTHCPTR_CB == 8
-    .abPadding            resb    5
+    .abPadding            resb    6
 %else
-    .abPadding            resb    1
+    .abPadding            resb    2
 %endif
 
@@ -93,4 +89,16 @@
     .abPadding2           resb    4
 %endif
+
+    .GuestInfo            resb    RTHCPTR_CB*4 + RTRCPTR_CB*2 + 4*10
+    .GuestFeatures        resb    32
+    .HostFeatures         resb    32
+
+    .cMsrWrites                 resq  1
+    .cMsrWritesToIgnoredBits    resq  1
+    .cMsrWritesRaiseGp          resq  1
+    .cMsrWritesUnknown          resq  1
+    .cMsrReads                  resq  1
+    .cMsrReadsRaiseGp           resq  1
+    .cMsrReadsUnknown           resq  1
 endstruc
 
Index: /trunk/src/VBox/VMM/testcase/tstVMStruct.h
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 49892)
+++ /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 49893)
@@ -32,9 +32,6 @@
     GEN_CHECK_OFF(CPUM, CPUFeaturesExt);
     GEN_CHECK_OFF(CPUM, CPUFeaturesExt);
-    GEN_CHECK_OFF(CPUM, enmHostCpuVendor);
-    GEN_CHECK_OFF(CPUM, enmGuestCpuVendor);
     GEN_CHECK_OFF(CPUM, CR4);
 #ifndef VBOX_FOR_DTRACE_LIB
-    GEN_CHECK_OFF(CPUM, fSyntheticCpu);
     GEN_CHECK_OFF(CPUM, u8PortableCpuIdLevel);
     GEN_CHECK_OFF(CPUM, fPendingRestore);
Index: /trunk/src/VBox/VMM/tools/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/tools/Makefile.kmk	(revision 49892)
+++ /trunk/src/VBox/VMM/tools/Makefile.kmk	(revision 49893)
@@ -51,4 +51,21 @@
 
 
+#
+# CPU report program (CPUM DB).
+#
+PROGRAMS += VBoxCpuReport
+VBoxCpuReport_TEMPLATE := VBoxR3Static
+VBoxCpuReport_DEFS      = IN_VMM_R3
+VBoxCpuReport_INCS      = ../include
+VBoxCpuReport_SOURCES   = \
+	VBoxCpuReport.cpp \
+	../VMMR3/CPUMR3CpuId.cpp
+VBoxCpuReport_LIBS      = \
+	$(PATH_STAGE_LIB)/SUPR3Static$(VBOX_SUFF_LIB) \
+	$(VBOX_LIB_RUNTIME_STATIC)
+VBoxCpuReport_LDFLAGS.darwin = \
+	-framework IOKit -framework CoreFoundation -framework CoreServices
+
+
 include $(FILE_KBUILD_SUB_FOOTER)
 
Index: /trunk/src/VBox/VMM/tools/VBoxCpuReport.cpp
===================================================================
--- /trunk/src/VBox/VMM/tools/VBoxCpuReport.cpp	(revision 49893)
+++ /trunk/src/VBox/VMM/tools/VBoxCpuReport.cpp	(revision 49893)
@@ -0,0 +1,3996 @@
+/* $Id$ */
+/** @file
+ * VBoxCpuReport - Produces the basis for a CPU DB entry.
+ */
+
+/*
+ * Copyright (C) 2013 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*******************************************************************************
+*   Header Files                                                               *
+*******************************************************************************/
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/buildconfig.h>
+#include <iprt/ctype.h>
+#include <iprt/file.h>
+#include <iprt/getopt.h>
+#include <iprt/initterm.h>
+#include <iprt/message.h>
+#include <iprt/mem.h>
+#include <iprt/path.h>
+#include <iprt/string.h>
+#include <iprt/stream.h>
+#include <iprt/symlink.h>
+#include <iprt/thread.h>
+#include <iprt/time.h>
+
+#include <VBox/err.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/sup.h>
+
+
+/*******************************************************************************
+*   Structures and Typedefs                                                    *
+*******************************************************************************/
+/** Write only register. */
+#define VBCPUREPMSR_F_WRITE_ONLY      RT_BIT(0)
+
+/**
+ * One probed MSR: its register number, VBCPUREPMSR_F_XXX flags, and the
+ * value that was read from it (zero for write-only registers).
+ */
+typedef struct VBCPUREPMSR
+{
+    /** The MSR register number. */
+    uint32_t        uMsr;
+    /** Flags (VBCPUREPMSR_F_XXX). */
+    uint32_t        fFlags;
+    /** The value we read, unless write-only.  */
+    uint64_t        uValue;
+} VBCPUREPMSR;
+
+
+/*******************************************************************************
+*   Global Variables                                                           *
+*******************************************************************************/
+/** The CPU vendor.  Used by the MSR code. */
+static CPUMCPUVENDOR    g_enmVendor = CPUMCPUVENDOR_INVALID;
+/** The CPU microarchitecture.  Used by the MSR code. */
+static CPUMMICROARCH    g_enmMicroarch = kCpumMicroarch_Invalid;
+/** Set if g_enmMicroarch indicates an Intel NetBurst CPU. */
+static bool             g_fIntelNetBurst = false;
+/** The report stream.  Optional; vbCpuRepPrintf only writes here when
+ * non-NULL and always echoes to standard output as well. */
+static PRTSTREAM        g_pReportOut;
+/** The debug stream.  Optional; NULL disables vbCpuRepDebug output. */
+static PRTSTREAM        g_pDebugOut;
+
+
+/**
+ * Prints a formatted debug message to the debug stream, if one is configured.
+ *
+ * No-op when g_pDebugOut is NULL.  The stream is flushed after each message.
+ *
+ * @param   pszMsg      The format string (IPRT format extensions).
+ * @param   ...         Format arguments.
+ */
+static void vbCpuRepDebug(const char *pszMsg, ...)
+{
+    if (g_pDebugOut)
+    {
+        va_list va;
+        va_start(va, pszMsg);
+        RTStrmPrintfV(g_pDebugOut, pszMsg, va);
+        va_end(va);
+        RTStrmFlush(g_pDebugOut);
+        /* NOTE(review): short sleep after flushing -- presumably paces output
+           so it survives if a subsequent MSR probe hangs the box; confirm. */
+        RTThreadSleep(1);
+    }
+}
+
+
+/**
+ * Prints a formatted report message.
+ *
+ * The message goes to the report stream when one is open (g_pReportOut) and
+ * is always also echoed to standard output.
+ *
+ * @param   pszMsg      The format string (IPRT format extensions).
+ * @param   ...         Format arguments.
+ */
+static void vbCpuRepPrintf(const char *pszMsg, ...)
+{
+    va_list va;
+
+    /* Output to report file, if requested. */
+    if (g_pReportOut)
+    {
+        va_start(va, pszMsg);
+        RTStrmPrintfV(g_pReportOut, pszMsg, va);
+        va_end(va);
+        RTStrmFlush(g_pReportOut);
+    }
+
+    /* Always print a copy of the report to standard out. */
+    va_start(va, pszMsg);
+    RTStrmPrintfV(g_pStdOut, pszMsg, va);
+    va_end(va);
+    RTStrmFlush(g_pStdOut);
+}
+
+
+
+/**
+ * Appends one MSR entry to the array, growing it in chunks of 64 entries.
+ *
+ * On allocation failure the existing array is freed and *ppaMsrs / *pcMsrs
+ * are reset, so the caller must not free the old pointer again.
+ *
+ * @returns VINF_SUCCESS or VERR_NO_MEMORY.
+ * @param   ppaMsrs     Pointer to the MSR array variable (input/output).
+ * @param   pcMsrs      Pointer to the entry-count variable (input/output).
+ * @param   uMsr        The MSR register number to add.
+ * @param   uValue      The value read (zero for write-only registers).
+ * @param   fFlags      VBCPUREPMSR_F_XXX.
+ */
+static int vbCpuRepMsrsAddOne(VBCPUREPMSR **ppaMsrs, uint32_t *pcMsrs,
+                              uint32_t uMsr, uint64_t uValue, uint32_t fFlags)
+{
+    /*
+     * Grow the array?
+     */
+    uint32_t cMsrs = *pcMsrs;
+    if ((cMsrs % 64) == 0) /* capacity is always kept at a multiple of 64 */
+    {
+        void *pvNew = RTMemRealloc(*ppaMsrs, (cMsrs + 64) * sizeof(**ppaMsrs));
+        if (!pvNew)
+        {
+            /* Out of memory: release everything so callers can't double free. */
+            RTMemFree(*ppaMsrs);
+            *ppaMsrs = NULL;
+            *pcMsrs  = 0;
+            return VERR_NO_MEMORY;
+        }
+        *ppaMsrs = (VBCPUREPMSR *)pvNew;
+    }
+
+    /*
+     * Add it.
+     */
+    VBCPUREPMSR *pEntry = *ppaMsrs + cMsrs;
+    pEntry->uMsr   = uMsr;
+    pEntry->fFlags = fFlags;
+    pEntry->uValue = uValue;
+    *pcMsrs = cMsrs + 1;
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Returns the max physical address width as a number of bits.
+ *
+ * Falls back to 32 bits when CPUID is unavailable, uses leaf 0x80000008
+ * when present, and otherwise infers 36 bits from the PSE-36 feature bit.
+ *
+ * @returns Bit count.
+ */
+static uint8_t vbCpuRepGetPhysAddrWidth(void)
+{
+    uint8_t cMaxWidth;
+    if (!ASMHasCpuId())
+        cMaxWidth = 32;                 /* No CPUID instruction at all. */
+    else
+    {
+        /* Only execute CPUID after we know the instruction is supported
+           (the original queried leaf 0x80000000 unconditionally first). */
+        uint32_t cMaxExt = ASMCpuId_EAX(0x80000000);
+        if (ASMIsValidExtRange(cMaxExt) && cMaxExt >= 0x80000008)
+            cMaxWidth = ASMCpuId_EAX(0x80000008) & 0xff; /* Phys addr bits in AL. */
+        else if (   ASMIsValidStdRange(ASMCpuId_EAX(0))
+                 && (ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PSE36))
+            cMaxWidth = 36;             /* PSE-36 implies 36-bit physical addresses. */
+        else
+            cMaxWidth = 32;
+    }
+    return cMaxWidth;
+}
+
+
+/**
+ * Checks whether the host CPU advertises PAE support.
+ *
+ * @returns true if CPUID leaf 1 EDX has the PAE feature bit set.
+ */
+static bool vbCpuRepSupportsPae(void)
+{
+    if (!ASMHasCpuId())
+        return false;
+    if (!ASMIsValidStdRange(ASMCpuId_EAX(0)))
+        return false;
+    return (ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE) != 0;
+}
+
+
+/**
+ * Checks whether the host CPU advertises long mode (64-bit) support.
+ *
+ * @returns true if CPUID leaf 0x80000001 EDX has the long-mode bit set.
+ */
+static bool vbCpuRepSupportsLongMode(void)
+{
+    if (!ASMHasCpuId())
+        return false;
+    if (!ASMIsValidExtRange(ASMCpuId_EAX(0x80000000)))
+        return false;
+    return (ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) != 0;
+}
+
+
+/**
+ * Checks whether the host CPU advertises NX (no-execute) support.
+ *
+ * @returns true if CPUID leaf 0x80000001 EDX has the NX bit set.
+ */
+static bool vbCpuRepSupportsNX(void)
+{
+    if (!ASMHasCpuId())
+        return false;
+    if (!ASMIsValidExtRange(ASMCpuId_EAX(0x80000000)))
+        return false;
+    return (ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX) != 0;
+}
+
+
+/**
+ * Checks whether the host CPU advertises x2APIC support.
+ *
+ * @returns true if CPUID leaf 1 ECX has the x2APIC feature bit set.
+ */
+static bool vbCpuRepSupportsX2Apic(void)
+{
+    if (!ASMHasCpuId())
+        return false;
+    if (!ASMIsValidStdRange(ASMCpuId_EAX(0)))
+        return false;
+    return (ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_X2APIC) != 0;
+}
+
+
+
+/**
+ * Writes an MSR via the support driver prober.
+ *
+ * @returns true if the API call succeeded and no \#GP was raised.
+ * @param   uMsr        The MSR to write.
+ * @param   uValue      The value to write.
+ */
+static bool msrProberWrite(uint32_t uMsr, uint64_t uValue)
+{
+    bool fTrapped = true;
+    int rc = SUPR3MsrProberWrite(uMsr, NIL_RTCPUID, uValue, &fTrapped);
+    AssertRC(rc);
+    if (RT_FAILURE(rc))
+        return false;
+    return !fTrapped;
+}
+
+
+/**
+ * Reads an MSR via the support driver prober.
+ *
+ * @returns true if the API call succeeded and no \#GP was raised.
+ * @param   uMsr        The MSR to read.
+ * @param   puValue     Where to return the value (zeroed on entry).
+ */
+static bool msrProberRead(uint32_t uMsr, uint64_t *puValue)
+{
+    *puValue = 0;
+    bool fTrapped = true;
+    int rc = SUPR3MsrProberRead(uMsr, NIL_RTCPUID, puValue, &fTrapped);
+    AssertRC(rc);
+    if (RT_FAILURE(rc))
+        return false;
+    return !fTrapped;
+}
+
+
+/** Tries to modify the register by writing the original value back to it
+ * (AND with all bits set, OR with zero).
+ * @returns true if all four prober phases completed without a \#GP. */
+static bool msrProberModifyNoChange(uint32_t uMsr)
+{
+    SUPMSRPROBERMODIFYRESULT Result;
+    if (RT_FAILURE(SUPR3MsrProberModify(uMsr, NIL_RTCPUID, UINT64_MAX, 0, &Result)))
+        return false;
+    return !Result.fBeforeGp
+        && !Result.fModifyGp
+        && !Result.fAfterGp
+        && !Result.fRestoreGp;
+}
+
+
+/** Tries to modify the register by writing zero to it (AND 0, OR 0).
+ * @returns true if all four prober phases completed without a \#GP. */
+static bool msrProberModifyZero(uint32_t uMsr)
+{
+    SUPMSRPROBERMODIFYRESULT Result;
+    if (RT_FAILURE(SUPR3MsrProberModify(uMsr, NIL_RTCPUID, 0, 0, &Result)))
+        return false;
+    return !Result.fBeforeGp
+        && !Result.fModifyGp
+        && !Result.fAfterGp
+        && !Result.fRestoreGp;
+}
+
+
+/**
+ * Tries to modify each bit in the MSR and see if we can make it change.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR.
+ * @param   pfIgnMask           The ignore mask to update.
+ * @param   pfGpMask            The GP mask to update.
+ * @param   fSkipMask           Mask of bits to skip.
+ *
+ * @note    Classification: a bit whose modification \#GPs goes into *pfGpMask;
+ *          a bit that reads back unchanged (with no \#GP) after both the set
+ *          and the clear attempt goes into *pfIgnMask; a bit that actually
+ *          changes goes into neither mask.
+ */
+static int msrProberModifyBitChanges(uint32_t uMsr, uint64_t *pfIgnMask, uint64_t *pfGpMask, uint64_t fSkipMask)
+{
+    for (unsigned iBit = 0; iBit < 64; iBit++)
+    {
+        uint64_t fBitMask = RT_BIT_64(iBit);
+        if (fBitMask & fSkipMask)
+            continue;
+
+        /* Set it. */
+        SUPMSRPROBERMODIFYRESULT ResultSet;
+        int rc = SUPR3MsrProberModify(uMsr, NIL_RTCPUID, ~fBitMask, fBitMask, &ResultSet);
+        if (RT_FAILURE(rc))
+            return RTMsgErrorRc(rc, "SUPR3MsrProberModify(%#x,,%#llx,%#llx,): %Rrc", uMsr, ~fBitMask, fBitMask, rc);
+
+        /* Clear it. */
+        SUPMSRPROBERMODIFYRESULT ResultClear;
+        rc = SUPR3MsrProberModify(uMsr, NIL_RTCPUID, ~fBitMask, 0, &ResultClear);
+        if (RT_FAILURE(rc))
+            return RTMsgErrorRc(rc, "SUPR3MsrProberModify(%#x,,%#llx,%#llx,): %Rrc", uMsr, ~fBitMask, 0, rc);
+
+        if (ResultSet.fModifyGp || ResultClear.fModifyGp)
+            *pfGpMask |= fBitMask;
+        else if (   (   ((ResultSet.uBefore   ^ ResultSet.uAfter)   & fBitMask) == 0
+                     && !ResultSet.fBeforeGp
+                     && !ResultSet.fAfterGp)
+                 && (   ((ResultClear.uBefore ^ ResultClear.uAfter) & fBitMask) == 0
+                     && !ResultClear.fBeforeGp
+                     && !ResultClear.fAfterGp) )
+            *pfIgnMask |= fBitMask;
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Tries to modify one bit.
+ *
+ * @retval  -2 on API error.
+ * @retval  -1 on \#GP.
+ * @retval  0 if ignored.
+ * @retval  1 if it changed.
+ *
+ * @param   uMsr                The MSR.
+ * @param   iBit                The bit to try modify.
+ */
+static int msrProberModifyBit(uint32_t uMsr, unsigned iBit)
+{
+    uint64_t fBitMask = RT_BIT_64(iBit);
+
+    /* Set it. */
+    SUPMSRPROBERMODIFYRESULT ResultSet;
+    int rc = SUPR3MsrProberModify(uMsr, NIL_RTCPUID, ~fBitMask, fBitMask, &ResultSet);
+    if (RT_FAILURE(rc))
+        return RTMsgErrorRc(-2, "SUPR3MsrProberModify(%#x,,%#llx,%#llx,): %Rrc", uMsr, ~fBitMask, fBitMask, rc); /* RTMsgErrorRc returns its first argument, i.e. -2. */
+
+    /* Clear it. */
+    SUPMSRPROBERMODIFYRESULT ResultClear;
+    rc = SUPR3MsrProberModify(uMsr, NIL_RTCPUID, ~fBitMask, 0, &ResultClear);
+    if (RT_FAILURE(rc))
+        return RTMsgErrorRc(-2, "SUPR3MsrProberModify(%#x,,%#llx,%#llx,): %Rrc", uMsr, ~fBitMask, 0, rc);
+
+    /* A #GP during either modify phase means the bit is protected. */
+    if (ResultSet.fModifyGp || ResultClear.fModifyGp)
+        return -1;
+
+    /* The bit "changed" if either attempt altered the readback without GPs. */
+    if (   (   ((ResultSet.uBefore   ^ ResultSet.uAfter)   & fBitMask) != 0
+            && !ResultSet.fBeforeGp
+            && !ResultSet.fAfterGp)
+        || (   ((ResultClear.uBefore ^ ResultClear.uAfter) & fBitMask) != 0
+            && !ResultClear.fBeforeGp
+            && !ResultClear.fAfterGp) )
+        return 1;
+
+    return 0;
+}
+
+
+/**
+ * Tries to do a simple AND+OR change and see if we \#GP or not.
+ *
+ * @retval  @c true if successfully modified.
+ * @retval  @c false if \#GP or other error.
+ *
+ * @param   uMsr                The MSR.
+ * @param   fAndMask            The AND mask.
+ * @param   fOrMask             The OR mask.
+ */
+static bool msrProberModifySimpleGp(uint32_t uMsr, uint64_t fAndMask, uint64_t fOrMask)
+{
+    SUPMSRPROBERMODIFYRESULT Result;
+    int rc = SUPR3MsrProberModify(uMsr, NIL_RTCPUID, fAndMask, fOrMask, &Result);
+    if (RT_FAILURE(rc))
+    {
+        /* API failure is logged and treated the same as a #GP. */
+        RTMsgError("SUPR3MsrProberModify(%#x,,%#llx,%#llx,): %Rrc", uMsr, fAndMask, fOrMask, rc);
+        return false;
+    }
+    return !Result.fBeforeGp
+        && !Result.fModifyGp
+        && !Result.fAfterGp
+        && !Result.fRestoreGp;
+}
+
+
+
+
+/**
+ * Combination of the basic tests.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR.
+ * @param   fSkipMask           Mask of bits to skip.
+ * @param   pfReadOnly          Where to return read-only status.
+ * @param   pfIgnMask           Where to return the write ignore mask.  Need not
+ *                              be initialized.
+ * @param   pfGpMask            Where to return the write GP mask.  Need not
+ *                              be initialized.
+ */
+static int msrProberModifyBasicTests(uint32_t uMsr, uint64_t fSkipMask, bool *pfReadOnly, uint64_t *pfIgnMask, uint64_t *pfGpMask)
+{
+    if (!msrProberModifyNoChange(uMsr))
+    {
+        /* Writing back the current value faults: treat as fully read-only. */
+        *pfReadOnly = true;
+        *pfIgnMask  = 0;
+        *pfGpMask   = UINT64_MAX;
+        return VINF_SUCCESS;
+    }
+
+    /* Writable: probe each bit to build the ignore and GP masks. */
+    *pfReadOnly = false;
+    *pfIgnMask  = 0;
+    *pfGpMask   = 0;
+    return msrProberModifyBitChanges(uMsr, pfIgnMask, pfGpMask, fSkipMask);
+}
+
+
+
+/**
+ * Determines the MSR AND mask.
+ *
+ * Older CPUs don't necessarily implement all bits of the MSR register number.
+ * So, we have to approximate how many are used so we don't get an overly large
+ * and confusing set of MSRs when probing.
+ *
+ * @returns The mask.
+ */
+static uint32_t determineMsrAndMask(void)
+{
+#define VBCPUREP_MASK_TEST_MSRS     7
+    static uint32_t const s_aMsrs[VBCPUREP_MASK_TEST_MSRS] =
+    {
+        /* Try a bunch of mostly read only registers: */
+        MSR_P5_MC_TYPE, MSR_IA32_PLATFORM_ID, MSR_IA32_MTRR_CAP, MSR_IA32_MCG_CAP, MSR_IA32_CR_PAT,
+        /* Then some which aren't supposed to be present on any CPU: */
+        0x00000015, 0x00000019,
+    };
+
+    /* Get the base values. */
+    uint64_t auBaseValues[VBCPUREP_MASK_TEST_MSRS];
+    for (unsigned i = 0; i < RT_ELEMENTS(s_aMsrs); i++)
+    {
+        if (!msrProberRead(s_aMsrs[i], &auBaseValues[i]))
+            auBaseValues[i] = UINT64_MAX;
+        //vbCpuRepDebug("Base: %#x -> %#llx\n", s_aMsrs[i], auBaseValues[i]);
+    }
+
+    /* Do the probing: read the test MSRs with one high bit ORed in; any
+       difference from the base value means that bit takes part in decoding. */
+    unsigned iBit;
+    for (iBit = 31; iBit > 8; iBit--)
+    {
+        uint64_t fMsrOrMask = RT_BIT_64(iBit);
+        for (unsigned iTest = 0; iTest <= 64 && fMsrOrMask < UINT32_MAX; iTest++)
+        {
+            for (unsigned i = 0; i < RT_ELEMENTS(s_aMsrs); i++)
+            {
+                uint64_t uValue;
+                if (!msrProberRead(s_aMsrs[i] | fMsrOrMask, &uValue))
+                    uValue = UINT64_MAX;
+                if (uValue != auBaseValues[i])
+                {
+                    uint32_t fMsrMask = iBit >= 31 ? UINT32_MAX : RT_BIT_32(iBit + 1) - 1;
+                    vbCpuRepDebug("MSR AND mask: quit on iBit=%u uMsr=%#x (%#x) %llx != %llx => fMsrMask=%#x\n",
+                                  iBit, s_aMsrs[i] | (uint32_t)fMsrOrMask, s_aMsrs[i], uValue, auBaseValues[i], fMsrMask);
+                    return fMsrMask;
+                }
+            }
+
+            /* Advance.  NOTE(review): iBit never drops below 9 in this loop,
+               so the iBit <= 6 branch looks unreachable -- confirm intent. */
+            if (iBit <= 6)
+                fMsrOrMask += RT_BIT_64(iBit);
+            else if (iBit <= 11)
+                fMsrOrMask += RT_BIT_64(iBit) * 33;
+            else if (iBit <= 16)
+                fMsrOrMask += RT_BIT_64(iBit) * 1025;
+            else if (iBit <= 22)
+                fMsrOrMask += RT_BIT_64(iBit) * 65537;
+            else
+                fMsrOrMask += RT_BIT_64(iBit) * 262145;
+        }
+    }
+
+    uint32_t fMsrMask = RT_BIT_32(iBit + 1) - 1;
+    vbCpuRepDebug("MSR AND mask: less than %u bits that matters?!? => fMsrMask=%#x\n", iBit + 1, fMsrMask);
+    return fMsrMask;
+}
+
+
+/**
+ * Scans the interesting MSR ranges and collects all readable or writable MSRs.
+ *
+ * @returns VBox status code.  On failure the array is freed and *ppaMsrs is
+ *          set to NULL (vbCpuRepMsrsAddOne does the same on out-of-memory).
+ * @param   ppaMsrs     Where to return the array of detected MSRs.
+ * @param   pcMsrs      Where to return the number of entries.
+ * @param   fMsrMask    Mask of implemented MSR-number bits (see
+ *                      determineMsrAndMask); addresses outside it are skipped.
+ */
+static int findMsrs(VBCPUREPMSR **ppaMsrs, uint32_t *pcMsrs, uint32_t fMsrMask)
+{
+    /*
+     * Gather them.
+     */
+    static struct { uint32_t uFirst, cMsrs; } const s_aRanges[] =
+    {
+        { 0x00000000, 0x00042000 },
+        { 0x10000000, 0x00001000 },
+        { 0x20000000, 0x00001000 },
+        { 0x40000000, 0x00012000 },
+        { 0x80000000, 0x00012000 },
+        { 0xc0000000, 0x00022000 }, /* Had some trouble here on solaris with the tstVMM setup. */
+    };
+
+    *pcMsrs  = 0;
+    *ppaMsrs = NULL;
+
+    for (unsigned i = 0; i < RT_ELEMENTS(s_aRanges); i++)
+    {
+        uint32_t uMsr  = s_aRanges[i].uFirst;
+        if ((uMsr & fMsrMask) != uMsr)
+            continue;
+        uint32_t cLeft = s_aRanges[i].cMsrs;
+        while (cLeft-- > 0 && (uMsr & fMsrMask) == uMsr)
+        {
+            if ((uMsr & 0xfff) == 0)
+            {
+                vbCpuRepDebug("testing %#x...\n", uMsr);
+                RTThreadSleep(22);
+            }
+#if 0
+            else if (uMsr >= 0xc0011008 && uMsr <= 0xc0011100)
+            {
+                vbCpuRepDebug("testing %#x...\n", uMsr);
+                RTThreadSleep(22);
+            }
+#endif
+
+            /* Read probing normally does it. */
+            uint64_t uValue = 0;
+            bool     fGp    = true;
+            int rc = SUPR3MsrProberRead(uMsr, NIL_RTCPUID, &uValue, &fGp);
+            if (RT_FAILURE(rc))
+            {
+                RTMemFree(*ppaMsrs);
+                *ppaMsrs = NULL;
+                return RTMsgErrorRc(rc, "SUPR3MsrProberRead failed on %#x: %Rrc\n", uMsr, rc);
+            }
+
+            uint32_t fFlags;
+            if (!fGp)
+                fFlags = 0;
+            else
+            {
+                /* Is it a write only register? */
+                fGp = true;
+                rc = SUPR3MsrProberWrite(uMsr, NIL_RTCPUID, 0, &fGp);
+                if (RT_FAILURE(rc))
+                {
+                    RTMemFree(*ppaMsrs);
+                    *ppaMsrs = NULL;
+                    return RTMsgErrorRc(rc, "SUPR3MsrProberWrite failed on %#x: %Rrc\n", uMsr, rc);
+                }
+                uValue = 0;
+                fFlags = VBCPUREPMSR_F_WRITE_ONLY;
+
+                /*
+                 * Tweaks.  On Intel CPUs we've got trouble detecting
+                 * IA32_BIOS_UPDT_TRIG (0x00000079), so we have to add it manually here.
+                 */
+                if (   uMsr == 0x00000079
+                    && fGp
+                    && g_enmMicroarch >= kCpumMicroarch_Intel_P6_Core_Atom_First
+                    && g_enmMicroarch <= kCpumMicroarch_Intel_End)
+                    fGp = false;
+            }
+
+            if (!fGp)
+            {
+                /* Add it. */
+                rc = vbCpuRepMsrsAddOne(ppaMsrs, pcMsrs, uMsr, uValue, fFlags);
+                if (RT_FAILURE(rc))
+                    return RTMsgErrorRc(rc, "Out of memory (uMsr=%#x).\n", uMsr);
+                vbCpuRepDebug("%#010x: uValue=%#llx fFlags=%#x\n", uMsr, uValue, fFlags);
+            }
+
+            /* Advance to the next MSR number; the original loop never
+               incremented uMsr and so re-probed the same register. */
+            uMsr++;
+        }
+    }
+
+    return VINF_SUCCESS;
+}
+
+/**
+ * Get the name of the specified MSR, if we know it and can handle it.
+ *
+ * Do _NOT_ add any new names here without ALSO at the SAME TIME making sure it
+ * is handled correctly by the PROBING CODE and REPORTED correctly!!
+ *
+ * @returns Pointer to name if handled, NULL if not yet explored.
+ * @param   uMsr                The MSR in question.
+ */
+static const char *getMsrNameHandled(uint32_t uMsr)
+{
+    /** @todo figure out where NCU_EVENT_CORE_MASK might be... */
+    switch (uMsr)
+    {
+        case 0x00000000: return "IA32_P5_MC_ADDR";
+        case 0x00000001: return "IA32_P5_MC_TYPE";
+        case 0x00000006:
+            if (g_enmMicroarch >= kCpumMicroarch_Intel_First && g_enmMicroarch <= kCpumMicroarch_Intel_P6_Core_Atom_First)
+                return NULL; /* TR4 / cache tag on Pentium, but that's for later. */
+            return "IA32_MONITOR_FILTER_LINE_SIZE";
+        //case 0x0000000e: return "P?_TR12"; /* K6-III docs */
+        case 0x00000010: return "IA32_TIME_STAMP_COUNTER";
+        case 0x00000017: return "IA32_PLATFORM_ID";
+        case 0x00000018: return "P6_UNK_0000_0018"; /* P6_M_Dothan. */
+        case 0x0000001b: return "IA32_APIC_BASE";
+        case 0x0000002a: return "EBL_CR_POWERON";
+        case 0x0000002e: return "I7_UNK_0000_002e"; /* SandyBridge, IvyBridge. */
+        case 0x0000002f: return "P6_UNK_0000_002f"; /* P6_M_Dothan. */
+        case 0x00000032: return "P6_UNK_0000_0032"; /* P6_M_Dothan. */
+        case 0x00000033: return "TEST_CTL";
+        case 0x00000034: return "P6_UNK_0000_0034"; /* P6_M_Dothan. */
+        case 0x00000035: return "P6_UNK_0000_0035"; /* P6_M_Dothan. */
+        case 0x00000036: return "I7_UNK_0000_0036"; /* SandyBridge, IvyBridge. */
+        case 0x0000003a: return "IA32_FEATURE_CONTROL";
+        case 0x0000003b: return "P6_UNK_0000_003b"; /* P6_M_Dothan. */
+        case 0x0000003e: return "I7_UNK_0000_003e"; /* SandyBridge, IvyBridge. */
+        case 0x0000003f: return "P6_UNK_0000_003f"; /* P6_M_Dothan. */
+        case 0x00000040: return "MSR_LASTBRANCH_0";
+        case 0x00000041: return "MSR_LASTBRANCH_1";
+        case 0x00000042: return "MSR_LASTBRANCH_2";
+        case 0x00000043: return "MSR_LASTBRANCH_3";
+        case 0x00000044: return "MSR_LASTBRANCH_4";
+        case 0x00000045: return "MSR_LASTBRANCH_5";
+        case 0x00000046: return "MSR_LASTBRANCH_6";
+        case 0x00000047: return "MSR_LASTBRANCH_7";
+        case 0x00000048: return "MSR_LASTBRANCH_8";
+        case 0x00000049: return "MSR_LASTBRANCH_9";
+        case 0x0000004a: return "P6_UNK_0000_004a"; /* P6_M_Dothan. */
+        case 0x0000004b: return "P6_UNK_0000_004b"; /* P6_M_Dothan. */
+        case 0x0000004c: return "P6_UNK_0000_004c"; /* P6_M_Dothan. */
+        case 0x0000004d: return "P6_UNK_0000_004d"; /* P6_M_Dothan. */
+        case 0x0000004e: return "P6_UNK_0000_004e"; /* P6_M_Dothan. */
+        case 0x0000004f: return "P6_UNK_0000_004f"; /* P6_M_Dothan. */
+        case 0x00000050: return "P6_UNK_0000_0050"; /* P6_M_Dothan. */
+        case 0x00000051: return "P6_UNK_0000_0051"; /* P6_M_Dothan. */
+        case 0x00000052: return "P6_UNK_0000_0052"; /* P6_M_Dothan. */
+        case 0x00000053: return "P6_UNK_0000_0053"; /* P6_M_Dothan. */
+        case 0x00000054: return "P6_UNK_0000_0054"; /* P6_M_Dothan. */
+        case 0x0000006c: return "P6_UNK_0000_006c"; /* P6_M_Dothan. */
+        case 0x0000006d: return "P6_UNK_0000_006d"; /* P6_M_Dothan. */
+        case 0x0000006e: return "P6_UNK_0000_006e"; /* P6_M_Dothan. */
+        case 0x0000006f: return "P6_UNK_0000_006f"; /* P6_M_Dothan. */
+        case 0x00000079: return "IA32_BIOS_UPDT_TRIG";
+        case 0x00000088: return "BBL_CR_D0";
+        case 0x00000089: return "BBL_CR_D1";
+        case 0x0000008a: return "BBL_CR_D2";
+        case 0x0000008b: return "BBL_CR_D3|BIOS_SIGN";
+        case 0x0000008c: return "P6_UNK_0000_008c"; /* P6_M_Dothan. */
+        case 0x0000008d: return "P6_UNK_0000_008d"; /* P6_M_Dothan. */
+        case 0x0000008e: return "P6_UNK_0000_008e"; /* P6_M_Dothan. */
+        case 0x0000008f: return "P6_UNK_0000_008f"; /* P6_M_Dothan. */
+        case 0x00000090: return "P6_UNK_0000_0090"; /* P6_M_Dothan. */
+        case 0x0000009b: return "IA32_SMM_MONITOR_CTL";
+        case 0x000000ae: return "P6_UNK_0000_00ae"; /* P6_M_Dothan. */
+        case 0x000000c1: return "IA32_PMC0";
+        case 0x000000c2: return "IA32_PMC1";
+        case 0x000000c3: return "IA32_PMC2";
+        case 0x000000c4: return "IA32_PMC3";
+        /* PMC4+ first seen on SandyBridge. The earlier cut off is just to be
+           on the safe side as we must avoid P6_M_Dothan and possibly others. */
+        case 0x000000c5: return g_enmMicroarch >= kCpumMicroarch_Intel_Core7_First ? "IA32_PMC4" : NULL;
+        case 0x000000c6: return g_enmMicroarch >= kCpumMicroarch_Intel_Core7_First ? "IA32_PMC5" : NULL;
+        case 0x000000c7: return g_enmMicroarch >= kCpumMicroarch_Intel_Core7_First ? "IA32_PMC6" : "P6_UNK_0000_00c7"; /* P6_M_Dothan. */
+        case 0x000000c8: return g_enmMicroarch >= kCpumMicroarch_Intel_Core7_First ? "IA32_PMC7" : NULL;
+        case 0x000000cd: return "P6_UNK_0000_00cd"; /* P6_M_Dothan. */
+        case 0x000000ce: return "P6_UNK_0000_00ce"; /* P6_M_Dothan. */
+        case 0x000000e2: return "MSR_PKG_CST_CONFIG_CONTROL";
+        case 0x000000e4: return "MSR_PMG_IO_CAPTURE_BASE";
+        case 0x000000e7: return "IA32_MPERF";
+        case 0x000000e8: return "IA32_APERF";
+        case 0x000000fe: return "IA32_MTRRCAP";
+        case 0x00000102: return "I7_IB_UNK_0000_0102"; /* IvyBridge. */
+        case 0x00000103: return "I7_IB_UNK_0000_0103"; /* IvyBridge. */
+        case 0x00000104: return "I7_IB_UNK_0000_0104"; /* IvyBridge. */
+        case 0x00000116: return "BBL_CR_ADDR";
+        case 0x00000118: return "BBL_CR_DECC";
+        case 0x00000119: return "BBL_CR_CTL";
+        case 0x0000011a: return "BBL_CR_TRIG";
+        case 0x0000011b: return "P6_UNK_0000_011b"; /* P6_M_Dothan. */
+        case 0x0000011e: return "BBL_CR_CTL3";
+        case 0x00000130: return g_enmMicroarch == kCpumMicroarch_Intel_Core7_Westmere
+                             || g_enmMicroarch == kCpumMicroarch_Intel_Core7_Nehalem
+                              ? "CPUID1_FEATURE_MASK" : NULL;
+        case 0x00000131: return g_enmMicroarch == kCpumMicroarch_Intel_Core7_Westmere
+                             || g_enmMicroarch == kCpumMicroarch_Intel_Core7_Nehalem
+                              ? "CPUID80000001_FEATURE_MASK" : "P6_UNK_0000_0131" /* P6_M_Dothan. */;
+        case 0x00000132: return g_enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge
+                              ? "CPUID1_FEATURE_MASK" : NULL;
+        case 0x00000133: return g_enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge
+                              ? "CPUIDD_01_FEATURE_MASK" : NULL;
+        case 0x00000134: return g_enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge
+                              ? "CPUID80000001_FEATURE_MASK" : NULL;
+        case 0x0000013c: return "I7_SB_AES_NI_CTL"; /* SandyBridge. Bit 0 is lock bit, bit 1 disables AES-NI. */
+        case 0x00000140: return "I7_IB_UNK_0000_0140"; /* IvyBridge. */
+        case 0x00000142: return "I7_IB_UNK_0000_0142"; /* IvyBridge. */
+        case 0x0000014e: return "P6_UNK_0000_014e"; /* P6_M_Dothan. */
+        case 0x0000014f: return "P6_UNK_0000_014f"; /* P6_M_Dothan. */
+        case 0x00000150: return "P6_UNK_0000_0150"; /* P6_M_Dothan. */
+        case 0x00000151: return "P6_UNK_0000_0151"; /* P6_M_Dothan. */
+        case 0x00000154: return "P6_UNK_0000_0154"; /* P6_M_Dothan. */
+        case 0x0000015b: return "P6_UNK_0000_015b"; /* P6_M_Dothan. */
+        case 0x00000174: return "IA32_SYSENTER_CS";
+        case 0x00000175: return "IA32_SYSENTER_ESP";
+        case 0x00000176: return "IA32_SYSENTER_EIP";
+        case 0x00000179: return "IA32_MCG_CAP";
+        case 0x0000017a: return "IA32_MCG_STATUS";
+        case 0x0000017b: return "IA32_MCG_CTL";
+        case 0x0000017f: return "I7_SB_ERROR_CONTROL"; /* SandyBridge. */
+        case 0x00000180: return g_fIntelNetBurst ? "MSR_MCG_RAX"       : NULL;
+        case 0x00000181: return g_fIntelNetBurst ? "MSR_MCG_RBX"       : NULL;
+        case 0x00000182: return g_fIntelNetBurst ? "MSR_MCG_RCX"       : NULL;
+        case 0x00000183: return g_fIntelNetBurst ? "MSR_MCG_RDX"       : NULL;
+        case 0x00000184: return g_fIntelNetBurst ? "MSR_MCG_RSI"       : NULL;
+        case 0x00000185: return g_fIntelNetBurst ? "MSR_MCG_RDI"       : NULL;
+        case 0x00000186: return g_fIntelNetBurst ? "MSR_MCG_RBP"       : "IA32_PERFEVTSEL0";
+        case 0x00000187: return g_fIntelNetBurst ? "MSR_MCG_RSP"       : "IA32_PERFEVTSEL1";
+        case 0x00000188: return g_fIntelNetBurst ? "MSR_MCG_RFLAGS"    : "IA32_PERFEVTSEL2";
+        case 0x00000189: return g_fIntelNetBurst ? "MSR_MCG_RIP"       : "IA32_PERFEVTSEL3";
+        case 0x0000018a: return g_fIntelNetBurst ? "MSR_MCG_MISC"      : "IA32_PERFEVTSEL4";
+        case 0x0000018b: return g_fIntelNetBurst ? "MSR_MCG_RESERVED1" : "IA32_PERFEVTSEL5";
+        case 0x0000018c: return g_fIntelNetBurst ? "MSR_MCG_RESERVED2" : "IA32_PERFEVTSEL6";
+        case 0x0000018d: return g_fIntelNetBurst ? "MSR_MCG_RESERVED3" : "IA32_PERFEVTSEL7";
+        case 0x0000018e: return g_fIntelNetBurst ? "MSR_MCG_RESERVED4" : "IA32_PERFEVTSEL8";
+        case 0x0000018f: return g_fIntelNetBurst ? "MSR_MCG_RESERVED5" : "IA32_PERFEVTSEL9";
+        case 0x00000190: return g_fIntelNetBurst ? "MSR_MCG_R8"        : NULL;
+        case 0x00000191: return g_fIntelNetBurst ? "MSR_MCG_R9"        : NULL;
+        case 0x00000192: return g_fIntelNetBurst ? "MSR_MCG_R10"       : NULL;
+        case 0x00000193: return g_fIntelNetBurst ? "MSR_MCG_R11"       : NULL;
+        case 0x00000194: return g_fIntelNetBurst ? "MSR_MCG_R12"       : "CLOCK_FLEX_MAX";
+        case 0x00000195: return g_fIntelNetBurst ? "MSR_MCG_R13"       : NULL;
+        case 0x00000196: return g_fIntelNetBurst ? "MSR_MCG_R14"       : NULL;
+        case 0x00000197: return g_fIntelNetBurst ? "MSR_MCG_R15"       : NULL;
+        case 0x00000198: return "IA32_PERF_STATUS";
+        case 0x00000199: return "IA32_PERF_CTL";
+        case 0x0000019a: return "IA32_CLOCK_MODULATION";
+        case 0x0000019b: return "IA32_THERM_INTERRUPT";
+        case 0x0000019c: return "IA32_THERM_STATUS";
+        case 0x0000019d: return "IA32_THERM2_CTL";
+        case 0x0000019e: return "P6_UNK_0000_019e"; /* P6_M_Dothan. */
+        case 0x0000019f: return "P6_UNK_0000_019f"; /* P6_M_Dothan. */
+        case 0x000001a0: return "IA32_MISC_ENABLE";
+        case 0x000001a1: return "P6_UNK_0000_01a1"; /* P6_M_Dothan. */
+        case 0x000001a2: return "I7_MSR_TEMPERATURE_TARGET"; /* SandyBridge, IvyBridge. */
+        case 0x000001a4: return "I7_UNK_0000_01a4"; /* SandyBridge, IvyBridge. */
+        case 0x000001a6: return "I7_MSR_OFFCORE_RSP_0";
+        case 0x000001a7: return "I7_MSR_OFFCORE_RSP_1";
+        case 0x000001a8: return "I7_UNK_0000_01a8"; /* SandyBridge, IvyBridge. */
+        case 0x000001aa: return CPUMMICROARCH_IS_INTEL_CORE7(g_enmMicroarch) ? "MSR_MISC_PWR_MGMT" : "P6_PIC_SENS_CFG" /* Pentium M. */;
+        case 0x000001ad: return "I7_MSR_TURBO_RATIO_LIMIT"; /* SandyBridge+, Silvermount+ */
+        case 0x000001ae: return "P6_UNK_0000_01ae"; /* P6_M_Dothan. */
+        case 0x000001af: return "P6_UNK_0000_01af"; /* P6_M_Dothan. */
+        case 0x000001b0: return "IA32_ENERGY_PERF_BIAS";
+        case 0x000001b1: return "IA32_PACKAGE_THERM_STATUS";
+        case 0x000001b2: return "IA32_PACKAGE_THERM_INTERRUPT";
+        case 0x000001c6: return "I7_UNK_0000_01c6"; /* SandyBridge*/
+        case 0x000001c8: return g_enmMicroarch >= kCpumMicroarch_Intel_Core7_Nehalem ? "MSR_LBR_SELECT" : NULL;
+        case 0x000001c9: return    g_enmMicroarch >= kCpumMicroarch_Intel_Core_Yonah
+                                && g_enmMicroarch <= kCpumMicroarch_Intel_P6_Core_Atom_End
+                              ? "MSR_LASTBRANCH_TOS" : NULL /* Pentium M Dothan seems to have something else here. */;
+        case 0x000001d3: return "P6_UNK_0000_01d3"; /* P6_M_Dothan. */
+        case 0x000001d9: return "IA32_DEBUGCTL";
+        case 0x000001db: return "P6_LAST_BRANCH_FROM_IP"; /* Not exclusive to P6, also AMD. */
+        case 0x000001dc: return "P6_LAST_BRANCH_TO_IP";
+        case 0x000001dd: return "P6_LAST_INT_FROM_IP";
+        case 0x000001de: return "P6_LAST_INT_TO_IP";
+        case 0x000001e0: return "MSR_ROB_CR_BKUPTMPDR6";
+        case 0x000001e1: return "I7_SB_UNK_0000_01e1";
+        case 0x000001ef: return "I7_SB_UNK_0000_01ef";
+        case 0x000001f0: return "I7_VLW_CAPABILITY"; /* SandyBridge.  Bit 1 is A20M and was implemented incorrectly (AAJ49). */
+        case 0x000001f2: return "IA32_SMRR_PHYSBASE";
+        case 0x000001f3: return "IA32_SMRR_PHYSMASK";
+        case 0x000001f8: return "IA32_PLATFORM_DCA_CAP";
+        case 0x000001f9: return "IA32_CPU_DCA_CAP";
+        case 0x000001fa: return "IA32_DCA_0_CAP";
+        case 0x000001fc: return "I7_MSR_POWER_CTL";
+
+        case 0x00000200: return "IA32_MTRR_PHYS_BASE0";
+        case 0x00000202: return "IA32_MTRR_PHYS_BASE1";
+        case 0x00000204: return "IA32_MTRR_PHYS_BASE2";
+        case 0x00000206: return "IA32_MTRR_PHYS_BASE3";
+        case 0x00000208: return "IA32_MTRR_PHYS_BASE4";
+        case 0x0000020a: return "IA32_MTRR_PHYS_BASE5";
+        case 0x0000020c: return "IA32_MTRR_PHYS_BASE6";
+        case 0x0000020e: return "IA32_MTRR_PHYS_BASE7";
+        case 0x00000210: return "IA32_MTRR_PHYS_BASE8";
+        case 0x00000212: return "IA32_MTRR_PHYS_BASE9";
+        case 0x00000214: return "IA32_MTRR_PHYS_BASE10";
+        case 0x00000216: return "IA32_MTRR_PHYS_BASE11";
+        case 0x00000218: return "IA32_MTRR_PHYS_BASE12";
+        case 0x0000021a: return "IA32_MTRR_PHYS_BASE13";
+        case 0x0000021c: return "IA32_MTRR_PHYS_BASE14";
+        case 0x0000021e: return "IA32_MTRR_PHYS_BASE15";
+
+        case 0x00000201: return "IA32_MTRR_PHYS_MASK0";
+        case 0x00000203: return "IA32_MTRR_PHYS_MASK1";
+        case 0x00000205: return "IA32_MTRR_PHYS_MASK2";
+        case 0x00000207: return "IA32_MTRR_PHYS_MASK3";
+        case 0x00000209: return "IA32_MTRR_PHYS_MASK4";
+        case 0x0000020b: return "IA32_MTRR_PHYS_MASK5";
+        case 0x0000020d: return "IA32_MTRR_PHYS_MASK6";
+        case 0x0000020f: return "IA32_MTRR_PHYS_MASK7";
+        case 0x00000211: return "IA32_MTRR_PHYS_MASK8";
+        case 0x00000213: return "IA32_MTRR_PHYS_MASK9";
+        case 0x00000215: return "IA32_MTRR_PHYS_MASK10";
+        case 0x00000217: return "IA32_MTRR_PHYS_MASK11";
+        case 0x00000219: return "IA32_MTRR_PHYS_MASK12";
+        case 0x0000021b: return "IA32_MTRR_PHYS_MASK13";
+        case 0x0000021d: return "IA32_MTRR_PHYS_MASK14";
+        case 0x0000021f: return "IA32_MTRR_PHYS_MASK15";
+
+        case 0x00000250: return "IA32_MTRR_FIX64K_00000";
+        case 0x00000258: return "IA32_MTRR_FIX16K_80000";
+        case 0x00000259: return "IA32_MTRR_FIX16K_A0000";
+        case 0x00000268: return "IA32_MTRR_FIX4K_C0000";
+        case 0x00000269: return "IA32_MTRR_FIX4K_C8000";
+        case 0x0000026a: return "IA32_MTRR_FIX4K_D0000";
+        case 0x0000026b: return "IA32_MTRR_FIX4K_D8000";
+        case 0x0000026c: return "IA32_MTRR_FIX4K_E0000";
+        case 0x0000026d: return "IA32_MTRR_FIX4K_E8000";
+        case 0x0000026e: return "IA32_MTRR_FIX4K_F0000";
+        case 0x0000026f: return "IA32_MTRR_FIX4K_F8000";
+        case 0x00000277: return "IA32_PAT";
+        case 0x00000280: return "IA32_MC0_CTL2";
+        case 0x00000281: return "IA32_MC1_CTL2";
+        case 0x00000282: return "IA32_MC2_CTL2";
+        case 0x00000283: return "IA32_MC3_CTL2";
+        case 0x00000284: return "IA32_MC4_CTL2";
+        case 0x00000285: return "IA32_MC5_CTL2";
+        case 0x00000286: return "IA32_MC6_CTL2";
+        case 0x00000287: return "IA32_MC7_CTL2";
+        case 0x00000288: return "IA32_MC8_CTL2";
+        case 0x00000289: return "IA32_MC9_CTL2";
+        case 0x0000028a: return "IA32_MC10_CTL2";
+        case 0x0000028b: return "IA32_MC11_CTL2";
+        case 0x0000028c: return "IA32_MC12_CTL2";
+        case 0x0000028d: return "IA32_MC13_CTL2";
+        case 0x0000028e: return "IA32_MC14_CTL2";
+        case 0x0000028f: return "IA32_MC15_CTL2";
+        case 0x00000290: return "IA32_MC16_CTL2";
+        case 0x00000291: return "IA32_MC17_CTL2";
+        case 0x00000292: return "IA32_MC18_CTL2";
+        case 0x00000293: return "IA32_MC19_CTL2";
+        case 0x00000294: return "IA32_MC20_CTL2";
+        case 0x00000295: return "IA32_MC21_CTL2";
+        //case 0x00000296: return "IA32_MC22_CTL2";
+        //case 0x00000297: return "IA32_MC23_CTL2";
+        //case 0x00000298: return "IA32_MC24_CTL2";
+        //case 0x00000299: return "IA32_MC25_CTL2";
+        //case 0x0000029a: return "IA32_MC26_CTL2";
+        //case 0x0000029b: return "IA32_MC27_CTL2";
+        //case 0x0000029c: return "IA32_MC28_CTL2";
+        //case 0x0000029d: return "IA32_MC29_CTL2";
+        //case 0x0000029e: return "IA32_MC30_CTL2";
+        //case 0x0000029f: return "IA32_MC31_CTL2";
+        case 0x000002e0: return "I7_SB_NO_EVICT_MODE"; /* (Bits 1 & 0 are said to have something to do with no-evict cache mode used during early boot.) */
+        case 0x000002e6: return "I7_IB_UNK_0000_02e6"; /* IvyBridge */
+        case 0x000002e7: return "I7_IB_UNK_0000_02e7"; /* IvyBridge */
+        case 0x000002ff: return "IA32_MTRR_DEF_TYPE";
+        case 0x00000300: return CPUMMICROARCH_IS_INTEL_NETBURST(g_enmMicroarch) ? "P4_MSR_BPU_COUNTER0"   : "I7_SB_UNK_0000_0300" /* SandyBridge */;
+        case 0x00000305: return CPUMMICROARCH_IS_INTEL_NETBURST(g_enmMicroarch) ? "P4_MSR_MS_COUNTER1"    : "I7_SB_UNK_0000_0305" /* SandyBridge, IvyBridge */;
+        case 0x00000309: return CPUMMICROARCH_IS_INTEL_NETBURST(g_enmMicroarch) ? "P4_MSR_FLAME_COUNTER1" : "IA32_FIXED_CTR0";
+        case 0x0000030a: return CPUMMICROARCH_IS_INTEL_NETBURST(g_enmMicroarch) ? "P4_MSR_FLAME_COUNTER2" : "IA32_FIXED_CTR1";
+        case 0x0000030b: return CPUMMICROARCH_IS_INTEL_NETBURST(g_enmMicroarch) ? "P4_MSR_FLAME_COUNTER3" : "IA32_FIXED_CTR2";
+        case 0x00000345: return "IA32_PERF_CAPABILITIES";
+        case 0x0000038d: return "IA32_FIXED_CTR_CTRL";
+        case 0x0000038e: return "IA32_PERF_GLOBAL_STATUS";
+        case 0x0000038f: return "IA32_PERF_GLOBAL_CTRL";
+        case 0x00000390: return "IA32_PERF_GLOBAL_OVF_CTRL";
+        case 0x00000391: return "I7_UNC_PERF_GLOBAL_CTRL";             /* S,H,X */
+        case 0x00000392: return "I7_UNC_PERF_GLOBAL_STATUS";           /* S,H,X */
+        case 0x00000393: return "I7_UNC_PERF_GLOBAL_OVF_CTRL";         /* X. ASSUMING this is the same on sandybridge and later. */
+        case 0x00000394: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "I7_UNC_PERF_FIXED_CTR"  /* X */    : "I7_UNC_PERF_FIXED_CTR_CTRL"; /* >= S,H */
+        case 0x00000395: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "I7_UNC_PERF_FIXED_CTR_CTRL" /* X*/ : "I7_UNC_PERF_FIXED_CTR";      /* >= S,H */
+        case 0x00000396: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "I7_UNC_ADDR_OPCODE_MATCH" /* X */  : "I7_UNC_CB0_CONFIG";          /* >= S,H */
+        case 0x0000039c: return "I7_SB_MSR_PEBS_NUM_ALT";
+        case 0x000003b0: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "I7_UNC_PMC0" /* X */               : "I7_UNC_ARB_PERF_CTR0";       /* >= S,H */
+        case 0x000003b1: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "I7_UNC_PMC1" /* X */               : "I7_UNC_ARB_PERF_CTR1";       /* >= S,H */
+        case 0x000003b2: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "I7_UNC_PMC2" /* X */               : "I7_UNC_ARB_PERF_EVT_SEL0";   /* >= S,H */
+        case 0x000003b3: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "I7_UNC_PMC3" /* X */               : "I7_UNC_ARB_PERF_EVT_SEL1";   /* >= S,H */
+        case 0x000003b4: return "I7_UNC_PMC4";
+        case 0x000003b5: return "I7_UNC_PMC5";
+        case 0x000003b6: return "I7_UNC_PMC6";
+        case 0x000003b7: return "I7_UNC_PMC7";
+        case 0x000003c0: return "I7_UNC_PERF_EVT_SEL0";
+        case 0x000003c1: return "I7_UNC_PERF_EVT_SEL1";
+        case 0x000003c2: return "I7_UNC_PERF_EVT_SEL2";
+        case 0x000003c3: return "I7_UNC_PERF_EVT_SEL3";
+        case 0x000003c4: return "I7_UNC_PERF_EVT_SEL4";
+        case 0x000003c5: return "I7_UNC_PERF_EVT_SEL5";
+        case 0x000003c6: return "I7_UNC_PERF_EVT_SEL6";
+        case 0x000003c7: return "I7_UNC_PERF_EVT_SEL7";
+        case 0x000003f1: return "IA32_PEBS_ENABLE";
+        case 0x000003f6: return "I7_MSR_PEBS_LD_LAT";
+        case 0x000003f8: return "I7_MSR_PKG_C3_RESIDENCY";
+        case 0x000003f9: return "I7_MSR_PKG_C6_RESIDENCY";
+        case 0x000003fa: return "I7_MSR_PKG_C7_RESIDENCY";
+        case 0x000003fc: return "I7_MSR_CORE_C3_RESIDENCY";
+        case 0x000003fd: return "I7_MSR_CORE_C6_RESIDENCY";
+        case 0x000003fe: return "I7_MSR_CORE_C7_RESIDENCY";
+        case 0x00000478: return g_enmMicroarch == kCpumMicroarch_Intel_Core2_Penryn ? "CPUID1_FEATURE_MASK" : NULL; /* Was missing 'return': fell through to the VMX cases, mislabeling MSR 0x478 as IA32_VMX_BASIC. */
+        case 0x00000480: return "IA32_VMX_BASIC";
+        case 0x00000481: return "IA32_VMX_PINBASED_CTLS";
+        case 0x00000482: return "IA32_VMX_PROCBASED_CTLS";
+        case 0x00000483: return "IA32_VMX_EXIT_CTLS";
+        case 0x00000484: return "IA32_VMX_ENTRY_CTLS";
+        case 0x00000485: return "IA32_VMX_MISC";
+        case 0x00000486: return "IA32_VMX_CR0_FIXED0";
+        case 0x00000487: return "IA32_VMX_CR0_FIXED1";
+        case 0x00000488: return "IA32_VMX_CR4_FIXED0";
+        case 0x00000489: return "IA32_VMX_CR4_FIXED1";
+        case 0x0000048a: return "IA32_VMX_VMCS_ENUM";
+        case 0x0000048b: return "IA32_VMX_PROCBASED_CTLS2";
+        case 0x0000048c: return "IA32_VMX_EPT_VPID_CAP";
+        case 0x0000048d: return "IA32_VMX_TRUE_PINBASED_CTLS";
+        case 0x0000048e: return "IA32_VMX_TRUE_PROCBASED_CTLS";
+        case 0x0000048f: return "IA32_VMX_TRUE_EXIT_CTLS";
+        case 0x00000490: return "IA32_VMX_TRUE_ENTRY_CTLS";
+        case 0x000004c1: return "IA32_A_PMC0";
+        case 0x000004c2: return "IA32_A_PMC1";
+        case 0x000004c3: return "IA32_A_PMC2";
+        case 0x000004c4: return "IA32_A_PMC3";
+        case 0x000004c5: return "IA32_A_PMC4";
+        case 0x000004c6: return "IA32_A_PMC5";
+        case 0x000004c7: return "IA32_A_PMC6";
+        case 0x000004c8: return "IA32_A_PMC7";
+        case 0x00000502: return "I7_SB_UNK_0000_0502";
+        case 0x00000600: return "IA32_DS_AREA";
+        case 0x00000601: return "I7_SB_MSR_VR_CURRENT_CONFIG"; /* SandyBridge, IvyBridge. */
+        case 0x00000603: return "I7_SB_MSR_VR_MISC_CONFIG"; /* SandyBridge, IvyBridge. */
+        case 0x00000606: return "I7_SB_MSR_RAPL_POWER_UNIT"; /* SandyBridge, IvyBridge. */
+        case 0x0000060a: return "I7_SB_MSR_PKGC3_IRTL"; /* SandyBridge, IvyBridge. */
+        case 0x0000060b: return "I7_SB_MSR_PKGC6_IRTL"; /* SandyBridge, IvyBridge. */
+        case 0x0000060c: return "I7_SB_MSR_PKGC7_IRTL"; /* SandyBridge, IvyBridge. */
+        case 0x0000060d: return "I7_SB_MSR_PKG_C2_RESIDENCY"; /* SandyBridge, IvyBridge. */
+        case 0x00000610: return "I7_SB_MSR_PKG_POWER_LIMIT";
+        case 0x00000611: return "I7_SB_MSR_PKG_ENERGY_STATUS";
+        case 0x00000613: return "I7_SB_MSR_PKG_PERF_STATUS";
+        case 0x00000614: return "I7_SB_MSR_PKG_POWER_INFO";
+        case 0x00000618: return "I7_SB_MSR_DRAM_POWER_LIMIT";
+        case 0x00000619: return "I7_SB_MSR_DRAM_ENERGY_STATUS";
+        case 0x0000061b: return "I7_SB_MSR_DRAM_PERF_STATUS";
+        case 0x0000061c: return "I7_SB_MSR_DRAM_POWER_INFO";
+        case 0x00000638: return "I7_SB_MSR_PP0_POWER_LIMIT";
+        case 0x00000639: return "I7_SB_MSR_PP0_ENERGY_STATUS";
+        case 0x0000063a: return "I7_SB_MSR_PP0_POLICY";
+        case 0x0000063b: return "I7_SB_MSR_PP0_PERF_STATUS";
+        case 0x00000640: return "I7_HW_MSR_PP0_POWER_LIMIT";
+        case 0x00000641: return "I7_HW_MSR_PP0_ENERGY_STATUS";
+        case 0x00000642: return "I7_HW_MSR_PP0_POLICY";
+        case 0x00000680: return "MSR_LASTBRANCH_0_FROM_IP";
+        case 0x00000681: return "MSR_LASTBRANCH_1_FROM_IP";
+        case 0x00000682: return "MSR_LASTBRANCH_2_FROM_IP";
+        case 0x00000683: return "MSR_LASTBRANCH_3_FROM_IP";
+        case 0x00000684: return "MSR_LASTBRANCH_4_FROM_IP";
+        case 0x00000685: return "MSR_LASTBRANCH_5_FROM_IP";
+        case 0x00000686: return "MSR_LASTBRANCH_6_FROM_IP";
+        case 0x00000687: return "MSR_LASTBRANCH_7_FROM_IP";
+        case 0x00000688: return "MSR_LASTBRANCH_8_FROM_IP";
+        case 0x00000689: return "MSR_LASTBRANCH_9_FROM_IP";
+        case 0x0000068a: return "MSR_LASTBRANCH_10_FROM_IP";
+        case 0x0000068b: return "MSR_LASTBRANCH_11_FROM_IP";
+        case 0x0000068c: return "MSR_LASTBRANCH_12_FROM_IP";
+        case 0x0000068d: return "MSR_LASTBRANCH_13_FROM_IP";
+        case 0x0000068e: return "MSR_LASTBRANCH_14_FROM_IP";
+        case 0x0000068f: return "MSR_LASTBRANCH_15_FROM_IP";
+        case 0x000006c0: return "MSR_LASTBRANCH_0_TO_IP";
+        case 0x000006c1: return "MSR_LASTBRANCH_1_TO_IP";
+        case 0x000006c2: return "MSR_LASTBRANCH_2_TO_IP";
+        case 0x000006c3: return "MSR_LASTBRANCH_3_TO_IP";
+        case 0x000006c4: return "MSR_LASTBRANCH_4_TO_IP";
+        case 0x000006c5: return "MSR_LASTBRANCH_5_TO_IP";
+        case 0x000006c6: return "MSR_LASTBRANCH_6_TO_IP";
+        case 0x000006c7: return "MSR_LASTBRANCH_7_TO_IP";
+        case 0x000006c8: return "MSR_LASTBRANCH_8_TO_IP";
+        case 0x000006c9: return "MSR_LASTBRANCH_9_TO_IP";
+        case 0x000006ca: return "MSR_LASTBRANCH_10_TO_IP";
+        case 0x000006cb: return "MSR_LASTBRANCH_11_TO_IP";
+        case 0x000006cc: return "MSR_LASTBRANCH_12_TO_IP";
+        case 0x000006cd: return "MSR_LASTBRANCH_13_TO_IP";
+        case 0x000006ce: return "MSR_LASTBRANCH_14_TO_IP";
+        case 0x000006cf: return "MSR_LASTBRANCH_15_TO_IP";
+        case 0x000006e0: return "IA32_TSC_DEADLINE";
+
+
+        /* 0x1000..0x1004 seems to have been used by IBM 386 and 486 clones too. */
+        case 0x00001000: return "P6_DEBUG_REGISTER_0";
+        case 0x00001001: return "P6_DEBUG_REGISTER_1";
+        case 0x00001002: return "P6_DEBUG_REGISTER_2";
+        case 0x00001003: return "P6_DEBUG_REGISTER_3";
+        case 0x00001004: return "P6_DEBUG_REGISTER_4";
+        case 0x00001005: return "P6_DEBUG_REGISTER_5";
+        case 0x00001006: return "P6_DEBUG_REGISTER_6";
+        case 0x00001007: return "P6_DEBUG_REGISTER_7";
+        case 0x0000103f: return "P6_UNK_0000_103f"; /* P6_M_Dothan. */
+        case 0x000010cd: return "P6_UNK_0000_10cd"; /* P6_M_Dothan. */
+        case 0x00002000: return "P6_CR0";
+        case 0x00002002: return "P6_CR2";
+        case 0x00002003: return "P6_CR3";
+        case 0x00002004: return "P6_CR4";
+        case 0x0000203f: return "P6_UNK_0000_203f"; /* P6_M_Dothan. */
+        case 0x000020cd: return "P6_UNK_0000_20cd"; /* P6_M_Dothan. */
+        case 0x0000303f: return "P6_UNK_0000_303f"; /* P6_M_Dothan. */
+        case 0x000030cd: return "P6_UNK_0000_30cd"; /* P6_M_Dothan. */
+
+        case 0xc0000080: return "AMD64_EFER";
+        case 0xc0000081: return "AMD64_STAR";
+        case 0xc0000082: return "AMD64_STAR64";
+        case 0xc0000083: return "AMD64_STARCOMPAT";
+        case 0xc0000084: return "AMD64_SYSCALL_FLAG_MASK";
+        case 0xc0000100: return "AMD64_FS_BASE";
+        case 0xc0000101: return "AMD64_GS_BASE";
+        case 0xc0000102: return "AMD64_KERNEL_GS_BASE";
+        case 0xc0000103: return "AMD64_TSC_AUX";
+        case 0xc0000104: return "AMD_15H_TSC_RATE";
+        case 0xc0000105: return "AMD_15H_LWP_CFG";      /* Only Family 15h? */
+        case 0xc0000106: return "AMD_15H_LWP_CBADDR";   /* Only Family 15h? */
+        case 0xc0000408: return "AMD_10H_MC4_MISC1";
+        case 0xc0000409: return "AMD_10H_MC4_MISC2";
+        case 0xc000040a: return "AMD_10H_MC4_MISC3";
+        case 0xc000040b: return "AMD_10H_MC4_MISC4";
+        case 0xc000040c: return "AMD_10H_MC4_MISC5";
+        case 0xc000040d: return "AMD_10H_MC4_MISC6";
+        case 0xc000040e: return "AMD_10H_MC4_MISC7";
+        case 0xc000040f: return "AMD_10H_MC4_MISC8";
+        case 0xc0010000: return "AMD_K8_PERF_CTL_0";
+        case 0xc0010001: return "AMD_K8_PERF_CTL_1";
+        case 0xc0010002: return "AMD_K8_PERF_CTL_2";
+        case 0xc0010003: return "AMD_K8_PERF_CTL_3";
+        case 0xc0010004: return "AMD_K8_PERF_CTR_0";
+        case 0xc0010005: return "AMD_K8_PERF_CTR_1";
+        case 0xc0010006: return "AMD_K8_PERF_CTR_2";
+        case 0xc0010007: return "AMD_K8_PERF_CTR_3";
+        case 0xc0010010: return "AMD_K8_SYS_CFG";
+        case 0xc0010015: return "AMD_K8_HW_CFG";
+        case 0xc0010016: return "AMD_K8_IORR_BASE_0";
+        case 0xc0010017: return "AMD_K8_IORR_MASK_0";
+        case 0xc0010018: return "AMD_K8_IORR_BASE_1";
+        case 0xc0010019: return "AMD_K8_IORR_MASK_1";
+        case 0xc001001a: return "AMD_K8_TOP_MEM";
+        case 0xc001001d: return "AMD_K8_TOP_MEM2";
+        case 0xc001001e: return "AMD_K8_MANID";
+        case 0xc001001f: return "AMD_K8_NB_CFG1";
+        case 0xc0010022: return "AMD_K8_MC_XCPT_REDIR";
+        case 0xc0010028: return "AMD_K8_UNK_c001_0028";
+        case 0xc0010029: return "AMD_K8_UNK_c001_0029";
+        case 0xc001002a: return "AMD_K8_UNK_c001_002a";
+        case 0xc001002b: return "AMD_K8_UNK_c001_002b";
+        case 0xc001002c: return "AMD_K8_UNK_c001_002c";
+        case 0xc001002d: return "AMD_K8_UNK_c001_002d";
+        case 0xc0010030: return "AMD_K8_CPU_NAME_0";
+        case 0xc0010031: return "AMD_K8_CPU_NAME_1";
+        case 0xc0010032: return "AMD_K8_CPU_NAME_2";
+        case 0xc0010033: return "AMD_K8_CPU_NAME_3";
+        case 0xc0010034: return "AMD_K8_CPU_NAME_4";
+        case 0xc0010035: return "AMD_K8_CPU_NAME_5";
+        case 0xc001003e: return "AMD_K8_HTC";
+        case 0xc001003f: return "AMD_K8_STC";
+        case 0xc0010043: return "AMD_K8_THERMTRIP_STATUS"; /* BDKG says it was removed in K8 revision C.*/
+        case 0xc0010044: return "AMD_K8_MC_CTL_MASK_0";
+        case 0xc0010045: return "AMD_K8_MC_CTL_MASK_1";
+        case 0xc0010046: return "AMD_K8_MC_CTL_MASK_2";
+        case 0xc0010047: return "AMD_K8_MC_CTL_MASK_3";
+        case 0xc0010048: return "AMD_K8_MC_CTL_MASK_4";
+        case 0xc0010049: return "AMD_K8_MC_CTL_MASK_5";
+        case 0xc001004a: return "AMD_K8_MC_CTL_MASK_6";
+        //case 0xc001004b: return "AMD_K8_MC_CTL_MASK_7";
+        case 0xc0010050: return "AMD_K8_SMI_ON_IO_TRAP_0";
+        case 0xc0010051: return "AMD_K8_SMI_ON_IO_TRAP_1";
+        case 0xc0010052: return "AMD_K8_SMI_ON_IO_TRAP_2";
+        case 0xc0010053: return "AMD_K8_SMI_ON_IO_TRAP_3";
+        case 0xc0010054: return "AMD_K8_SMI_ON_IO_TRAP_CTL_STS";
+        case 0xc0010055: return "AMD_K8_INT_PENDING_MSG";
+        case 0xc0010056: return "AMD_K8_SMI_TRIGGER_IO_CYCLE";
+        case 0xc0010057: return "AMD_10H_UNK_c001_0057";
+        case 0xc0010058: return "AMD_10H_MMIO_CFG_BASE_ADDR";
+        case 0xc0010059: return "AMD_10H_TRAP_CTL?"; /* Undocumented, only one google hit. */
+        case 0xc001005a: return "AMD_10H_UNK_c001_005a";
+        case 0xc001005b: return "AMD_10H_UNK_c001_005b";
+        case 0xc001005c: return "AMD_10H_UNK_c001_005c";
+        case 0xc001005d: return "AMD_10H_UNK_c001_005d";
+        case 0xc0010060: return "AMD_K8_BIST_RESULT";   /* BDKG says it as introduced with revision F. */
+        case 0xc0010061: return "AMD_10H_P_ST_CUR_LIM";
+        case 0xc0010062: return "AMD_10H_P_ST_CTL";
+        case 0xc0010063: return "AMD_10H_P_ST_STS";
+        case 0xc0010064: return "AMD_10H_P_ST_0";
+        case 0xc0010065: return "AMD_10H_P_ST_1";
+        case 0xc0010066: return "AMD_10H_P_ST_2";
+        case 0xc0010067: return "AMD_10H_P_ST_3";
+        case 0xc0010068: return "AMD_10H_P_ST_4";
+        case 0xc0010069: return "AMD_10H_P_ST_5";
+        case 0xc001006a: return "AMD_10H_P_ST_6";
+        case 0xc001006b: return "AMD_10H_P_ST_7";
+        case 0xc0010070: return "AMD_10H_COFVID_CTL";
+        case 0xc0010071: return "AMD_10H_COFVID_STS";
+        case 0xc0010073: return "AMD_10H_C_ST_IO_BASE_ADDR";
+        case 0xc0010074: return "AMD_10H_CPU_WD_TMR_CFG";
+        // case 0xc0010075: return "AMD_15H_APML_TDP_LIM";
+        // case 0xc0010077: return "AMD_15H_CPU_PWR_IN_TDP";
+        // case 0xc0010078: return "AMD_15H_PWR_AVG_PERIOD";
+        // case 0xc0010079: return "AMD_15H_DRAM_CTR_CMD_THR";
+        // case 0xc0010080: return "AMD_16H_FSFM_ACT_CNT_0";
+        // case 0xc0010081: return "AMD_16H_FSFM_REF_CNT_0";
+        case 0xc0010111: return "AMD_K8_SMM_BASE";
+        case 0xc0010112: return "AMD_K8_SMM_ADDR";
+        case 0xc0010113: return "AMD_K8_SMM_MASK";
+        case 0xc0010114: return "AMD_K8_VM_CR";
+        case 0xc0010115: return "AMD_K8_IGNNE";
+        case 0xc0010116: return "AMD_K8_SMM_CTL";
+        case 0xc0010117: return "AMD_K8_VM_HSAVE_PA";
+        case 0xc0010118: return "AMD_10H_VM_LOCK_KEY";
+        case 0xc0010119: return "AMD_10H_SSM_LOCK_KEY";
+        case 0xc001011a: return "AMD_10H_LOCAL_SMI_STS";
+        case 0xc0010140: return "AMD_10H_OSVW_ID_LEN";
+        case 0xc0010141: return "AMD_10H_OSVW_STS";
+        case 0xc0010200: return "AMD_K8_PERF_CTL_0";
+        case 0xc0010202: return "AMD_K8_PERF_CTL_1";
+        case 0xc0010204: return "AMD_K8_PERF_CTL_2";
+        case 0xc0010206: return "AMD_K8_PERF_CTL_3";
+        case 0xc0010208: return "AMD_K8_PERF_CTL_4";
+        case 0xc001020a: return "AMD_K8_PERF_CTL_5";
+        //case 0xc001020c: return "AMD_K8_PERF_CTL_6";
+        //case 0xc001020e: return "AMD_K8_PERF_CTL_7";
+        case 0xc0010201: return "AMD_K8_PERF_CTR_0";
+        case 0xc0010203: return "AMD_K8_PERF_CTR_1";
+        case 0xc0010205: return "AMD_K8_PERF_CTR_2";
+        case 0xc0010207: return "AMD_K8_PERF_CTR_3";
+        case 0xc0010209: return "AMD_K8_PERF_CTR_4";
+        case 0xc001020b: return "AMD_K8_PERF_CTR_5";
+        //case 0xc001020d: return "AMD_K8_PERF_CTR_6";
+        //case 0xc001020f: return "AMD_K8_PERF_CTR_7";
+        case 0xc0010230: return "AMD_16H_L2I_PERF_CTL_0";
+        case 0xc0010232: return "AMD_16H_L2I_PERF_CTL_1";
+        case 0xc0010234: return "AMD_16H_L2I_PERF_CTL_2";
+        case 0xc0010236: return "AMD_16H_L2I_PERF_CTL_3";
+        //case 0xc0010238: return "AMD_16H_L2I_PERF_CTL_4";
+        //case 0xc001023a: return "AMD_16H_L2I_PERF_CTL_5";
+        //case 0xc001030c: return "AMD_16H_L2I_PERF_CTL_6";
+        //case 0xc001023e: return "AMD_16H_L2I_PERF_CTL_7";
+        case 0xc0010231: return "AMD_16H_L2I_PERF_CTR_0";
+        case 0xc0010233: return "AMD_16H_L2I_PERF_CTR_1";
+        case 0xc0010235: return "AMD_16H_L2I_PERF_CTR_2";
+        case 0xc0010237: return "AMD_16H_L2I_PERF_CTR_3";
+        //case 0xc0010239: return "AMD_16H_L2I_PERF_CTR_4";
+        //case 0xc001023b: return "AMD_16H_L2I_PERF_CTR_5";
+        //case 0xc001023d: return "AMD_16H_L2I_PERF_CTR_6";
+        //case 0xc001023f: return "AMD_16H_L2I_PERF_CTR_7";
+        case 0xc0010240: return "AMD_15H_NB_PERF_CTL_0";
+        case 0xc0010242: return "AMD_15H_NB_PERF_CTL_1";
+        case 0xc0010244: return "AMD_15H_NB_PERF_CTL_2";
+        case 0xc0010246: return "AMD_15H_NB_PERF_CTL_3";
+        //case 0xc0010248: return "AMD_15H_NB_PERF_CTL_4";
+        //case 0xc001024a: return "AMD_15H_NB_PERF_CTL_5";
+        //case 0xc001024c: return "AMD_15H_NB_PERF_CTL_6";
+        //case 0xc001024e: return "AMD_15H_NB_PERF_CTL_7";
+        case 0xc0010241: return "AMD_15H_NB_PERF_CTR_0";
+        case 0xc0010243: return "AMD_15H_NB_PERF_CTR_1";
+        case 0xc0010245: return "AMD_15H_NB_PERF_CTR_2";
+        case 0xc0010247: return "AMD_15H_NB_PERF_CTR_3";
+        //case 0xc0010249: return "AMD_15H_NB_PERF_CTR_4";
+        //case 0xc001024b: return "AMD_15H_NB_PERF_CTR_5";
+        //case 0xc001024d: return "AMD_15H_NB_PERF_CTR_6";
+        //case 0xc001024f: return "AMD_15H_NB_PERF_CTR_7";
+        case 0xc0011000: return "AMD_K7_MCODE_CTL";
+        case 0xc0011001: return "AMD_K7_APIC_CLUSTER_ID"; /* Mentioned in BKDG (r3.00) for fam16h when describing EBL_CR_POWERON. */
+        case 0xc0011002: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AMD_K8_CPUID_CTL_STD07" : NULL;
+        case 0xc0011003: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AMD_K8_CPUID_CTL_STD06" : NULL;
+        case 0xc0011004: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AMD_K8_CPUID_CTL_STD01" : NULL;
+        case 0xc0011005: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AMD_K8_CPUID_CTL_EXT01" : NULL;
+        case 0xc0011006: return "AMD_K7_DEBUG_STS?";
+        case 0xc0011007: return "AMD_K7_BH_TRACE_BASE?";
+        case 0xc0011008: return "AMD_K7_BH_TRACE_PTR?";
+        case 0xc0011009: return "AMD_K7_BH_TRACE_LIM?";
+        case 0xc001100a: return "AMD_K7_HDT_CFG?";
+        case 0xc001100b: return "AMD_K7_FAST_FLUSH_COUNT?";
+        case 0xc001100c: return "AMD_K7_NODE_ID";
+        case 0xc001100d: return "AMD_K8_LOGICAL_CPUS_NUM?";
+        case 0xc001100e: return "AMD_K8_WRMSR_BP?";
+        case 0xc001100f: return "AMD_K8_WRMSR_BP_MASK?";
+        case 0xc0011010: return "AMD_K8_BH_TRACE_CTL?";
+        case 0xc0011011: return "AMD_K8_BH_TRACE_USRD?";
+        case 0xc0011012: return "AMD_K7_UNK_c001_1012";
+        case 0xc0011013: return "AMD_K7_UNK_c001_1013";
+        case 0xc0011014: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AMD_K8_XCPT_BP_RIP?" : "AMD_K7_MOBIL_DEBUG?";
+        case 0xc0011015: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AMD_K8_XCPT_BP_RIP_MASK?" : NULL;
+        case 0xc0011016: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AMD_K8_COND_HDT_VAL?" : NULL;
+        case 0xc0011017: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AMD_K8_COND_HDT_VAL_MASK?" : NULL;
+        case 0xc0011018: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AMD_K8_XCPT_BP_CTL?" : NULL;
+        case 0xc0011019: return g_enmMicroarch >= kCpumMicroarch_AMD_15h_Piledriver ? "AMD_16H_DR1_ADDR_MASK" : NULL;
+        case 0xc001101a: return g_enmMicroarch >= kCpumMicroarch_AMD_15h_Piledriver ? "AMD_16H_DR2_ADDR_MASK" : NULL;
+        case 0xc001101b: return g_enmMicroarch >= kCpumMicroarch_AMD_15h_Piledriver ? "AMD_16H_DR3_ADDR_MASK" : NULL;
+        case 0xc001101d: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AMD_K8_NB_BIST?" : NULL;
+        case 0xc001101e: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AMD_K8_THERMTRIP_2?" : NULL;
+        case 0xc001101f: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AMD_K8_NB_CFG?" : NULL;
+        case 0xc0011020: return "AMD_K7_LS_CFG";
+        case 0xc0011021: return "AMD_K7_IC_CFG";
+        case 0xc0011022: return "AMD_K7_DC_CFG";
+        case 0xc0011023: return CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch) ? "AMD_15H_CU_CFG" : "AMD_K7_BU_CFG";
+        case 0xc0011024: return "AMD_K7_DEBUG_CTL_2?";
+        case 0xc0011025: return "AMD_K7_DR0_DATA_MATCH?";
+        case 0xc0011026: return "AMD_K7_DR0_DATA_MATCH?";
+        case 0xc0011027: return "AMD_K7_DR0_ADDR_MASK";
+        case 0xc0011028: return g_enmMicroarch >= kCpumMicroarch_AMD_15h_First ? "AMD_15H_FP_CFG"
+                              : CPUMMICROARCH_IS_AMD_FAM_10H(g_enmMicroarch)   ? "AMD_10H_UNK_c001_1028"
+                              : NULL;
+        case 0xc0011029: return g_enmMicroarch >= kCpumMicroarch_AMD_15h_First ? "AMD_15H_DC_CFG"
+                              : CPUMMICROARCH_IS_AMD_FAM_10H(g_enmMicroarch)   ? "AMD_10H_UNK_c001_1029"
+                              : NULL;
+        case 0xc001102a: return CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch)   ? "AMD_15H_CU_CFG2"
+                              : CPUMMICROARCH_IS_AMD_FAM_10H(g_enmMicroarch) || g_enmMicroarch > kCpumMicroarch_AMD_15h_End
+                              ? "AMD_10H_BU_CFG2" /* 10h & 16h */ : NULL;
+        case 0xc001102b: return CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch)   ? "AMD_15H_CU_CFG3" : NULL;
+        case 0xc001102c: return CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch)   ? "AMD_15H_EX_CFG" : NULL;
+        case 0xc001102d: return CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch)   ? "AMD_15H_LS_CFG2" : NULL;
+        case 0xc0011030: return "AMD_10H_IBS_FETCH_CTL";
+        case 0xc0011031: return "AMD_10H_IBS_FETCH_LIN_ADDR";
+        case 0xc0011032: return "AMD_10H_IBS_FETCH_PHYS_ADDR";
+        case 0xc0011033: return "AMD_10H_IBS_OP_EXEC_CTL";
+        case 0xc0011034: return "AMD_10H_IBS_OP_RIP";
+        case 0xc0011035: return "AMD_10H_IBS_OP_DATA";
+        case 0xc0011036: return "AMD_10H_IBS_OP_DATA2";
+        case 0xc0011037: return "AMD_10H_IBS_OP_DATA3";
+        case 0xc0011038: return "AMD_10H_IBS_DC_LIN_ADDR";
+        case 0xc0011039: return "AMD_10H_IBS_DC_PHYS_ADDR";
+        case 0xc001103a: return "AMD_10H_IBS_CTL";
+        case 0xc001103b: return "AMD_14H_IBS_BR_TARGET";
+
+        case 0xc0011040: return "AMD_15H_UNK_c001_1040";
+        case 0xc0011041: return "AMD_15H_UNK_c001_1041";
+        case 0xc0011042: return "AMD_15H_UNK_c001_1042";
+        case 0xc0011043: return "AMD_15H_UNK_c001_1043";
+        case 0xc0011044: return "AMD_15H_UNK_c001_1044";
+        case 0xc0011045: return "AMD_15H_UNK_c001_1045";
+        case 0xc0011046: return "AMD_15H_UNK_c001_1046";
+        case 0xc0011047: return "AMD_15H_UNK_c001_1047";
+        case 0xc0011048: return "AMD_15H_UNK_c001_1048";
+        case 0xc0011049: return "AMD_15H_UNK_c001_1049";
+        case 0xc001104a: return "AMD_15H_UNK_c001_104a";
+        case 0xc001104b: return "AMD_15H_UNK_c001_104b";
+        case 0xc001104c: return "AMD_15H_UNK_c001_104c";
+        case 0xc001104d: return "AMD_15H_UNK_c001_104d";
+        case 0xc001104e: return "AMD_15H_UNK_c001_104e";
+        case 0xc001104f: return "AMD_15H_UNK_c001_104f";
+        case 0xc0011050: return "AMD_15H_UNK_c001_1050";
+        case 0xc0011051: return "AMD_15H_UNK_c001_1051";
+        case 0xc0011052: return "AMD_15H_UNK_c001_1052";
+        case 0xc0011053: return "AMD_15H_UNK_c001_1053";
+        case 0xc0011054: return "AMD_15H_UNK_c001_1054";
+        case 0xc0011055: return "AMD_15H_UNK_c001_1055";
+        case 0xc0011056: return "AMD_15H_UNK_c001_1056";
+        case 0xc0011057: return "AMD_15H_UNK_c001_1057";
+        case 0xc0011058: return "AMD_15H_UNK_c001_1058";
+        case 0xc0011059: return "AMD_15H_UNK_c001_1059";
+        case 0xc001105a: return "AMD_15H_UNK_c001_105a";
+        case 0xc001105b: return "AMD_15H_UNK_c001_105b";
+        case 0xc001105c: return "AMD_15H_UNK_c001_105c";
+        case 0xc001105d: return "AMD_15H_UNK_c001_105d";
+        case 0xc001105e: return "AMD_15H_UNK_c001_105e";
+        case 0xc001105f: return "AMD_15H_UNK_c001_105f";
+        case 0xc0011060: return "AMD_15H_UNK_c001_1060";
+        case 0xc0011061: return "AMD_15H_UNK_c001_1061";
+        case 0xc0011062: return "AMD_15H_UNK_c001_1062";
+        case 0xc0011063: return "AMD_15H_UNK_c001_1063";
+        case 0xc0011064: return "AMD_15H_UNK_c001_1064";
+        case 0xc0011065: return "AMD_15H_UNK_c001_1065";
+        case 0xc0011066: return "AMD_15H_UNK_c001_1066";
+        case 0xc0011067: return "AMD_15H_UNK_c001_1067";
+        case 0xc0011068: return "AMD_15H_UNK_c001_1068";
+        case 0xc0011069: return "AMD_15H_UNK_c001_1069";
+        case 0xc001106a: return "AMD_15H_UNK_c001_106a";
+        case 0xc001106b: return "AMD_15H_UNK_c001_106b";
+        case 0xc001106c: return "AMD_15H_UNK_c001_106c";
+        case 0xc001106d: return "AMD_15H_UNK_c001_106d";
+        case 0xc001106e: return "AMD_15H_UNK_c001_106e";
+        case 0xc001106f: return "AMD_15H_UNK_c001_106f";
+        case 0xc0011070: return "AMD_15H_UNK_c001_1070"; /* coreboot defines this, but with a numerical name. */
+        case 0xc0011071: return "AMD_15H_UNK_c001_1071";
+        case 0xc0011072: return "AMD_15H_UNK_c001_1072";
+        case 0xc0011073: return "AMD_15H_UNK_c001_1073";
+        case 0xc0011080: return "AMD_15H_UNK_c001_1080";
+    }
+
+    /*
+     * Bunch of unknown sandy bridge registers.  They might seem like the
+     * nehalem based xeon stuff, but the layout doesn't match.  I bet it's the
+     * same kind of registes though (i.e. uncore (UNC)).
+     *
+     * Kudos to Intel for keeping these a secret!  Many thanks guys!!
+     */
+    if (g_enmMicroarch == kCpumMicroarch_Intel_Core7_SandyBridge)
+        switch (uMsr)
+        {
+            case 0x00000a00: return "I7_SB_UNK_0000_0a00"; case 0x00000a01: return "I7_SB_UNK_0000_0a01";
+            case 0x00000a02: return "I7_SB_UNK_0000_0a02";
+            case 0x00000c00: return "I7_SB_UNK_0000_0c00"; case 0x00000c01: return "I7_SB_UNK_0000_0c01";
+            case 0x00000c06: return "I7_SB_UNK_0000_0c06"; case 0x00000c08: return "I7_SB_UNK_0000_0c08";
+            case 0x00000c09: return "I7_SB_UNK_0000_0c09"; case 0x00000c10: return "I7_SB_UNK_0000_0c10";
+            case 0x00000c11: return "I7_SB_UNK_0000_0c11"; case 0x00000c14: return "I7_SB_UNK_0000_0c14";
+            case 0x00000c15: return "I7_SB_UNK_0000_0c15"; case 0x00000c16: return "I7_SB_UNK_0000_0c16";
+            case 0x00000c17: return "I7_SB_UNK_0000_0c17"; case 0x00000c24: return "I7_SB_UNK_0000_0c24";
+            case 0x00000c30: return "I7_SB_UNK_0000_0c30"; case 0x00000c31: return "I7_SB_UNK_0000_0c31";
+            case 0x00000c32: return "I7_SB_UNK_0000_0c32"; case 0x00000c33: return "I7_SB_UNK_0000_0c33";
+            case 0x00000c34: return "I7_SB_UNK_0000_0c34"; case 0x00000c35: return "I7_SB_UNK_0000_0c35";
+            case 0x00000c36: return "I7_SB_UNK_0000_0c36"; case 0x00000c37: return "I7_SB_UNK_0000_0c37";
+            case 0x00000c38: return "I7_SB_UNK_0000_0c38"; case 0x00000c39: return "I7_SB_UNK_0000_0c39";
+            case 0x00000d04: return "I7_SB_UNK_0000_0d04";
+            case 0x00000d10: return "I7_SB_UNK_0000_0d10"; case 0x00000d11: return "I7_SB_UNK_0000_0d11";
+            case 0x00000d12: return "I7_SB_UNK_0000_0d12"; case 0x00000d13: return "I7_SB_UNK_0000_0d13";
+            case 0x00000d14: return "I7_SB_UNK_0000_0d14"; case 0x00000d15: return "I7_SB_UNK_0000_0d15";
+            case 0x00000d16: return "I7_SB_UNK_0000_0d16"; case 0x00000d17: return "I7_SB_UNK_0000_0d17";
+            case 0x00000d18: return "I7_SB_UNK_0000_0d18"; case 0x00000d19: return "I7_SB_UNK_0000_0d19";
+            case 0x00000d24: return "I7_SB_UNK_0000_0d24";
+            case 0x00000d30: return "I7_SB_UNK_0000_0d30"; case 0x00000d31: return "I7_SB_UNK_0000_0d31";
+            case 0x00000d32: return "I7_SB_UNK_0000_0d32"; case 0x00000d33: return "I7_SB_UNK_0000_0d33";
+            case 0x00000d34: return "I7_SB_UNK_0000_0d34"; case 0x00000d35: return "I7_SB_UNK_0000_0d35";
+            case 0x00000d36: return "I7_SB_UNK_0000_0d36"; case 0x00000d37: return "I7_SB_UNK_0000_0d37";
+            case 0x00000d38: return "I7_SB_UNK_0000_0d38"; case 0x00000d39: return "I7_SB_UNK_0000_0d39";
+            case 0x00000d44: return "I7_SB_UNK_0000_0d44";
+            case 0x00000d50: return "I7_SB_UNK_0000_0d50"; case 0x00000d51: return "I7_SB_UNK_0000_0d51";
+            case 0x00000d52: return "I7_SB_UNK_0000_0d52"; case 0x00000d53: return "I7_SB_UNK_0000_0d53";
+            case 0x00000d54: return "I7_SB_UNK_0000_0d54"; case 0x00000d55: return "I7_SB_UNK_0000_0d55";
+            case 0x00000d56: return "I7_SB_UNK_0000_0d56"; case 0x00000d57: return "I7_SB_UNK_0000_0d57";
+            case 0x00000d58: return "I7_SB_UNK_0000_0d58"; case 0x00000d59: return "I7_SB_UNK_0000_0d59";
+            case 0x00000d64: return "I7_SB_UNK_0000_0d64";
+            case 0x00000d70: return "I7_SB_UNK_0000_0d70"; case 0x00000d71: return "I7_SB_UNK_0000_0d71";
+            case 0x00000d72: return "I7_SB_UNK_0000_0d72"; case 0x00000d73: return "I7_SB_UNK_0000_0d73";
+            case 0x00000d74: return "I7_SB_UNK_0000_0d74"; case 0x00000d75: return "I7_SB_UNK_0000_0d75";
+            case 0x00000d76: return "I7_SB_UNK_0000_0d76"; case 0x00000d77: return "I7_SB_UNK_0000_0d77";
+            case 0x00000d78: return "I7_SB_UNK_0000_0d78"; case 0x00000d79: return "I7_SB_UNK_0000_0d79";
+            case 0x00000d84: return "I7_SB_UNK_0000_0d84";
+            case 0x00000d90: return "I7_SB_UNK_0000_0d90"; case 0x00000d91: return "I7_SB_UNK_0000_0d91";
+            case 0x00000d92: return "I7_SB_UNK_0000_0d92"; case 0x00000d93: return "I7_SB_UNK_0000_0d93";
+            case 0x00000d94: return "I7_SB_UNK_0000_0d94"; case 0x00000d95: return "I7_SB_UNK_0000_0d95";
+            case 0x00000d96: return "I7_SB_UNK_0000_0d96"; case 0x00000d97: return "I7_SB_UNK_0000_0d97";
+            case 0x00000d98: return "I7_SB_UNK_0000_0d98"; case 0x00000d99: return "I7_SB_UNK_0000_0d99";
+            case 0x00000da4: return "I7_SB_UNK_0000_0da4";
+            case 0x00000db0: return "I7_SB_UNK_0000_0db0"; case 0x00000db1: return "I7_SB_UNK_0000_0db1";
+            case 0x00000db2: return "I7_SB_UNK_0000_0db2"; case 0x00000db3: return "I7_SB_UNK_0000_0db3";
+            case 0x00000db4: return "I7_SB_UNK_0000_0db4"; case 0x00000db5: return "I7_SB_UNK_0000_0db5";
+            case 0x00000db6: return "I7_SB_UNK_0000_0db6"; case 0x00000db7: return "I7_SB_UNK_0000_0db7";
+            case 0x00000db8: return "I7_SB_UNK_0000_0db8"; case 0x00000db9: return "I7_SB_UNK_0000_0db9";
+        }
+    return NULL;
+}
+
+
+/**
+ * Gets the name of an MSR.
+ *
+ * This may return a static buffer, so the content should only be considered
+ * valid until the next time this function is called!
+ *
+ * @returns MSR name.
+ * @param   uMsr                The MSR in question.
+ */
+static const char *getMsrName(uint32_t uMsr)
+{
+    /* Known MSRs come back with fixed, read-only names. */
+    const char *pszName = getMsrNameHandled(uMsr);
+    if (!pszName)
+    {
+        /*
+         * Unknown MSR - needs looking into, so flag it with a TODO_XXXX_XXXX name.
+         */
+        static char s_szBuf[32];
+        RTStrPrintf(s_szBuf, sizeof(s_szBuf), "TODO_%04x_%04x", RT_HI_U16(uMsr), RT_LO_U16(uMsr));
+        pszName = s_szBuf;
+    }
+    return pszName;
+}
+
+
+
+/**
+ * Gets the name of an MSR range.
+ *
+ * This may return a static buffer, so the content should only be considered
+ * valid until the next time this function is called!
+ *
+ * @returns MSR name.
+ * @param   uMsr                The first MSR in the range.
+ */
+static const char *getMsrRangeName(uint32_t uMsr)
+{
+    /* Ranges that have their own fixed names. */
+    if (uMsr >= 0x000003f8 && uMsr <= 0x000003fa)
+        return "I7_MSR_PKG_Cn_RESIDENCY";
+    if (uMsr >= 0x000003fc && uMsr <= 0x000003fe)
+        return "I7_MSR_CORE_Cn_RESIDENCY";
+    if (uMsr == 0x00000400)
+        return "IA32_MCi_CTL_STATUS_ADDR_MISC";
+    if (uMsr == 0x00000680)
+        return "MSR_LASTBRANCH_n_FROM_IP";
+    if (uMsr == 0x000006c0)
+        return "MSR_LASTBRANCH_n_TO_IP";
+    if (uMsr >= 0x00000800 && uMsr <= 0x0000080f)
+        return "IA32_X2APIC_n";
+
+    static char s_szBuf[96];
+    const char *pszReadOnly = getMsrNameHandled(uMsr);
+    if (pszReadOnly)
+    {
+        /*
+         * Derive the range name from the first MSR's name by replacing its
+         * last character with an 'n'.
+         */
+        RTStrCopy(s_szBuf, sizeof(s_szBuf), pszReadOnly);
+        size_t idxLast = strlen(s_szBuf);
+        if (idxLast > 0)
+            idxLast--;
+        if (idxLast + 1 < sizeof(s_szBuf))
+        {
+            s_szBuf[idxLast]     = 'n';
+            s_szBuf[idxLast + 1] = '\0';
+        }
+    }
+    else
+    {
+        /*
+         * This MSR needs looking into, return a TODO_XXXX_XXXX_n name.
+         */
+        RTStrPrintf(s_szBuf, sizeof(s_szBuf), "TODO_%04x_%04x_n", RT_HI_U16(uMsr), RT_LO_U16(uMsr));
+    }
+    return s_szBuf;
+}
+
+
+/**
+ * Returns the function name for MSRs that have one or two.
+ *
+ * @returns Function name if applicable, NULL if not.
+ * @param   uMsr            The MSR in question.
+ * @param   pfTakesValue    Whether this MSR function takes a value or not.
+ *                          Optional.
+ */
+static const char *getMsrFnName(uint32_t uMsr, bool *pfTakesValue)
+{
+    bool fTmp;
+    if (!pfTakesValue)
+        pfTakesValue = &fTmp;
+
+    *pfTakesValue = false;
+
+    switch (uMsr)
+    {
+        case 0x00000000: return "Ia32P5McAddr";
+        case 0x00000001: return "Ia32P5McType";
+        case 0x00000006:
+            if (g_enmMicroarch >= kCpumMicroarch_Intel_First && g_enmMicroarch <= kCpumMicroarch_Intel_P6_Core_Atom_First)
+                return NULL; /* TR4 / cache tag on Pentium, but that's for later. */
+            return "Ia32MonitorFilterLineSize";
+        case 0x00000010: return "Ia32TimestampCounter";
+        case 0x0000001b: return "Ia32ApicBase";
+        case 0x0000002a: *pfTakesValue = true; return "IntelEblCrPowerOn";
+        //case 0x00000033: return "IntelTestCtl";
+        case 0x0000003a: return "Ia32FeatureControl";
+
+        case 0x00000040:
+        case 0x00000041:
+        case 0x00000042:
+        case 0x00000043:
+        case 0x00000044:
+        case 0x00000045:
+        case 0x00000046:
+        case 0x00000047:
+            return "IntelLastBranchFromToN";
+
+        case 0x0000009b: return "Ia32SmmMonitorCtl";
+
+        case 0x000000c1:
+        case 0x000000c2:
+        case 0x000000c3:
+        case 0x000000c4:
+            return "Ia32PmcN";
+        case 0x000000c5:
+        case 0x000000c6:
+        case 0x000000c7:
+        case 0x000000c8:
+            if (g_enmMicroarch >= kCpumMicroarch_Intel_Core7_First)
+                return "Ia32PmcN";
+            return NULL;
+
+        case 0x000000e2: return "IntelPkgCStConfigControl";
+        case 0x000000e4: return "IntelPmgIoCaptureBase";
+        case 0x000000e7: return "Ia32MPerf";
+        case 0x000000e8: return "Ia32APerf";
+        case 0x000000fe: *pfTakesValue = true; return "Ia32MtrrCap";
+        case 0x00000119: *pfTakesValue = true; return "IntelBblCrCtl";
+        case 0x0000011e: *pfTakesValue = true; return "IntelBblCrCtl3";
+
+        case 0x00000130: return g_enmMicroarch == kCpumMicroarch_Intel_Core7_Westmere
+                             || g_enmMicroarch == kCpumMicroarch_Intel_Core7_Nehalem
+                              ? "IntelCpuId1FeatureMaskEcdx" : NULL;
+        case 0x00000131: return g_enmMicroarch == kCpumMicroarch_Intel_Core7_Westmere
+                             || g_enmMicroarch == kCpumMicroarch_Intel_Core7_Nehalem
+                              ? "IntelCpuId80000001FeatureMaskEcdx" : NULL;
+        case 0x00000132: return g_enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge
+                              ? "IntelCpuId1FeatureMaskEax" : NULL;
+        case 0x00000133: return g_enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge
+                              ? "IntelCpuId1FeatureMaskEcdx" : NULL;
+        case 0x00000134: return g_enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge
+                              ? "IntelCpuId80000001FeatureMaskEcdx" : NULL;
+        case 0x0000013c: return "IntelI7SandyAesNiCtl";
+        case 0x00000174: return "Ia32SysEnterCs";
+        case 0x00000175: return "Ia32SysEnterEsp";
+        case 0x00000176: return "Ia32SysEnterEip";
+        case 0x00000179: *pfTakesValue = true; return "Ia32McgCap";
+        case 0x0000017a: return "Ia32McgStatus";
+        case 0x0000017b: return "Ia32McgCtl";
+        case 0x0000017f: return "IntelI7SandyErrorControl"; /* SandyBridge. */
+        case 0x00000186: return "Ia32PerfEvtSelN";
+        case 0x00000187: return "Ia32PerfEvtSelN";
+        case 0x00000198: *pfTakesValue = true; return "Ia32PerfStatus";
+        case 0x00000199: *pfTakesValue = true; return "Ia32PerfCtl";
+        case 0x0000019a: *pfTakesValue = true; return "Ia32ClockModulation";
+        case 0x0000019b: *pfTakesValue = true; return "Ia32ThermInterrupt";
+        case 0x0000019c: *pfTakesValue = true; return "Ia32ThermStatus";
+        case 0x0000019d: *pfTakesValue = true; return "Ia32Therm2Ctl";
+        case 0x000001a0: *pfTakesValue = true; return "Ia32MiscEnable";
+        case 0x000001a2: *pfTakesValue = true; return "IntelI7TemperatureTarget";
+        case 0x000001a6: return "IntelI7MsrOffCoreResponseN";
+        case 0x000001a7: return "IntelI7MsrOffCoreResponseN";
+        case 0x000001aa: return CPUMMICROARCH_IS_INTEL_CORE7(g_enmMicroarch) ? "IntelI7MiscPwrMgmt" : NULL /*"P6PicSensCfg"*/;
+        case 0x000001ad: *pfTakesValue = true; return "IntelI7TurboRatioLimit"; /* SandyBridge+, Silvermount+ */
+        case 0x000001c8: return g_enmMicroarch >= kCpumMicroarch_Intel_Core7_Nehalem ? "IntelI7LbrSelect" : NULL;
+        case 0x000001c9: return    g_enmMicroarch >= kCpumMicroarch_Intel_Core_Yonah
+                                && g_enmMicroarch <= kCpumMicroarch_Intel_P6_Core_Atom_End
+                              ? "IntelLastBranchTos" : NULL /* Pentium M Dothan seems to have something else here. */;
+        case 0x000001d9: return "Ia32DebugCtl";
+        case 0x000001db: return "P6LastBranchFromIp";
+        case 0x000001dc: return "P6LastBranchToIp";
+        case 0x000001dd: return "P6LastIntFromIp";
+        case 0x000001de: return "P6LastIntToIp";
+        case 0x000001f0: return "IntelI7VirtualLegacyWireCap"; /* SandyBridge. */
+        case 0x000001f2: return "Ia32SmrrPhysBase";
+        case 0x000001f3: return "Ia32SmrrPhysMask";
+        case 0x000001f8: return "Ia32PlatformDcaCap";
+        case 0x000001f9: return "Ia32CpuDcaCap";
+        case 0x000001fa: return "Ia32Dca0Cap";
+        case 0x000001fc: return "IntelI7PowerCtl";
+
+        case 0x00000200: case 0x00000202: case 0x00000204: case 0x00000206:
+        case 0x00000208: case 0x0000020a: case 0x0000020c: case 0x0000020e:
+        case 0x00000210: case 0x00000212: case 0x00000214: case 0x00000216:
+        case 0x00000218: case 0x0000021a: case 0x0000021c: case 0x0000021e:
+            return "Ia32MtrrPhysBaseN";
+        case 0x00000201: case 0x00000203: case 0x00000205: case 0x00000207:
+        case 0x00000209: case 0x0000020b: case 0x0000020d: case 0x0000020f:
+        case 0x00000211: case 0x00000213: case 0x00000215: case 0x00000217:
+        case 0x00000219: case 0x0000021b: case 0x0000021d: case 0x0000021f:
+            return "Ia32MtrrPhysMaskN";
+        case 0x00000250:
+        case 0x00000258: case 0x00000259:
+        case 0x00000268: case 0x00000269: case 0x0000026a: case 0x0000026b:
+        case 0x0000026c: case 0x0000026d: case 0x0000026e: case 0x0000026f:
+            return "Ia32MtrrFixed";
+        case 0x00000277: *pfTakesValue = true; return "Ia32Pat";
+
+        case 0x00000280: case 0x00000281:  case 0x00000282: case 0x00000283:
+        case 0x00000284: case 0x00000285:  case 0x00000286: case 0x00000287:
+        case 0x00000288: case 0x00000289:  case 0x0000028a: case 0x0000028b:
+        case 0x0000028c: case 0x0000028d:  case 0x0000028e: case 0x0000028f:
+        case 0x00000290: case 0x00000291:  case 0x00000292: case 0x00000293:
+        case 0x00000294: case 0x00000295:  //case 0x00000296: case 0x00000297:
+        //case 0x00000298: case 0x00000299:  case 0x0000029a: case 0x0000029b:
+        //case 0x0000029c: case 0x0000029d:  case 0x0000029e: case 0x0000029f:
+            return "Ia32McNCtl2";
+
+        case 0x000002ff: return "Ia32MtrrDefType";
+        //case 0x00000305: return CPUMMICROARCH_IS_INTEL_NETBURST(g_enmMicroarch) ? TODO : NULL;
+        case 0x00000309: return CPUMMICROARCH_IS_INTEL_NETBURST(g_enmMicroarch) ? NULL /** @todo P4 */ : "Ia32FixedCtrN";
+        case 0x0000030a: return CPUMMICROARCH_IS_INTEL_NETBURST(g_enmMicroarch) ? NULL /** @todo P4 */ : "Ia32FixedCtrN";
+        case 0x0000030b: return CPUMMICROARCH_IS_INTEL_NETBURST(g_enmMicroarch) ? NULL /** @todo P4 */ : "Ia32FixedCtrN";
+        case 0x00000345: *pfTakesValue = true; return "Ia32PerfCapabilities";
+        case 0x0000038d: return "Ia32FixedCtrCtrl";
+        case 0x0000038e: *pfTakesValue = true; return "Ia32PerfGlobalStatus";
+        case 0x0000038f: return "Ia32PerfGlobalCtrl";
+        case 0x00000390: return "Ia32PerfGlobalOvfCtrl";
+        case 0x00000391: return "IntelI7UncPerfGlobalCtrl";             /* S,H,X */
+        case 0x00000392: return "IntelI7UncPerfGlobalStatus";           /* S,H,X */
+        case 0x00000393: return "IntelI7UncPerfGlobalOvfCtrl";          /* X. ASSUMING this is the same on sandybridge and later. */
+        case 0x00000394: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "IntelI7UncPerfFixedCtr"  /* X */   : "IntelI7UncPerfFixedCtrCtrl"; /* >= S,H */
+        case 0x00000395: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "IntelI7UncPerfFixedCtrCtrl" /* X*/ : "IntelI7UncPerfFixedCtr";     /* >= S,H */
+        case 0x00000396: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "IntelI7UncAddrOpcodeMatch" /* X */ : "IntelI7UncCbO_Config";       /* >= S,H */
+        case 0x0000039c: return "IntelI7SandyPebsNumAlt";
+        case 0x000003b0: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "IntelI7UncPmcN" /* X */            : "IntelI7UncArbPerfCtrN";      /* >= S,H */
+        case 0x000003b1: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "IntelI7UncPmcN" /* X */            : "IntelI7UncArbPerfCtrN";      /* >= S,H */
+        case 0x000003b2: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "IntelI7UncPmcN" /* X */            : "IntelI7UncArbPerfEvtSelN";   /* >= S,H */
+        case 0x000003b3: return g_enmMicroarch < kCpumMicroarch_Intel_Core7_SandyBridge ? "IntelI7UncPmcN" /* X */            : "IntelI7UncArbPerfEvtSelN";   /* >= S,H */
+        case 0x000003b4: case 0x000003b5: case 0x000003b6: case 0x000003b7:
+            return "IntelI7UncPmcN";
+        case 0x000003c0: case 0x000003c1: case 0x000003c2: case 0x000003c3:
+        case 0x000003c4: case 0x000003c5: case 0x000003c6: case 0x000003c7:
+            return "IntelI7UncPerfEvtSelN";
+        case 0x000003f1: return "Ia32PebsEnable";
+        case 0x000003f6: return "IntelI7PebsLdLat";
+        case 0x000003f8: return "IntelI7PkgCnResidencyN";
+        case 0x000003f9: return "IntelI7PkgCnResidencyN";
+        case 0x000003fa: return "IntelI7PkgCnResidencyN";
+        case 0x000003fc: return "IntelI7CoreCnResidencyN";
+        case 0x000003fd: return "IntelI7CoreCnResidencyN";
+        case 0x000003fe: return "IntelI7CoreCnResidencyN";
+
+        case 0x00000478: g_enmMicroarch == kCpumMicroarch_Intel_Core2_Penryn ? "IntelCpuId1FeatureMaskEcdx" : NULL;
+        case 0x00000480: *pfTakesValue = true; return "Ia32VmxBase";
+        case 0x00000481: *pfTakesValue = true; return "Ia32VmxPinbasedCtls";
+        case 0x00000482: *pfTakesValue = true; return "Ia32VmxProcbasedCtls";
+        case 0x00000483: *pfTakesValue = true; return "Ia32VmxExitCtls";
+        case 0x00000484: *pfTakesValue = true; return "Ia32VmxEntryCtls";
+        case 0x00000485: *pfTakesValue = true; return "Ia32VmxMisc";
+        case 0x00000486: *pfTakesValue = true; return "Ia32VmxCr0Fixed0";
+        case 0x00000487: *pfTakesValue = true; return "Ia32VmxCr0Fixed1";
+        case 0x00000488: *pfTakesValue = true; return "Ia32VmxCr4Fixed0";
+        case 0x00000489: *pfTakesValue = true; return "Ia32VmxCr4Fixed1";
+        case 0x0000048a: *pfTakesValue = true; return "Ia32VmxVmcsEnum";
+        case 0x0000048b: *pfTakesValue = true; return "Ia32VmxProcBasedCtls2";
+        case 0x0000048c: *pfTakesValue = true; return "Ia32VmxEptVpidCap";
+        case 0x0000048d: *pfTakesValue = true; return "Ia32VmxTruePinbasedCtls";
+        case 0x0000048e: *pfTakesValue = true; return "Ia32VmxTrueProcbasedCtls";
+        case 0x0000048f: *pfTakesValue = true; return "Ia32VmxTrueExitCtls";
+        case 0x00000490: *pfTakesValue = true; return "Ia32VmxTrueEntryCtls";
+
+        case 0x000004c1:
+        case 0x000004c2:
+        case 0x000004c3:
+        case 0x000004c4:
+        case 0x000004c5:
+        case 0x000004c6:
+        case 0x000004c7:
+        case 0x000004c8:
+            return "Ia32PmcN";
+
+        case 0x00000600: return "Ia32DsArea";
+        case 0x00000601: return "IntelI7SandyVrCurrentConfig";
+        case 0x00000603: return "IntelI7SandyVrMiscConfig";
+        case 0x00000606: return "IntelI7SandyRaplPowerUnit";
+        case 0x0000060a: return "IntelI7SandyPkgCnIrtlN";
+        case 0x0000060b: return "IntelI7SandyPkgCnIrtlN";
+        case 0x0000060c: return "IntelI7SandyPkgCnIrtlN";
+        case 0x0000060d: return "IntelI7SandyPkgC2Residency";
+
+        case 0x00000610: return "IntelI7RaplPkgPowerLimit";
+        case 0x00000611: return "IntelI7RaplPkgEnergyStatus";
+        case 0x00000613: return "IntelI7RaplPkgPerfStatus";
+        case 0x00000614: return "IntelI7RaplPkgPowerInfo";
+        case 0x00000618: return "IntelI7RaplDramPowerLimit";
+        case 0x00000619: return "IntelI7RaplDramEnergyStatus";
+        case 0x0000061b: return "IntelI7RaplDramPerfStatus";
+        case 0x0000061c: return "IntelI7RaplDramPowerInfo";
+        case 0x00000638: return "IntelI7RaplPp0PowerLimit";
+        case 0x00000639: return "IntelI7RaplPp0EnergyStatus";
+        case 0x0000063a: return "IntelI7RaplPp0Policy";
+        case 0x0000063b: return "IntelI7RaplPp0PerfStatus";
+        case 0x00000640: return "IntelI7RaplPp1PowerLimit";
+        case 0x00000641: return "IntelI7RaplPp1EnergyStatus";
+        case 0x00000642: return "IntelI7RaplPp1Policy";
+        case 0x00000680: case 0x00000681: case 0x00000682: case 0x00000683:
+        case 0x00000684: case 0x00000685: case 0x00000686: case 0x00000687:
+        case 0x00000688: case 0x00000689: case 0x0000068a: case 0x0000068b:
+        case 0x0000068c: case 0x0000068d: case 0x0000068e: case 0x0000068f:
+        //case 0x00000690: case 0x00000691: case 0x00000692: case 0x00000693:
+        //case 0x00000694: case 0x00000695: case 0x00000696: case 0x00000697:
+        //case 0x00000698: case 0x00000699: case 0x0000069a: case 0x0000069b:
+        //case 0x0000069c: case 0x0000069d: case 0x0000069e: case 0x0000069f:
+            return "IntelLastBranchFromN";
+        case 0x000006c0: case 0x000006c1: case 0x000006c2: case 0x000006c3:
+        case 0x000006c4: case 0x000006c5: case 0x000006c6: case 0x000006c7:
+        case 0x000006c8: case 0x000006c9: case 0x000006ca: case 0x000006cb:
+        case 0x000006cc: case 0x000006cd: case 0x000006ce: case 0x000006cf:
+        //case 0x000006d0: case 0x000006d1: case 0x000006d2: case 0x000006d3:
+        //case 0x000006d4: case 0x000006d5: case 0x000006d6: case 0x000006d7:
+        //case 0x000006d8: case 0x000006d9: case 0x000006da: case 0x000006db:
+        //case 0x000006dc: case 0x000006dd: case 0x000006de: case 0x000006df:
+            return "IntelLastBranchFromN";
+        case 0x000006e0: return "Ia32TscDeadline";
+
+        case 0xc0000080: return "Amd64Efer";
+        case 0xc0000081: return "Amd64SyscallTarget";
+        case 0xc0000082: return "Amd64LongSyscallTarget";
+        case 0xc0000083: return "Amd64CompSyscallTarget";
+        case 0xc0000084: return "Amd64SyscallFlagMask";
+        case 0xc0000100: return "Amd64FsBase";
+        case 0xc0000101: return "Amd64GsBase";
+        case 0xc0000102: return "Amd64KernelGsBase";
+        case 0xc0000103: return "Amd64TscAux";
+        case 0xc0000104: return "AmdFam15hTscRate";
+        case 0xc0000105: return "AmdFam15hLwpCfg";
+        case 0xc0000106: return "AmdFam15hLwpCbAddr";
+        case 0xc0000408: return "AmdFam10hMc4MiscN";
+        case 0xc0000409: return "AmdFam10hMc4MiscN";
+        case 0xc000040a: return "AmdFam10hMc4MiscN";
+        case 0xc000040b: return "AmdFam10hMc4MiscN";
+        case 0xc000040c: return "AmdFam10hMc4MiscN";
+        case 0xc000040d: return "AmdFam10hMc4MiscN";
+        case 0xc000040e: return "AmdFam10hMc4MiscN";
+        case 0xc000040f: return "AmdFam10hMc4MiscN";
+        case 0xc0010000: return "AmdK8PerfCtlN";
+        case 0xc0010001: return "AmdK8PerfCtlN";
+        case 0xc0010002: return "AmdK8PerfCtlN";
+        case 0xc0010003: return "AmdK8PerfCtlN";
+        case 0xc0010004: return "AmdK8PerfCtrN";
+        case 0xc0010005: return "AmdK8PerfCtrN";
+        case 0xc0010006: return "AmdK8PerfCtrN";
+        case 0xc0010007: return "AmdK8PerfCtrN";
+        case 0xc0010010: *pfTakesValue = true; return "AmdK8SysCfg";
+        case 0xc0010015: return "AmdK8HwCr";
+        case 0xc0010016: case 0xc0010018: return "AmdK8IorrBaseN";
+        case 0xc0010017: case 0xc0010019: return "AmdK8IorrMaskN";
+        case 0xc001001a: case 0xc001001d: return "AmdK8TopOfMemN";
+        case 0xc001001f: return "AmdK8NbCfg1";
+        case 0xc0010022: return "AmdK8McXcptRedir";
+        case 0xc0010030: case 0xc0010031: case 0xc0010032:
+        case 0xc0010033: case 0xc0010034: case 0xc0010035:
+            return "AmdK8CpuNameN";
+        case 0xc001003e: *pfTakesValue = true; return "AmdK8HwThermalCtrl";
+        case 0xc001003f: return "AmdK8SwThermalCtrl";
+        case 0xc0010044: case 0xc0010045: case 0xc0010046: case 0xc0010047:
+        case 0xc0010048: case 0xc0010049: case 0xc001004a: //case 0xc001004b:
+            return "AmdK8McCtlMaskN";
+        case 0xc0010050: case 0xc0010051: case 0xc0010052: case 0xc0010053:
+            return "AmdK8SmiOnIoTrapN";
+        case 0xc0010054: return "AmdK8SmiOnIoTrapCtlSts";
+        case 0xc0010055: return "AmdK8IntPendingMessage";
+        case 0xc0010056: return "AmdK8SmiTriggerIoCycle";
+        case 0xc0010058: return "AmdFam10hMmioCfgBaseAddr";
+        case 0xc0010059: return "AmdFam10hTrapCtlMaybe";
+        case 0xc0010061: *pfTakesValue = true; return "AmdFam10hPStateCurLimit";
+        case 0xc0010062: *pfTakesValue = true; return "AmdFam10hPStateControl";
+        case 0xc0010063: *pfTakesValue = true; return "AmdFam10hPStateStatus";
+        case 0xc0010064: case 0xc0010065: case 0xc0010066: case 0xc0010067:
+        case 0xc0010068: case 0xc0010069: case 0xc001006a: case 0xc001006b:
+            *pfTakesValue = true; return "AmdFam10hPStateN";
+        case 0xc0010070: *pfTakesValue = true; return "AmdFam10hCofVidControl";
+        case 0xc0010071: *pfTakesValue = true; return "AmdFam10hCofVidStatus";
+        case 0xc0010073: return "AmdFam10hCStateIoBaseAddr";
+        case 0xc0010074: return "AmdFam10hCpuWatchdogTimer";
+        // case 0xc0010075: return "AmdFam15hApmlTdpLimit";
+        // case 0xc0010077: return "AmdFam15hCpuPowerInTdp";
+        // case 0xc0010078: return "AmdFam15hPowerAveragingPeriod";
+        // case 0xc0010079: return "AmdFam15hDramCtrlCmdThrottle";
+        // case 0xc0010080: return "AmdFam16hFreqSensFeedbackMonActCnt0";
+        // case 0xc0010081: return "AmdFam16hFreqSensFeedbackMonRefCnt0";
+        case 0xc0010111: return "AmdK8SmmBase";     /** @todo probably misdetected ign/gp due to locking */
+        case 0xc0010112: return "AmdK8SmmAddr";     /** @todo probably misdetected ign/gp due to locking */
+        case 0xc0010113: return "AmdK8SmmMask";     /** @todo probably misdetected ign/gp due to locking */
+        case 0xc0010114: return "AmdK8VmCr";        /** @todo probably misdetected due to locking */
+        case 0xc0010115: return "AmdK8IgnNe";
+        case 0xc0010116: return "AmdK8SmmCtl";
+        case 0xc0010117: return "AmdK8VmHSavePa";   /** @todo probably misdetected due to locking */
+        case 0xc0010118: return "AmdFam10hVmLockKey";
+        case 0xc0010119: return "AmdFam10hSmmLockKey"; /* Not documented by BKDG, found in netbsd patch. */
+        case 0xc001011a: return "AmdFam10hLocalSmiStatus";
+        case 0xc0010140: *pfTakesValue = true; return "AmdFam10hOsVisWrkIdLength";
+        case 0xc0010141: *pfTakesValue = true; return "AmdFam10hOsVisWrkStatus";
+        case 0xc0010200: case 0xc0010202: case 0xc0010204: case 0xc0010206:
+        case 0xc0010208: case 0xc001020a: //case 0xc001020c: case 0xc001020e:
+            return "AmdK8PerfCtlN";
+        case 0xc0010201: case 0xc0010203: case 0xc0010205: case 0xc0010207:
+        case 0xc0010209: case 0xc001020b: //case 0xc001020d: case 0xc001020f:
+            return "AmdK8PerfCtrN";
+        case 0xc0010230: case 0xc0010232: case 0xc0010234: case 0xc0010236:
+        //case 0xc0010238: case 0xc001023a: case 0xc001030c: case 0xc001023e:
+            return "AmdFam16hL2IPerfCtlN";
+        case 0xc0010231: case 0xc0010233: case 0xc0010235: case 0xc0010237:
+        //case 0xc0010239: case 0xc001023b: case 0xc001023d: case 0xc001023f:
+            return "AmdFam16hL2IPerfCtrN";
+        case 0xc0010240: case 0xc0010242: case 0xc0010244: case 0xc0010246:
+        //case 0xc0010248: case 0xc001024a: case 0xc001024c: case 0xc001024e:
+            return "AmdFam15hNorthbridgePerfCtlN";
+        case 0xc0010241: case 0xc0010243: case 0xc0010245: case 0xc0010247:
+        //case 0xc0010249: case 0xc001024b: case 0xc001024d: case 0xc001024f:
+            return "AmdFam15hNorthbridgePerfCtrN";
+        case 0xc0011000: *pfTakesValue = true; return "AmdK7MicrocodeCtl";
+        case 0xc0011001: *pfTakesValue = true; return "AmdK7ClusterIdMaybe";
+        case 0xc0011002: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AmdK8CpuIdCtlStd07hEbax" : NULL;
+        case 0xc0011003: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AmdK8CpuIdCtlStd06hEcx"  : NULL;
+        case 0xc0011004: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AmdK8CpuIdCtlStd01hEdcx" : NULL;
+        case 0xc0011005: return g_enmMicroarch >= kCpumMicroarch_AMD_K8_First ? "AmdK8CpuIdCtlExt01hEdcx" : NULL;
+        case 0xc0011006: return "AmdK7DebugStatusMaybe";
+        case 0xc0011007: return "AmdK7BHTraceBaseMaybe";
+        case 0xc0011008: return "AmdK7BHTracePtrMaybe";
+        case 0xc0011009: return "AmdK7BHTraceLimitMaybe";
+        case 0xc001100a: return "AmdK7HardwareDebugToolCfgMaybe";
+        case 0xc001100b: return "AmdK7FastFlushCountMaybe";
+        case 0xc001100c: return "AmdK7NodeId"; /** @todo dunno if this was there is K7 already. Kinda doubt it. */
+        case 0xc0011019: return g_enmMicroarch >= kCpumMicroarch_AMD_15h_Piledriver ? "AmdK7DrXAddrMaskN" : NULL;
+        case 0xc001101a: return g_enmMicroarch >= kCpumMicroarch_AMD_15h_Piledriver ? "AmdK7DrXAddrMaskN" : NULL;
+        case 0xc001101b: return g_enmMicroarch >= kCpumMicroarch_AMD_15h_Piledriver ? "AmdK7DrXAddrMaskN" : NULL;
+        case 0xc0011020: return "AmdK7LoadStoreCfg";
+        case 0xc0011021: return "AmdK7InstrCacheCfg";
+        case 0xc0011022: return "AmdK7DataCacheCfg";
+        case 0xc0011023: return CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch) ? "AmdFam15hCombUnitCfg" : "AmdK7BusUnitCfg";
+        case 0xc0011024: return "AmdK7DebugCtl2Maybe";
+        case 0xc0011025: return "AmdK7Dr0DataMatchMaybe";
+        case 0xc0011026: return "AmdK7Dr0DataMaskMaybe";
+        case 0xc0011027: return "AmdK7DrXAddrMaskN";
+        case 0xc0011028: return g_enmMicroarch >= kCpumMicroarch_AMD_15h_First ? "AmdFam15hFpuCfg" : NULL;
+        case 0xc0011029: return g_enmMicroarch >= kCpumMicroarch_AMD_15h_First ? "AmdFam15hDecoderCfg" : NULL;
+        case 0xc001102a: return CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch)   ? "AmdFam15hCombUnitCfg2"
+                              : CPUMMICROARCH_IS_AMD_FAM_10H(g_enmMicroarch) || g_enmMicroarch > kCpumMicroarch_AMD_15h_End
+                              ? "AmdFam10hBusUnitCfg2" /* 10h & 16h */ : NULL;
+        case 0xc001102b: return CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch)   ? "AmdFam15hCombUnitCfg3" : NULL;
+        case 0xc001102c: return CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch)   ? "AmdFam15hExecUnitCfg" : NULL;
+        case 0xc001102d: return CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch)   ? "AmdFam15hLoadStoreCfg2" : NULL;
+        case 0xc0011030: return "AmdFam10hIbsFetchCtl";
+        case 0xc0011031: return "AmdFam10hIbsFetchLinAddr";
+        case 0xc0011032: return "AmdFam10hIbsFetchPhysAddr";
+        case 0xc0011033: return "AmdFam10hIbsOpExecCtl";
+        case 0xc0011034: return "AmdFam10hIbsOpRip";
+        case 0xc0011035: return "AmdFam10hIbsOpData";
+        case 0xc0011036: return "AmdFam10hIbsOpData2";
+        case 0xc0011037: return "AmdFam10hIbsOpData3";
+        case 0xc0011038: return "AmdFam10hIbsDcLinAddr";
+        case 0xc0011039: return "AmdFam10hIbsDcPhysAddr";
+        case 0xc001103a: return "AmdFam10hIbsCtl";
+        case 0xc001103b: return "AmdFam14hIbsBrTarget";
+    }
+    return NULL;
+}
+
+
+/**
+ * Names CPUMCPU variables that MSRs correspond to.
+ *
+ * @returns The variable name @a uMsr corresponds to, NULL if no variable.
+ * @param   uMsr                The MSR in question.
+ */
+static const char *getMsrCpumCpuVarName(uint32_t uMsr)
+{
+    switch (uMsr)
+    {
+        /* The fixed-range MTRR MSRs (MTRRfix64K/16K/4K). */
+        case 0x00000250: return "GuestMsrs.msr.MtrrFix64K_00000";
+        case 0x00000258: return "GuestMsrs.msr.MtrrFix16K_80000";
+        case 0x00000259: return "GuestMsrs.msr.MtrrFix16K_A0000";
+        case 0x00000268: return "GuestMsrs.msr.MtrrFix4K_C0000";
+        case 0x00000269: return "GuestMsrs.msr.MtrrFix4K_C8000";
+        case 0x0000026a: return "GuestMsrs.msr.MtrrFix4K_D0000";
+        case 0x0000026b: return "GuestMsrs.msr.MtrrFix4K_D8000";
+        case 0x0000026c: return "GuestMsrs.msr.MtrrFix4K_E0000";
+        case 0x0000026d: return "GuestMsrs.msr.MtrrFix4K_E8000";
+        case 0x0000026e: return "GuestMsrs.msr.MtrrFix4K_F0000";
+        case 0x0000026f: return "GuestMsrs.msr.MtrrFix4K_F8000";
+        /* IA32_PAT and MTRR default type. */
+        case 0x00000277: return "Guest.msrPAT";
+        case 0x000002ff: return "GuestMsrs.msr.MtrrDefType";
+    }
+    /* No dedicated CPUMCPU member known for this MSR. */
+    return NULL;
+}
+
+
+/**
+ * Checks whether the MSR should read as zero for some reason.
+ *
+ * Used to avoid emitting host-specific junk values into the generated report.
+ *
+ * @returns true if the register should read as zero, false if not.
+ * @param   uMsr                The MSR.
+ */
+static bool doesMsrReadAsZero(uint32_t uMsr)
+{
+    switch (uMsr)
+    {
+        case 0x00000088: return true; // "BBL_CR_D0" - RAZ until understood/needed.
+        case 0x00000089: return true; // "BBL_CR_D1" - RAZ until understood/needed.
+        case 0x0000008a: return true; // "BBL_CR_D2" - RAZ until understood/needed.
+
+        /* Non-zero, but unknown register. */
+        case 0x0000004a:
+        case 0x0000004b:
+        case 0x0000004c:
+        case 0x0000004d:
+        case 0x0000004e:
+        case 0x0000004f:
+        case 0x00000050:
+        case 0x00000051:
+        case 0x00000052:
+        case 0x00000053:
+        case 0x00000054:
+        case 0x0000008c:
+        case 0x0000008d:
+        case 0x0000008e:
+        case 0x0000008f:
+        case 0x00000090:
+        case 0xc0011011:
+            return true;
+    }
+
+    /* Everything else is reported with its actual value. */
+    return false;
+}
+
+
<br>
+/**
+ * Gets the skip mask for the given MSR.
+ *
+ * Bits set in the mask are left untouched when probing which bits of the MSR
+ * are writable (e.g. lock bits, SMM-only bits, volatile counter bits).
+ *
+ * @returns Skip mask (0 means skipping nothing).
+ * @param   uMsr                The MSR.
+ */
+static uint64_t getGenericSkipMask(uint32_t uMsr)
+{
+    switch (uMsr)
+    {
+        case 0x0000013c: return 3; /* AES-NI lock bit ++. */
+
+        case 0x000001f2: return UINT64_C(0xfffff00f); /* Ia32SmrrPhysBase - Only writable in SMM. */
+        case 0x000001f3: return UINT64_C(0xfffff800); /* Ia32SmrrPhysMask - Only writable in SMM. */
+
+        case 0xc0010015: return 1; /* SmmLock bit */
+
+        /* SmmLock effect: */
+        case 0xc0010111: return UINT32_MAX;
+        /* Mask out address bits beyond the CPU's physical address width. */
+        case 0xc0010112: return UINT64_C(0xfffe0000) | ((RT_BIT_64(vbCpuRepGetPhysAddrWidth()) - 1) & ~(uint64_t)UINT32_MAX);
+        case 0xc0010113: return UINT64_C(0xfffe773f) | ((RT_BIT_64(vbCpuRepGetPhysAddrWidth()) - 1) & ~(uint64_t)UINT32_MAX);
+        case 0xc0010116: return 0x1f;
+
+        case 0xc0010114: return RT_BIT_64(3) /* SVM lock */ | RT_BIT_64(4) /* SvmeDisable */;
+
+        /* Canonical */
+        case 0xc0011034:
+        case 0xc0011038:
+        case 0xc001103b:
+            return UINT64_C(0xffff800000000000);
+
+        /* Time counters - fudge them to avoid incorrect ignore masks. */
+        case 0x00000010:
+        case 0x000000e7:
+        case 0x000000e8:
+            return RT_BIT_32(27) - 1;
+    }
+    return 0;
+}
+
+
+
+
+/** queryMsrWriteBadness return values. */
+typedef enum
+{
+    /** Writes are not expected to cause problems. */
+    VBCPUREPBADNESS_MOSTLY_HARMLESS = 0,
+    /** Not a problem if accessed with care. */
+    VBCPUREPBADNESS_MIGHT_BITE,
+    /** Worse than a bad james bond villain. */
+    VBCPUREPBADNESS_BOND_VILLAIN
+} VBCPUREPBADNESS;
+
+
+/**
+ * Blacklisting and graylisting of MSRs which may cause triple faults.
+ *
+ * @returns Badness factor.
+ * @param   uMsr                The MSR in question.
+ */
+static VBCPUREPBADNESS queryMsrWriteBadness(uint32_t uMsr)
+{
+    /** @todo Having trouble in the 0xc0010247,0xc0011006,?? region on Bulldozer. */
+    /** @todo Having trouble in the 0xc001100f,0xc001100d,?? region on Opteron
+     *        2384. */
+
+    switch (uMsr)
+    {
+        case 0x00000050:
+        case 0x00000051:
+        case 0x00000052:
+        case 0x00000053:
+        case 0x00000054:
+        case 0x00001006:
+        case 0x00001007:
+        case 0xc0010010:
+        case 0xc0010016:
+        case 0xc0010017:
+        case 0xc0010018:
+        case 0xc0010019:
+        case 0xc001001a:
+        case 0xc001001d:
+        case 0xc001101e:
+        case 0xc0010064: /* P-state frequency, voltage, ++. */
+        case 0xc0010065: /* P-state frequency, voltage, ++. */
+        case 0xc0010066: /* P-state frequency, voltage, ++. */
+        case 0xc0010067: /* P-state frequency, voltage, ++. */
+        case 0xc0010068: /* P-state frequency, voltage, ++. */
+        case 0xc0010069: /* P-state frequency, voltage, ++. */
+        case 0xc001006a: /* P-state frequency, voltage, ++. */
+        case 0xc001006b: /* P-state frequency, voltage, ++. */
+        case 0xc0010070: /* COFVID Control. */
+        case 0xc0011021: /* IC_CFG (instruction cache configuration) */
+        case 0xc0011023: /* CU_CFG (combined unit configuration) */
+        case 0xc001102c: /* EX_CFG (execution unit configuration) */
+            return VBCPUREPBADNESS_BOND_VILLAIN;
+
+        case 0x000001a0: /* IA32_MISC_ENABLE */
+        case 0x00000199: /* IA32_PERF_CTL */
+        case 0x00002000: /* P6_CR0 */
+        case 0x00002003: /* P6_CR3 */
+        case 0x00002004: /* P6_CR4 */
+        case 0xc0000080: /* MSR_K6_EFER */
+            return VBCPUREPBADNESS_MIGHT_BITE;
+    }
+    return VBCPUREPBADNESS_MOSTLY_HARMLESS;
+}
+
+
+
+/**
+ * Prints a 64-bit value in the best way.
+ *
+ * Emits a leading ", " so it can be chained after a previous vbCpuRepPrintf
+ * call when building an MSR table entry.
+ *
+ * @param   uValue              The value.
+ */
+static void printMsrValueU64(uint64_t uValue)
+{
+    if (uValue == 0)
+        vbCpuRepPrintf(", 0");
+    else if (uValue == UINT16_MAX)
+        vbCpuRepPrintf(", UINT16_MAX");
+    else if (uValue == UINT32_MAX)
+        vbCpuRepPrintf(", UINT32_MAX");
+    else if (uValue == UINT64_MAX)
+        vbCpuRepPrintf(", UINT64_MAX");
+    else if (uValue == UINT64_C(0xffffffff00000000))
+        vbCpuRepPrintf(", ~(uint64_t)UINT32_MAX");
+    else if (uValue <= (UINT32_MAX >> 1))
+        vbCpuRepPrintf(", %#llx", uValue); /* fits a plain signed 32-bit literal */
+    else if (uValue <= UINT32_MAX)
+        vbCpuRepPrintf(", UINT32_C(%#llx)", uValue);
+    else
+        vbCpuRepPrintf(", UINT64_C(%#llx)", uValue);
+}
+
+
+/**
+ * Prints the newline after an MSR line has been printed.
+ *
+ * This is used as a hook to slow down the output and make sure the remote
+ * terminal or/and output file has received the last update before we go and
+ * crash probing the next MSR.
+ */
+static void printMsrNewLine(void)
+{
+    vbCpuRepPrintf("\n");
+#if 1
+    /* Deliberate throttle; flip to #if 0 for full-speed output. */
+    RTThreadSleep(8);
+#endif
+}
+
+/**
+ * Prints an MFN table entry for a write-only MSR.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            The MSR.
+ * @param   pszWrFnName     Write function name, NULL defaults to "IgnoreWrite".
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrWriteOnly(uint32_t uMsr, const char *pszWrFnName, const char *pszAnnotation)
+{
+    if (!pszWrFnName)
+        pszWrFnName = "IgnoreWrite";
+    /* When pszAnnotation is NULL the short format string is used and the
+       trailing NULL argument is simply ignored by the formatter. */
+    vbCpuRepPrintf(pszAnnotation
+                   ? "    MFN(%#010x, \"%s\", WriteOnly, %s), /* %s */"
+                   : "    MFN(%#010x, \"%s\", WriteOnly, %s),",
+                   uMsr, getMsrName(uMsr), pszWrFnName, pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an MVO (read-only value) MSR table entry.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            The MSR.
+ * @param   uValue          The value it reads as.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrValueReadOnly(uint32_t uMsr, uint64_t uValue, const char *pszAnnotation)
+{
+    vbCpuRepPrintf("    MVO(%#010x, \"%s\"", uMsr, getMsrName(uMsr));
+    printMsrValueU64(uValue);
+    vbCpuRepPrintf("),");
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an MVI (fixed value, writes ignored) MSR table entry.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            The MSR.
+ * @param   uValue          The value it reads as.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrValueIgnoreWrites(uint32_t uMsr, uint64_t uValue, const char *pszAnnotation)
+{
+    vbCpuRepPrintf("    MVI(%#010x, \"%s\"", uMsr, getMsrName(uMsr));
+    printMsrValueU64(uValue);
+    vbCpuRepPrintf("),");
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an MVX (value with ignore and GP masks) MSR table entry.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            The MSR.
+ * @param   uValue          The value it reads as.
+ * @param   fIgnMask        Mask of bits whose writes are silently ignored.
+ * @param   fGpMask         Mask of bits whose writes cause \#GP.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrValueExtended(uint32_t uMsr, uint64_t uValue, uint64_t fIgnMask, uint64_t fGpMask,
+                                  const char *pszAnnotation)
+{
+    vbCpuRepPrintf("    MVX(%#010x, \"%s\"", uMsr, getMsrName(uMsr));
+    printMsrValueU64(uValue);
+    printMsrValueU64(fIgnMask);
+    printMsrValueU64(fGpMask);
+    vbCpuRepPrintf("),");
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an RVO (read-only value range) MSR table entry.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            First MSR in the range.
+ * @param   uLast           Last MSR in the range (inclusive).
+ * @param   uValue          The value the registers read as.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrRangeValueReadOnly(uint32_t uMsr, uint32_t uLast, uint64_t uValue, const char *pszAnnotation)
+{
+    vbCpuRepPrintf("    RVO(%#010x, %#010x, \"%s\"", uMsr, uLast, getMsrRangeName(uMsr));
+    printMsrValueU64(uValue);
+    vbCpuRepPrintf("),");
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an RVI (fixed-value, writes-ignored range) MSR table entry.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            First MSR in the range.
+ * @param   uLast           Last MSR in the range (inclusive).
+ * @param   uValue          The value the registers read as.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrRangeValueIgnoreWrites(uint32_t uMsr, uint32_t uLast, uint64_t uValue, const char *pszAnnotation)
+{
+    vbCpuRepPrintf("    RVI(%#010x, %#010x, \"%s\"", uMsr, uLast, getMsrRangeName(uMsr));
+    printMsrValueU64(uValue);
+    vbCpuRepPrintf("),");
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an MFN (read/write function) MSR table entry.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            The MSR.
+ * @param   pszRdFnName     Read function, NULL to look it up via getMsrFnName.
+ * @param   pszWrFnName     Write function, NULL defaults to the read function.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrFunction(uint32_t uMsr, const char *pszRdFnName, const char *pszWrFnName, const char *pszAnnotation)
+{
+    if (!pszRdFnName)
+        pszRdFnName = getMsrFnName(uMsr, NULL);
+    if (!pszWrFnName)
+        pszWrFnName = pszRdFnName;
+    vbCpuRepPrintf("    MFN(%#010x, \"%s\", %s, %s),", uMsr, getMsrName(uMsr), pszRdFnName, pszWrFnName);
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an MFO (read-only function) MSR table entry.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            The MSR.
+ * @param   pszRdFnName     Read function, NULL to look it up via getMsrFnName.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrFunctionReadOnly(uint32_t uMsr, const char *pszRdFnName, const char *pszAnnotation)
+{
+    if (!pszRdFnName)
+        pszRdFnName = getMsrFnName(uMsr, NULL);
+    vbCpuRepPrintf("    MFO(%#010x, \"%s\", %s),", uMsr, getMsrName(uMsr), pszRdFnName);
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an MFI (function read, writes ignored) MSR table entry.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            The MSR.
+ * @param   pszRdFnName     Read function, NULL to look it up via getMsrFnName.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrFunctionIgnoreWrites(uint32_t uMsr, const char *pszRdFnName, const char *pszAnnotation)
+{
+    if (!pszRdFnName)
+        pszRdFnName = getMsrFnName(uMsr, NULL);
+    vbCpuRepPrintf("    MFI(%#010x, \"%s\", %s),", uMsr, getMsrName(uMsr), pszRdFnName);
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an MFW (function with write-ignore mask) MSR table entry.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            The MSR.
+ * @param   pszRdFnName     Read function, NULL to look it up via getMsrFnName.
+ * @param   pszWrFnName     Write function, NULL defaults to the read function.
+ * @param   fIgnMask        Mask of bits whose writes are silently ignored.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrFunctionIgnoreMask(uint32_t uMsr, const char *pszRdFnName, const char *pszWrFnName,
+                                      uint64_t fIgnMask, const char *pszAnnotation)
+{
+    if (!pszRdFnName)
+        pszRdFnName = getMsrFnName(uMsr, NULL);
+    if (!pszWrFnName)
+        pszWrFnName = pszRdFnName;
+    vbCpuRepPrintf("    MFW(%#010x, \"%s\", %s, %s", uMsr, getMsrName(uMsr), pszRdFnName, pszWrFnName);
+    printMsrValueU64(fIgnMask);
+    vbCpuRepPrintf("),");
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an MFX (function with value, ignore and GP masks) MSR table entry.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            The MSR.
+ * @param   pszRdFnName     Read function, NULL to look it up via getMsrFnName.
+ * @param   pszWrFnName     Write function, NULL defaults to the read function.
+ * @param   uValue          The initial/default value.
+ * @param   fIgnMask        Mask of bits whose writes are silently ignored.
+ * @param   fGpMask         Mask of bits whose writes cause \#GP.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrFunctionExtended(uint32_t uMsr, const char *pszRdFnName, const char *pszWrFnName, uint64_t uValue,
+                                    uint64_t fIgnMask, uint64_t fGpMask, const char *pszAnnotation)
+{
+    if (!pszRdFnName)
+        pszRdFnName = getMsrFnName(uMsr, NULL);
+    if (!pszWrFnName)
+        pszWrFnName = pszRdFnName;
+    vbCpuRepPrintf("    MFX(%#010x, \"%s\", %s, %s", uMsr, getMsrName(uMsr), pszRdFnName, pszWrFnName);
+    printMsrValueU64(uValue);
+    printMsrValueU64(fIgnMask);
+    printMsrValueU64(fGpMask);
+    vbCpuRepPrintf("),");
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an MFX MSR table entry where the value is a small index, formatted
+ * inline with %#x rather than via printMsrValueU64.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            The MSR.
+ * @param   pszRdFnName     Read function, NULL to look it up via getMsrFnName.
+ * @param   pszWrFnName     Write function, NULL defaults to the read function.
+ * @param   uValue          The index value (printed with %#x).
+ * @param   fIgnMask        Mask of bits whose writes are silently ignored.
+ * @param   fGpMask         Mask of bits whose writes cause \#GP.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrFunctionExtendedIdxVal(uint32_t uMsr, const char *pszRdFnName, const char *pszWrFnName, uint64_t uValue,
+                                          uint64_t fIgnMask, uint64_t fGpMask, const char *pszAnnotation)
+{
+    if (!pszRdFnName)
+        pszRdFnName = getMsrFnName(uMsr, NULL);
+    if (!pszWrFnName)
+        pszWrFnName = pszRdFnName;
+    vbCpuRepPrintf("    MFX(%#010x, \"%s\", %s, %s, %#x", uMsr, getMsrName(uMsr), pszRdFnName, pszWrFnName, uValue);
+    printMsrValueU64(fIgnMask);
+    printMsrValueU64(fGpMask);
+    vbCpuRepPrintf("),");
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an MFS (function with CPUMCPU storage member) MSR table entry.
+ *
+ * @returns VINF_SUCCESS, or VERR_NOT_FOUND if no CPUMCPU member name is known
+ *          for the MSR.
+ * @param   uMsr                The MSR.
+ * @param   pszRdFnName         Read function, NULL to look it up via getMsrFnName.
+ * @param   pszWrFnName         Write function, NULL defaults to the read function.
+ * @param   pszCpumCpuStorage   CPUMCPU member name, NULL to look it up via
+ *                              getMsrCpumCpuVarName.
+ * @param   pszAnnotation       Optional trailing comment (NULL for none).
+ */
+static int printMsrFunctionCpumCpu(uint32_t uMsr, const char *pszRdFnName, const char *pszWrFnName,
+                                   const char *pszCpumCpuStorage, const char *pszAnnotation)
+{
+    if (!pszRdFnName)
+        pszRdFnName = getMsrFnName(uMsr, NULL);
+    if (!pszWrFnName)
+        pszWrFnName = pszRdFnName;
+    if (!pszCpumCpuStorage)
+        pszCpumCpuStorage = getMsrCpumCpuVarName(uMsr);
+    if (!pszCpumCpuStorage)
+        return RTMsgErrorRc(VERR_NOT_FOUND, "Missing CPUMCPU member for %s (%#x)\n", getMsrName(uMsr), uMsr);
+    vbCpuRepPrintf("    MFS(%#010x, \"%s\", %s, %s, %s),", uMsr, getMsrName(uMsr), pszRdFnName, pszWrFnName, pszCpumCpuStorage);
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an MFZ (function with CPUMCPU storage plus ignore/GP masks) MSR
+ * table entry.
+ *
+ * @returns VINF_SUCCESS, or VERR_NOT_FOUND if no CPUMCPU member name is known
+ *          for the MSR.
+ * @param   uMsr                The MSR.
+ * @param   pszRdFnName         Read function, NULL to look it up via getMsrFnName.
+ * @param   pszWrFnName         Write function, NULL defaults to the read function.
+ * @param   pszCpumCpuStorage   CPUMCPU member name, NULL to look it up via
+ *                              getMsrCpumCpuVarName.
+ * @param   fIgnMask            Mask of bits whose writes are silently ignored.
+ * @param   fGpMask             Mask of bits whose writes cause \#GP.
+ * @param   pszAnnotation       Optional trailing comment (NULL for none).
+ */
+static int printMsrFunctionCpumCpuEx(uint32_t uMsr, const char *pszRdFnName, const char *pszWrFnName,
+                                     const char *pszCpumCpuStorage, uint64_t fIgnMask, uint64_t fGpMask,
+                                     const char *pszAnnotation)
+{
+    if (!pszRdFnName)
+        pszRdFnName = getMsrFnName(uMsr, NULL);
+    if (!pszWrFnName)
+        pszWrFnName = pszRdFnName;
+    if (!pszCpumCpuStorage)
+        pszCpumCpuStorage = getMsrCpumCpuVarName(uMsr);
+    if (!pszCpumCpuStorage)
+        return RTMsgErrorRc(VERR_NOT_FOUND, "Missing CPUMCPU member for %s (%#x)\n", getMsrName(uMsr), uMsr);
+    vbCpuRepPrintf("    MFZ(%#010x, \"%s\", %s, %s, %s", uMsr, getMsrName(uMsr), pszRdFnName, pszWrFnName, pszCpumCpuStorage);
+    printMsrValueU64(fIgnMask);
+    printMsrValueU64(fGpMask);
+    vbCpuRepPrintf("),");
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an RFN (function range) MSR table entry.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            First MSR in the range.
+ * @param   uLast           Last MSR in the range (inclusive).
+ * @param   pszRdFnName     Read function, NULL to look it up via getMsrFnName.
+ * @param   pszWrFnName     Write function, NULL defaults to the read function.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrRangeFunction(uint32_t uMsr, uint32_t uLast, const char *pszRdFnName, const char *pszWrFnName,
+                                 const char *pszAnnotation)
+{
+    if (!pszRdFnName)
+        pszRdFnName = getMsrFnName(uMsr, NULL);
+    if (!pszWrFnName)
+        pszWrFnName = pszRdFnName;
+    vbCpuRepPrintf("    RFN(%#010x, %#010x, \"%s\", %s, %s),", uMsr, uLast, getMsrRangeName(uMsr), pszRdFnName, pszWrFnName);
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an RSN (function range with value and masks) MSR table entry.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            First MSR in the range.
+ * @param   uLast           Last MSR in the range (inclusive).
+ * @param   pszRdFnName     Read function, NULL to look it up via getMsrFnName.
+ * @param   pszWrFnName     Write function, NULL defaults to the read function.
+ * @param   uValue          The initial/default value.
+ * @param   fIgnMask        Mask of bits whose writes are silently ignored.
+ * @param   fGpMask         Mask of bits whose writes cause \#GP.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrRangeFunctionEx(uint32_t uMsr, uint32_t uLast, const char *pszRdFnName, const char *pszWrFnName,
+                                   uint64_t uValue, uint64_t fIgnMask, uint64_t fGpMask, const char *pszAnnotation)
+{
+    if (!pszRdFnName)
+        pszRdFnName = getMsrFnName(uMsr, NULL);
+    if (!pszWrFnName)
+        pszWrFnName = pszRdFnName;
+    vbCpuRepPrintf("    RSN(%#010x, %#010x, \"%s\", %s, %s", uMsr, uLast, getMsrRangeName(uMsr), pszRdFnName, pszWrFnName);
+    printMsrValueU64(uValue);
+    printMsrValueU64(fIgnMask);
+    printMsrValueU64(fGpMask);
+    vbCpuRepPrintf("),");
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints an RSN MSR table entry where the value is a small index, formatted
+ * inline with %#x rather than via printMsrValueU64.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            First MSR in the range.
+ * @param   uLast           Last MSR in the range (inclusive).
+ * @param   pszRdFnName     Read function, NULL to look it up via getMsrFnName.
+ * @param   pszWrFnName     Write function, NULL defaults to the read function.
+ * @param   uValue          The index value (printed with %#x).
+ * @param   fIgnMask        Mask of bits whose writes are silently ignored.
+ * @param   fGpMask         Mask of bits whose writes cause \#GP.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrRangeFunctionExIdxVal(uint32_t uMsr, uint32_t uLast, const char *pszRdFnName, const char *pszWrFnName,
+                                         uint64_t uValue, uint64_t fIgnMask, uint64_t fGpMask, const char *pszAnnotation)
+{
+    if (!pszRdFnName)
+        pszRdFnName = getMsrFnName(uMsr, NULL);
+    if (!pszWrFnName)
+        pszWrFnName = pszRdFnName;
+    vbCpuRepPrintf("    RSN(%#010x, %#010x, \"%s\", %s, %s, %#x",
+                   uMsr, uLast, getMsrRangeName(uMsr), pszRdFnName, pszWrFnName, uValue);
+    printMsrValueU64(fIgnMask);
+    printMsrValueU64(fGpMask);
+    vbCpuRepPrintf("),");
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prints a MAL (alias) MSR table entry redirecting one MSR to another.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   uMsr            The aliasing MSR.
+ * @param   uTarget         The MSR it aliases.
+ * @param   pszAnnotation   Optional trailing comment (NULL for none).
+ */
+static int printMsrAlias(uint32_t uMsr, uint32_t uTarget, const char *pszAnnotation)
+{
+    vbCpuRepPrintf("    MAL(%#010x, \"%s\", %#010x),", uMsr, getMsrName(uMsr), uTarget);
+    if (pszAnnotation)
+        vbCpuRepPrintf(" /* %s */", pszAnnotation);
+    printMsrNewLine();
+    return VINF_SUCCESS;
+}
+
+
+
+/**
+ * Formats "value=..." annotation text for an MSR value.
+ *
+ * @returns Pointer to a static buffer - not reentrant, valid only until the
+ *          next annotate* call.
+ * @param   uValue          The value to annotate.
+ */
+static const char *annotateValue(uint64_t uValue)
+{
+    static char s_szBuf[40];
+    if (uValue <= UINT32_MAX)
+        RTStrPrintf(s_szBuf, sizeof(s_szBuf), "value=%#llx", uValue);
+    else
+        /* Use the IPRT %#x`%08x halves notation for readability of 64-bit values. */
+        RTStrPrintf(s_szBuf, sizeof(s_szBuf), "value=%#x`%08x", RT_HI_U32(uValue), RT_LO_U32(uValue));
+    return s_szBuf;
+}
+
+
+/**
+ * Formats "<extra> value=..." annotation text for an MSR value.
+ *
+ * @returns Pointer to a static buffer - not reentrant, valid only until the
+ *          next annotate* call.  NOTE(review): a long pszExtra is silently
+ *          truncated by RTStrPrintf given the 40 byte buffer.
+ * @param   pszExtra        Short prefix text placed before the value.
+ * @param   uValue          The value to annotate.
+ */
+static const char *annotateValueExtra(const char *pszExtra, uint64_t uValue)
+{
+    static char s_szBuf[40];
+    if (uValue <= UINT32_MAX)
+        RTStrPrintf(s_szBuf, sizeof(s_szBuf), "%s value=%#llx", pszExtra, uValue);
+    else
+        RTStrPrintf(s_szBuf, sizeof(s_szBuf), "%s value=%#x`%08x", pszExtra, RT_HI_U32(uValue), RT_LO_U32(uValue));
+    return s_szBuf;
+}
+
+
+/**
+ * Annotates a value, flagging it when expected bits are not all set.
+ *
+ * @returns Pointer to a static buffer (or annotateValue's static buffer) -
+ *          not reentrant, valid only until the next annotate* call.
+ * @param   uValue          The value to annotate.
+ * @param   fBits           Bits that are expected to be set in @a uValue.
+ */
+static const char *annotateIfMissingBits(uint64_t uValue, uint64_t fBits)
+{
+    static char s_szBuf[80];
+    if ((uValue & fBits) == fBits)
+        return annotateValue(uValue);
+    RTStrPrintf(s_szBuf, sizeof(s_szBuf), "XXX: Unexpected value %#llx - wanted bits %#llx to be set.", uValue, fBits);
+    return s_szBuf;
+}
+
+
+
+/**
+ * Generic worker that probes one MSR and prints the matching table entry.
+ *
+ * Classifies the MSR by write badness, then (when safe) probes which bits
+ * stick on writes to derive ignore/GP masks, and finally picks the most
+ * fitting MVO/MVI/MVX/MFN/MFO/MFI/MFX entry form.
+ *
+ * @returns IPRT status code (from the probing or print helpers).
+ * @param   uMsr                The MSR to report.
+ * @param   fFlags              VBCPUREPMSR_F_XXX flags for the MSR.
+ * @param   uValue              The value read from the MSR.
+ */
+static int reportMsr_Generic(uint32_t uMsr, uint32_t fFlags, uint64_t uValue)
+{
+    int         rc;
+    bool        fTakesValue = false;
+    const char *pszFnName   = getMsrFnName(uMsr, &fTakesValue);
+
+    if (fFlags & VBCPUREPMSR_F_WRITE_ONLY)
+        rc = printMsrWriteOnly(uMsr, pszFnName, NULL);
+    else
+    {
+        /* A read-as-zero register never carries its probed value into the table. */
+        bool    fReadAsZero = doesMsrReadAsZero(uMsr);
+        fTakesValue = fTakesValue && !fReadAsZero;
+
+
+        switch (queryMsrWriteBadness(uMsr))
+        {
+            /* This is what we're here for... */
+            case VBCPUREPBADNESS_MOSTLY_HARMLESS:
+            {
+                /* Writable?  Try writing back the current value, then zero. */
+                if (   msrProberModifyNoChange(uMsr)
+                    || msrProberModifyZero(uMsr))
+                {
+                    uint64_t fSkipMask = getGenericSkipMask(uMsr);
+                    uint64_t fIgnMask  = 0;
+                    uint64_t fGpMask   = 0;
+                    /* Probe bit-by-bit which writes are ignored and which #GP. */
+                    rc = msrProberModifyBitChanges(uMsr, &fIgnMask, &fGpMask, fSkipMask);
+                    if (RT_FAILURE(rc))
+                        return rc;
+
+                    if (pszFnName)
+                    {
+                        if (fGpMask == 0 && fIgnMask == UINT64_MAX && !fTakesValue)
+                            rc = printMsrFunctionIgnoreWrites(uMsr, pszFnName, annotateValue(uValue));
+                        else if (fGpMask == 0 && fIgnMask == 0 && (!fTakesValue || uValue == 0))
+                            rc = printMsrFunction(uMsr, pszFnName, pszFnName, annotateValue(uValue));
+                        else
+                            rc = printMsrFunctionExtended(uMsr, pszFnName, pszFnName, fTakesValue ? uValue : 0,
+                                                          fIgnMask, fGpMask, annotateValue(uValue));
+                    }
+                    else if (fGpMask == 0 && fIgnMask == UINT64_MAX)
+                        rc = printMsrValueIgnoreWrites(uMsr, fReadAsZero ? 0 : uValue, fReadAsZero ? annotateValue(uValue) : NULL);
+                    else
+                        rc = printMsrValueExtended(uMsr, fReadAsZero ? 0 : uValue, fIgnMask, fGpMask,
+                                                   fReadAsZero ? annotateValue(uValue) : NULL);
+                }
+                /* Most likely read-only. */
+                else if (pszFnName && !fTakesValue)
+                    rc = printMsrFunctionReadOnly(uMsr, pszFnName, annotateValue(uValue));
+                else if (pszFnName)
+                    rc = printMsrFunctionExtended(uMsr, pszFnName, "ReadOnly", uValue, 0, 0, annotateValue(uValue));
+                else if (fReadAsZero)
+                    rc = printMsrValueReadOnly(uMsr, 0, annotateValue(uValue));
+                else
+                    rc = printMsrValueReadOnly(uMsr, uValue, NULL);
+                break;
+            }
+
+            /* These should have special handling, so just do a simple
+               write back same value check to see if it's writable. */
+            case VBCPUREPBADNESS_MIGHT_BITE:
+                if (msrProberModifyNoChange(uMsr))
+                {
+                    if (pszFnName && !fTakesValue)
+                        rc = printMsrFunction(uMsr, pszFnName, pszFnName, annotateValueExtra("Might bite.", uValue));
+                    else if (pszFnName)
+                        rc = printMsrFunctionExtended(uMsr, pszFnName, pszFnName, uValue, 0, 0,
+                                                      annotateValueExtra("Might bite.", uValue));
+                    else if (fReadAsZero)
+                        rc = printMsrValueIgnoreWrites(uMsr, 0, annotateValueExtra("Might bite.", uValue));
+                    else
+                        rc = printMsrValueIgnoreWrites(uMsr, uValue, "Might bite.");
+                }
+                else if (pszFnName && !fTakesValue)
+                    rc = printMsrFunctionReadOnly(uMsr, pszFnName, annotateValueExtra("Might bite.", uValue));
+                else if (pszFnName)
+                    rc = printMsrFunctionExtended(uMsr, pszFnName, "ReadOnly", uValue, 0, UINT64_MAX,
+                                                  annotateValueExtra("Might bite.", uValue));
+                else if (fReadAsZero)
+                    rc = printMsrValueReadOnly(uMsr, 0, annotateValueExtra("Might bite.", uValue));
+                else
+                    rc = printMsrValueReadOnly(uMsr, uValue, "Might bite.");
+                break;
+
+
+            /* Don't try anything with these guys. */
+            case VBCPUREPBADNESS_BOND_VILLAIN:
+            default:
+                if (pszFnName && !fTakesValue)
+                    rc = printMsrFunction(uMsr, pszFnName, pszFnName, annotateValueExtra("Villain?", uValue));
+                else if (pszFnName)
+                    rc = printMsrFunctionExtended(uMsr, pszFnName, pszFnName, uValue, 0, 0,
+                                                  annotateValueExtra("Villain?", uValue));
+                else if (fReadAsZero)
+                    rc = printMsrValueIgnoreWrites(uMsr, 0, annotateValueExtra("Villain?", uValue));
+                else
+                    rc = printMsrValueIgnoreWrites(uMsr, uValue, "Villain?");
+                break;
+        }
+    }
+
+    return rc;
+}
+
+
+/**
+ * Generic report for a range of MSRs sharing the same read/write functions.
+ *
+ * @returns VBox status code.
+ * @param   paMsrs              Pointer to the first MSR in the range.
+ * @param   cMsrs               The number of MSRs in the array @a paMsrs.
+ * @param   cMax                The maximum number of registers in the range.
+ * @param   pszRdWrFnName       The read/write function name, optional (looked
+ *                              up via getMsrFnName() when NULL).
+ * @param   uMsrBase            The MSR number of the first register in the
+ *                              whole range (may precede paMsrs[0].uMsr).
+ * @param   fEarlyEndOk         Whether it's okay for the probed range to end
+ *                              before @a cMax registers.
+ * @param   fSkipMask           Mask of bits the probing must leave alone.
+ * @param   pidxLoop            Index variable that should be advanced to the
+ *                              last MSR entry in the range.
+ */
+static int reportMsr_GenRangeFunctionEx(VBCPUREPMSR const *paMsrs, uint32_t cMsrs, uint32_t cMax, const char *pszRdWrFnName,
+                                        uint32_t uMsrBase, bool fEarlyEndOk, uint64_t fSkipMask, uint32_t *pidxLoop)
+{
+    uint32_t uMsr   = paMsrs[0].uMsr;
+    uint32_t iRange = uMsr - uMsrBase;
+    Assert(cMax > iRange);
+    cMax -= iRange;
+
+    /* Resolve default function name. */
+    if (!pszRdWrFnName)
+    {
+        pszRdWrFnName = getMsrFnName(uMsr, NULL);
+        if (!pszRdWrFnName)
+            return RTMsgErrorRc(VERR_INVALID_PARAMETER, "uMsr=%#x no function name\n", uMsr);
+    }
+
+    /* Figure the possible register count. */
+    if (cMax > cMsrs)
+        cMax = cMsrs;
+    uint32_t cRegs = 1;
+    while (   cRegs < cMax
+           && paMsrs[cRegs].uMsr == uMsr + cRegs)
+        cRegs++;
+
+    /* Probe the first register and check that the others exhibit
+       the same characteristics. */
+    bool     fReadOnly0;
+    uint64_t fIgnMask0, fGpMask0;
+    int rc = msrProberModifyBasicTests(uMsr, fSkipMask, &fReadOnly0, &fIgnMask0, &fGpMask0);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    const char *pszAnnotation = NULL;
+    for (uint32_t i = 1; i < cRegs; i++)
+    {
+        bool     fReadOnlyN;
+        uint64_t fIgnMaskN, fGpMaskN;
+        rc = msrProberModifyBasicTests(paMsrs[i].uMsr, fSkipMask, &fReadOnlyN, &fIgnMaskN, &fGpMaskN);
+        if (RT_FAILURE(rc))
+            return rc;
+        if (   fReadOnlyN != fReadOnly0
+            || fIgnMaskN  != fIgnMask0
+            || fGpMaskN   != fGpMask0)
+        {
+            if (!fEarlyEndOk)
+            {
+                /* Note: "ro=%d/%d" - one specifier per bool; the original
+                   "ro=%d" left 6 arguments for 5 specifiers, shifting the
+                   64-bit mask values against the wrong conversions. */
+                vbCpuRepDebug("MSR %s (%#x) range ended unexpectedly early on %#x: ro=%d/%d ign=%#llx/%#llx gp=%#llx/%#llx [N/0]\n",
+                              getMsrNameHandled(uMsr), uMsr, paMsrs[i].uMsr,
+                              fReadOnlyN, fReadOnly0, fIgnMaskN, fIgnMask0, fGpMaskN, fGpMask0);
+                pszAnnotation = "XXX: The range ended earlier than expected!";
+            }
+            cRegs = i;
+            break;
+        }
+    }
+
+    /*
+     * Report the range (or single MSR as it might be).
+     */
+    *pidxLoop += cRegs - 1;
+
+    bool     fSimple = fIgnMask0 == 0
+                    && (fGpMask0 == 0 || (fGpMask0 == UINT64_MAX && fReadOnly0))
+                    && iRange == 0;
+    if (cRegs == 1)
+        return printMsrFunctionExtendedIdxVal(uMsr, pszRdWrFnName, fReadOnly0 ? "ReadOnly" : pszRdWrFnName,
+                                              iRange, fIgnMask0, fGpMask0,
+                                              pszAnnotation ? pszAnnotation : annotateValue(paMsrs[0].uValue));
+    if (fSimple)
+        return printMsrRangeFunction(uMsr, uMsr + cRegs - 1,
+                                     pszRdWrFnName, fReadOnly0 ? "ReadOnly" : pszRdWrFnName, pszAnnotation);
+
+    return printMsrRangeFunctionExIdxVal(uMsr, uMsr + cRegs - 1, pszRdWrFnName, fReadOnly0 ? "ReadOnly" : pszRdWrFnName,
+                                         iRange /*uValue*/, fIgnMask0, fGpMask0, pszAnnotation);
+}
+
+
+/**
+ * Generic report for a range of MSRs, probing skip mask derived from the MSR.
+ *
+ * @returns VBox status code.
+ * @param   paMsrs              Pointer to the first MSR in the range.
+ * @param   cMsrs               The number of MSRs in the array @a paMsrs.
+ * @param   cMax                The maximum number of registers in the range.
+ * @param   pszRdWrFnName       The read/write function name, optional.
+ * @param   pidxLoop            Index variable that should be advanced to the
+ *                              last MSR entry in the range.
+ */
+static int reportMsr_GenRangeFunction(VBCPUREPMSR const *paMsrs, uint32_t cMsrs, uint32_t cMax, const char *pszRdWrFnName,
+                                      uint32_t *pidxLoop)
+{
+    uint32_t const uMsrFirst = paMsrs[0].uMsr;
+    return reportMsr_GenRangeFunctionEx(paMsrs, cMsrs, cMax, pszRdWrFnName, uMsrFirst, false /*fEarlyEndOk*/,
+                                        getGenericSkipMask(uMsrFirst), pidxLoop);
+}
+
+
+/**
+ * Generic report for a single MSR implemented by read/write functions.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR number.
+ * @param   pszRdWrFnName       The read/write function name, optional (looked
+ *                              up via getMsrFnName() when NULL).
+ * @param   uValue              The value passed on to the print function.
+ * @param   fSkipMask           Mask of bits the probing must leave alone.
+ * @param   pszAnnotate         Annotation string, optional.
+ */
+static int reportMsr_GenFunctionEx(uint32_t uMsr, const char *pszRdWrFnName, uint32_t uValue,
+                                   uint64_t fSkipMask, const char *pszAnnotate)
+{
+    /* Resolve default function name. */
+    if (!pszRdWrFnName)
+    {
+        pszRdWrFnName = getMsrFnName(uMsr, NULL);
+        if (!pszRdWrFnName)
+            return RTMsgErrorRc(VERR_INVALID_PARAMETER, "uMsr=%#x no function name\n", uMsr);
+    }
+
+    /* Probe the register and report. */
+    uint64_t fIgnMask = 0;
+    uint64_t fGpMask  = 0;
+    int rc = msrProberModifyBitChanges(uMsr, &fIgnMask, &fGpMask, fSkipMask);
+    if (RT_SUCCESS(rc))
+    {
+        /* Pick the most specific printer matching the probed behavior;
+           order matters as the conditions overlap. */
+        if (fGpMask == UINT64_MAX && uValue == 0 && !msrProberModifyZero(uMsr))
+            rc = printMsrFunctionReadOnly(uMsr, pszRdWrFnName, pszAnnotate);
+        else if (fIgnMask == UINT64_MAX && fGpMask == 0 && uValue == 0)
+            rc = printMsrFunctionIgnoreWrites(uMsr, pszRdWrFnName, pszAnnotate);
+        else if (fIgnMask != 0 && fGpMask == 0 && uValue == 0)
+            rc = printMsrFunctionIgnoreMask(uMsr, pszRdWrFnName, NULL, fIgnMask, pszAnnotate);
+        else if (fIgnMask == 0 && fGpMask == 0 && uValue == 0)
+            rc = printMsrFunction(uMsr, pszRdWrFnName, NULL, pszAnnotate);
+        else
+            rc = printMsrFunctionExtended(uMsr, pszRdWrFnName, NULL, uValue, fIgnMask, fGpMask, pszAnnotate);
+    }
+    return rc;
+}
+
+
+/**
+ * Special function for reporting the IA32_APIC_BASE register, as it seems to be
+ * causing trouble on newer systems.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR number.
+ * @param   uValue              The value.
+ */
+static int reportMsr_Ia32ApicBase(uint32_t uMsr, uint64_t uValue)
+{
+    /* Trouble with the generic treatment of both the "APIC Global Enable" and
+       "Enable x2APIC mode" bits on an i7-3820QM running OS X 10.8.5.  */
+    uint64_t fSkipMask = RT_BIT_64(11);     /* APIC Global Enable. */
+    if (vbCpuRepSupportsX2Apic())
+        fSkipMask |= RT_BIT_64(10);         /* Enable x2APIC mode. */
+    return reportMsr_GenFunctionEx(uMsr, "Ia32ApicBase", uValue, fSkipMask, NULL);
+}
+
+
+/**
+ * Special function for reporting the IA32_MISC_ENABLE register, as it seems to
+ * be causing trouble on newer systems.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR number.
+ * @param   uValue              The value.
+ */
+static int reportMsr_Ia32MiscEnable(uint32_t uMsr, uint64_t uValue)
+{
+    uint64_t fSkipMask = 0;
+
+    /** @todo test & adjust on P4. */
+    /* Micro-architectures where probing this register is known to misbehave. */
+    if (   (   g_enmMicroarch >= kCpumMicroarch_Intel_NB_First
+            && g_enmMicroarch <= kCpumMicroarch_Intel_NB_End)
+        || (   g_enmMicroarch >= kCpumMicroarch_Intel_Core7_Broadwell
+            && g_enmMicroarch <= kCpumMicroarch_Intel_Core7_End)
+        || (   g_enmMicroarch >= kCpumMicroarch_Intel_Atom_Airmount
+            && g_enmMicroarch <= kCpumMicroarch_Intel_Atom_End)
+       )
+    {
+        vbCpuRepPrintf("WARNING: IA32_MISC_ENABLE probing needs hacking on this CPU!\n");
+        RTThreadSleep(128); /* Give the warning a chance to be seen before probing. */
+    }
+
+    /* The no execute related flag is deadly if clear.  */
+    if (   !(uValue & MSR_IA32_MISC_ENABLE_XD_DISABLE)
+        && (   g_enmMicroarch <  kCpumMicroarch_Intel_First
+            || g_enmMicroarch >= kCpumMicroarch_Intel_Core_Yonah
+            || vbCpuRepSupportsNX() ) )
+        fSkipMask |= MSR_IA32_MISC_ENABLE_XD_DISABLE;
+
+    /* Probe the remaining bits and report the register. */
+    uint64_t fIgnMask = 0;
+    uint64_t fGpMask  = 0;
+    int rc = msrProberModifyBitChanges(uMsr, &fIgnMask, &fGpMask, fSkipMask);
+    if (RT_SUCCESS(rc))
+        rc = printMsrFunctionExtended(uMsr, "Ia32MiscEnable", "Ia32MiscEnable", uValue,
+                                      fIgnMask, fGpMask, annotateValue(uValue));
+    return rc;
+}
+
+
+/**
+ * Verifies that MTRR type field works correctly in the given MSR.
+ *
+ * Type values 2 and 3 are reserved and must \#GP when written, while values
+ * below @a cExpected must be accepted.  The loop searches upwards for the
+ * first non-reserved type value that \#GPs, which yields the count of
+ * accepted types.
+ *
+ * @returns VBox status code (failure if bad MSR behavior).
+ * @param   uMsr                The MSR.
+ * @param   iBit                The first bit of the type field (8-bit wide).
+ * @param   cExpected           The number of types expected - PAT=8, MTRR=7.
+ */
+static int msrVerifyMtrrTypeGPs(uint32_t uMsr, uint32_t iBit, uint32_t cExpected)
+{
+    uint32_t uEndTypes = 0;
+    while (uEndTypes < 255)
+    {
+        /* Write the candidate type into the field; fGp is set if it GP'ed. */
+        bool fGp = !msrProberModifySimpleGp(uMsr, ~(UINT64_C(0xff) << iBit), (uint64_t)uEndTypes << iBit);
+        if (!fGp && (uEndTypes == 2 || uEndTypes == 3))
+            return RTMsgErrorRc(VERR_INVALID_PARAMETER, "MTRR types %u does not cause a GP as it should. (msr %#x)\n",
+                                uEndTypes, uMsr);
+        if (fGp && uEndTypes != 2 && uEndTypes != 3)
+            break;
+        uEndTypes++;
+    }
+    if (uEndTypes != cExpected)
+        return RTMsgErrorRc(VERR_INVALID_PARAMETER, "MTRR types detected to be %#x (msr %#x). Expected %#x.\n",
+                            uEndTypes, uMsr, cExpected);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Deals with the variable MTRR MSRs.
+ *
+ * The registers come in PHYSBASEn/PHYSMASKn pairs.
+ *
+ * @returns VBox status code.
+ * @param   paMsrs              Pointer to the first variable MTRR MSR (200h).
+ * @param   cMsrs               The number of MSRs in the array @a paMsr.
+ * @param   pidxLoop            Index variable that should be advanced to the
+ *                              last MTRR MSR entry.
+ */
+static int reportMsr_Ia32MtrrPhysBaseMaskN(VBCPUREPMSR const *paMsrs, uint32_t cMsrs, uint32_t *pidxLoop)
+{
+    uint32_t uMsr = paMsrs[0].uMsr;
+
+    /* Count them. */
+    uint32_t cRegs = 1;
+    while (   cRegs < cMsrs
+           && paMsrs[cRegs].uMsr == uMsr + cRegs)
+        cRegs++;
+    if (cRegs & 1)
+        return RTMsgErrorRc(VERR_INVALID_PARAMETER, "MTRR variable MSR range is odd: cRegs=%#x\n", cRegs);
+    if (cRegs > 0x20)
+        return RTMsgErrorRc(VERR_INVALID_PARAMETER, "MTRR variable MSR range is too large: cRegs=%#x\n", cRegs);
+
+    /* Find a disabled register that we can play around with.
+       Bit 11 in the mask register is the pair enable bit. */
+    uint32_t iGuineaPig;
+    for (iGuineaPig = 0; iGuineaPig < cRegs; iGuineaPig += 2)
+        if (!(paMsrs[iGuineaPig + 1].uValue & RT_BIT_32(11)))
+            break;
+    if (iGuineaPig >= cRegs)
+        iGuineaPig = cRegs - 2; /* All pairs enabled; fall back to the last pair. */
+    vbCpuRepDebug("iGuineaPig=%#x -> %#x\n", iGuineaPig, uMsr + iGuineaPig);
+
+    /* Probe the base.  */
+    uint64_t fIgnBase = 0;
+    uint64_t fGpBase  = 0;
+    int rc = msrProberModifyBitChanges(uMsr + iGuineaPig, &fIgnBase, &fGpBase, 0);
+    if (RT_FAILURE(rc))
+        return rc;
+    rc = msrVerifyMtrrTypeGPs(uMsr + iGuineaPig, 0, 7);
+    if (RT_FAILURE(rc))
+        return rc;
+    vbCpuRepDebug("fIgnBase=%#llx fGpBase=%#llx\n", fIgnBase, fGpBase);
+
+    /* Probing the mask is relatively straight forward. */
+    uint64_t fIgnMask = 0;
+    uint64_t fGpMask  = 0;
+    rc = msrProberModifyBitChanges(uMsr + iGuineaPig + 1, &fIgnMask, &fGpMask, 0);
+    if (RT_FAILURE(rc))
+        return rc;
+    vbCpuRepDebug("fIgnMask=%#llx fGpMask=%#llx\n", fIgnMask, fGpMask);
+
+    /* Validate that the whole range subscribes to the approximately same GP rules. */
+    for (uint32_t i = 0; i < cRegs; i += 2)
+    {
+        /* Enabled pairs only get the known-safe bits probed. */
+        uint64_t fSkipBase = ~fGpBase;
+        uint64_t fSkipMask = ~fGpMask;
+        if (!(paMsrs[i + 1].uValue & RT_BIT_32(11)))
+            fSkipBase = fSkipMask = 0;
+        fSkipBase |= 0x7;           /* Always skip the type. */
+        fSkipMask |= RT_BIT_32(11); /* Always skip the enable bit. */
+
+        vbCpuRepDebug("i=%#x fSkipBase=%#llx fSkipMask=%#llx\n", i, fSkipBase, fSkipMask);
+
+        if (!(paMsrs[i + 1].uValue & RT_BIT_32(11)))
+        {
+            /* NOTE(review): this re-verifies the guinea pig register rather
+               than register i (uMsr + iGuineaPig vs uMsr + i) - confirm this
+               is intentional. */
+            rc = msrVerifyMtrrTypeGPs(uMsr + iGuineaPig, 0, 7);
+            if (RT_FAILURE(rc))
+                return rc;
+        }
+
+        uint64_t fIgnBaseN = 0;
+        uint64_t fGpBaseN  = 0;
+        rc = msrProberModifyBitChanges(uMsr + i, &fIgnBaseN, &fGpBaseN, fSkipBase);
+        if (RT_FAILURE(rc))
+            return rc;
+
+        if (   fIgnBaseN != (fIgnBase & ~fSkipBase)
+            || fGpBaseN  != (fGpBase  & ~fSkipBase) )
+            return RTMsgErrorRc(VERR_INVALID_PARAMETER,
+                                "MTRR PHYS BASE register %#x behaves differently from %#x: ign=%#llx/%#llx gp=%#llx/%#llx (fSkipBase=%#llx)\n",
+                                uMsr + i, uMsr + iGuineaPig,
+                                fIgnBaseN, fIgnBase & ~fSkipBase, fGpBaseN, fGpBase & ~fSkipBase, fSkipBase);
+
+        uint64_t fIgnMaskN = 0;
+        uint64_t fGpMaskN  = 0;
+        rc = msrProberModifyBitChanges(uMsr + i + 1, &fIgnMaskN, &fGpMaskN, fSkipMask);
+        if (RT_FAILURE(rc))
+            return rc;
+        if (   fIgnMaskN != (fIgnMask & ~fSkipMask)
+            || fGpMaskN  != (fGpMask  & ~fSkipMask) )
+            return RTMsgErrorRc(VERR_INVALID_PARAMETER,
+                                "MTRR PHYS MASK register %#x behaves differently from %#x: ign=%#llx/%#llx gp=%#llx/%#llx (fSkipMask=%#llx)\n",
+                                uMsr + i + 1, uMsr + iGuineaPig + 1,
+                                fIgnMaskN, fIgnMask & ~fSkipMask, fGpMaskN, fGpMask & ~fSkipMask, fSkipMask);
+    }
+
+    /* Print the whole range. */
+    fGpBase &= ~(uint64_t)0x7; /* Valid type bits, see msrVerifyMtrrTypeGPs(). */
+    for (uint32_t i = 0; i < cRegs; i += 2)
+    {
+        printMsrFunctionExtendedIdxVal(uMsr + i,     "Ia32MtrrPhysBaseN", NULL, i / 2, fIgnBase, fGpBase,
+                                       annotateValue(paMsrs[i].uValue));
+        printMsrFunctionExtendedIdxVal(uMsr + i + 1, "Ia32MtrrPhysMaskN", NULL, i / 2, fIgnMask, fGpMask,
+                                       annotateValue(paMsrs[i + 1].uValue));
+    }
+
+    *pidxLoop += cRegs - 1;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Deals with fixed MTRR and PAT MSRs, checking the 8 memory type fields.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR.
+ */
+static int reportMsr_Ia32MtrrFixedOrPat(uint32_t uMsr)
+{
+    /* PAT (0x277) accepts 8 type values, the fixed MTRRs only 7. */
+    uint32_t const cTypes = uMsr == 0x00000277 ? 8 : 7;
+
+    /* The register consists of eight 8-bit type fields; verify each. */
+    for (uint32_t iField = 0; iField < 8; iField++)
+    {
+        int rc = msrVerifyMtrrTypeGPs(uMsr, iField * 8, cTypes);
+        if (RT_FAILURE(rc))
+            return rc;
+    }
+
+    return printMsrFunctionCpumCpu(uMsr, NULL, NULL, NULL, NULL);
+}
+
+
+/**
+ * Deals with IA32_MTRR_DEF_TYPE.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR.
+ */
+static int reportMsr_Ia32MtrrDefType(uint32_t uMsr)
+{
+    /* Verify the 8-bit default type field first. */
+    int rc = msrVerifyMtrrTypeGPs(uMsr, 0, 7);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    /* Probe the remaining bits, skipping the type field just verified (0x7). */
+    uint64_t fGpMask  = 0;
+    uint64_t fIgnMask = 0;
+    rc = msrProberModifyBitChanges(uMsr, &fIgnMask, &fGpMask, 0x7);
+    if (RT_FAILURE(rc))
+        return rc;
+    Assert(!(fGpMask & 7)); Assert(!(fIgnMask & 7));
+
+    return printMsrFunctionCpumCpuEx(uMsr, NULL, NULL, NULL, fIgnMask, fGpMask, NULL);
+}
+
+
+/**
+ * Deals with the Machine Check (MC) MSRs in the 400h+ area.
+ *
+ * The MSRs come in banks of four (CTL, STATUS, ADDR, MISC), where ADDR and
+ * MISC may be absent from the detected list on some CPUs.
+ *
+ * @returns VBox status code.
+ * @param   paMsrs              Pointer to the first MC MSR (400h).
+ * @param   cMsrs               The number of MSRs in the array @a paMsr.
+ * @param   pidxLoop            Index variable that should be advanced to the
+ *                              last MC MSR entry.
+ */
+static int reportMsr_Ia32McCtlStatusAddrMiscN(VBCPUREPMSR const *paMsrs, uint32_t cMsrs, uint32_t *pidxLoop)
+{
+    uint32_t uMsr = paMsrs[0].uMsr;
+
+    /* Count them.  cRegs counts MSR numbers covered (incl. gaps for absent
+       ADDR/MISC), cDetectedRegs counts entries actually present in paMsrs. */
+    uint32_t cRegs = 1;
+    uint32_t cDetectedRegs = 1;
+    while (   cDetectedRegs < cMsrs
+           && (   paMsrs[cDetectedRegs].uMsr == uMsr + cRegs
+               || (cRegs & 3) == 2 /* ADDR may or may not be there, depends on STATUS and CPU. */
+               || (cRegs & 3) == 3 /* MISC may or may not be there, depends on STATUS and CPU. */)
+           && cRegs < 0x7f )
+    {
+        if (paMsrs[cDetectedRegs].uMsr == uMsr + cRegs)
+            cDetectedRegs++;
+        cRegs++;
+    }
+    /* Banks are 4 registers each; a partial bank indicates detection trouble. */
+    if (cRegs & 3)
+        return RTMsgErrorRc(VERR_INVALID_PARAMETER, "MC MSR range is odd: cRegs=%#x\n", cRegs);
+
+    /* Just report them.  We don't bother probing here as the CTL format
+       and such seems to be a lot of work to test correctly and changes between
+       cpu generations.  */
+    *pidxLoop += cDetectedRegs - 1;
+    return printMsrRangeFunction(uMsr, uMsr + cRegs - 1, "Ia32McCtlStatusAddrMiscN", NULL, NULL);
+}
+
+
+
+/**
+ * Deals with the X2APIC msrs.
+ *
+ * @returns VBox status code.
+ * @param   paMsrs              Pointer to the first X2APIC MSR.
+ * @param   cMsrs               The number of MSRs in the array @a paMsr.
+ * @param   pidxLoop            Index variable that should be advanced to the
+ *                              last X2APIC MSR entry.
+ */
+static int reportMsr_GenX2Apic(VBCPUREPMSR const *paMsrs, uint32_t cMsrs, uint32_t *pidxLoop)
+{
+    /* Skip everything belonging to the x2APIC MSR area (up to 0x8ff). */
+    uint32_t iEnd = 1;
+    while (iEnd < cMsrs && paMsrs[iEnd].uMsr <= 0x8ff)
+        iEnd++;
+    *pidxLoop += iEnd - 1;
+
+    /* Just emit an X2APIC range. */
+    return printMsrRangeFunction(0x800, 0x8ff, "Ia32X2ApicN", NULL, NULL);
+}
+
+
+/**
+ * Deals carefully with the EFER register.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR number.
+ * @param   uValue              The current value.
+ */
+static int reportMsr_Amd64Efer(uint32_t uMsr, uint64_t uValue)
+{
+    /* Leave the long-mode and no-execute enable bits alone when they matter;
+       flipping them under a running OS is dangerous. */
+    uint64_t fSkipMask = vbCpuRepSupportsLongMode() ? MSR_K6_EFER_LME : 0;
+    if (   (uValue & MSR_K6_EFER_NXE)
+        || vbCpuRepSupportsNX())
+        fSkipMask |= MSR_K6_EFER_NXE;
+    return reportMsr_GenFunctionEx(uMsr, NULL, uValue, fSkipMask, NULL);
+}
+
+
+/**
+ * Deals with the MC4_MISCn (n >= 1) range and the following reserved MSRs.
+ *
+ * @returns VBox status code.
+ * @param   paMsrs              Pointer to the first MSR.
+ * @param   cMsrs               The number of MSRs in the array @a paMsr.
+ * @param   pidxLoop            Index variable that should be advanced to the
+ *                              last MSR entry in the range.
+ */
+static int reportMsr_AmdFam10hMc4MiscN(VBCPUREPMSR const *paMsrs, uint32_t cMsrs, uint32_t *pidxLoop)
+{
+    /* Count registers. */
+    uint32_t cRegs = 1;
+    while (   cRegs < cMsrs
+           && cRegs < 8
+           && paMsrs[cRegs].uMsr == paMsrs[0].uMsr + cRegs)
+        cRegs++;
+
+    /* Probe & report used MSRs.  All used registers must exhibit identical
+       ignore/GP masks to be reported as one range. */
+    uint64_t fIgnMask = 0;
+    uint64_t fGpMask  = 0;
+    uint32_t cUsed    = 0;
+    while (cUsed < cRegs)
+    {
+        uint64_t fIgnMaskN = 0;
+        uint64_t fGpMaskN  = 0;
+        int rc = msrProberModifyBitChanges(paMsrs[cUsed].uMsr, &fIgnMaskN, &fGpMaskN, 0);
+        if (RT_FAILURE(rc))
+            return rc;
+        if (fIgnMaskN == UINT64_MAX || fGpMaskN == UINT64_MAX)
+            break; /* Fully ignored or fully GP'ing => reserved register. */
+        if (cUsed == 0)
+        {
+            fIgnMask = fIgnMaskN;
+            fGpMask  = fGpMaskN;
+        }
+        else if (   fIgnMaskN != fIgnMask
+                 || fGpMaskN  != fGpMask)
+            return RTMsgErrorRc(VERR_NOT_EQUAL, "AmdFam10hMc4MiscN mismatch: fIgn=%#llx/%#llx fGp=%#llx/%#llx uMsr=%#x\n",
+                                fIgnMaskN, fIgnMask, fGpMaskN, fGpMask, paMsrs[cUsed].uMsr);
+        cUsed++;
+    }
+    if (cUsed > 0)
+        printMsrRangeFunctionEx(paMsrs[0].uMsr, paMsrs[cUsed - 1].uMsr, "AmdFam10hMc4MiscN", NULL, 0, fIgnMask, fGpMask, NULL);
+
+    /* Probe & report reserved MSRs.  These must read as zero and either
+       ignore all writes or GP on all writes. */
+    uint32_t cReserved = 0;
+    while (cUsed + cReserved < cRegs)
+    {
+        fIgnMask = fGpMask = 0;
+        int rc = msrProberModifyBitChanges(paMsrs[cUsed + cReserved].uMsr, &fIgnMask, &fGpMask, 0);
+        if (RT_FAILURE(rc))
+            return rc;
+        if ((fIgnMask != UINT64_MAX && fGpMask != UINT64_MAX) || paMsrs[cUsed + cReserved].uValue)
+            return RTMsgErrorRc(VERR_NOT_EQUAL,
+                                "Unexpected reserved AmdFam10hMc4MiscN: fIgn=%#llx fGp=%#llx uMsr=%#x uValue=%#llx\n",
+                                fIgnMask, fGpMask, paMsrs[cUsed + cReserved].uMsr, paMsrs[cUsed + cReserved].uValue);
+        cReserved++;
+    }
+    if (cReserved > 0 && fIgnMask == UINT64_MAX)
+        printMsrRangeValueIgnoreWrites(paMsrs[cUsed].uMsr, paMsrs[cUsed + cReserved - 1].uMsr, 0, NULL);
+    else if (cReserved > 0 && fGpMask == UINT64_MAX)
+        printMsrRangeValueReadOnly(paMsrs[cUsed].uMsr, paMsrs[cUsed + cReserved - 1].uMsr, 0, NULL);
+
+    *pidxLoop += cRegs - 1;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Deals with the AMD PERF_CTL range.
+ *
+ * @returns VBox status code.
+ * @param   paMsrs              Pointer to the first MSR.
+ * @param   cMsrs               The number of MSRs in the array @a paMsr.
+ * @param   pidxLoop            Index variable that should be advanced to the
+ *                              last MSR entry in the range.
+ */
+static int reportMsr_AmdK8PerfCtlN(VBCPUREPMSR const *paMsrs, uint32_t cMsrs, uint32_t *pidxLoop)
+{
+    uint32_t uMsr = paMsrs[0].uMsr;
+    Assert(uMsr == 0xc0010000);
+
+    /* Pre-bulldozer CPUs get the generic range treatment. */
+    if (!CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch))
+        return reportMsr_GenRangeFunction(paMsrs, cMsrs, 4, "AmdK8PerfCtlN", pidxLoop);
+
+    /* Family 15h (bulldozer +) aliases these registers sparsely onto c001020x. */
+    for (uint32_t iReg = 0; iReg < 4; iReg++)
+        printMsrAlias(uMsr + iReg, 0xc0010200 + iReg * 2, NULL);
+    *pidxLoop += 3;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Deals with the AMD PERF_CTR range.
+ *
+ * @returns VBox status code.
+ * @param   paMsrs              Pointer to the first MSR.
+ * @param   cMsrs               The number of MSRs in the array @a paMsr.
+ * @param   pidxLoop            Index variable that should be advanced to the
+ *                              last MSR entry in the range.
+ */
+static int reportMsr_AmdK8PerfCtrN(VBCPUREPMSR const *paMsrs, uint32_t cMsrs, uint32_t *pidxLoop)
+{
+    uint32_t uMsr = paMsrs[0].uMsr;
+    Assert(uMsr == 0xc0010004);
+
+    /* Pre-bulldozer CPUs get the generic range treatment. */
+    if (!CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch))
+        return reportMsr_GenRangeFunction(paMsrs, cMsrs, 4, "AmdK8PerfCtrN", pidxLoop);
+
+    /* Family 15h (bulldozer +) aliases these registers sparsely onto c001020x. */
+    for (uint32_t iReg = 0; iReg < 4; iReg++)
+        printMsrAlias(uMsr + iReg, 0xc0010201 + iReg * 2, NULL);
+    *pidxLoop += 3;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Deals carefully with the SYS_CFG register.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR number.
+ * @param   uValue              The current value.
+ */
+static int reportMsr_AmdK8SysCfg(uint32_t uMsr, uint64_t uValue)
+{
+    uint64_t fSkipMask = 0;
+
+    /* Turns out there are lots of killer bits here, at least on Opteron 2384
+       and on bulldozer (probing bit 21, MtrrTom2En, restarts a bulldozer
+       running win64 - it is marked reserved in the family 0fh BKDG but
+       appears in family 10h).  Skip all known bits for all K8+ parts.
+       (Consolidated from one if-statement per bit; behavior unchanged.) */
+    if (g_enmMicroarch >= kCpumMicroarch_AMD_K8_65nm /* Not sure when introduced - harmless? */)
+        fSkipMask |= RT_BIT(22); /* Tom2ForceMemTypeWB */
+    if (g_enmMicroarch >= kCpumMicroarch_AMD_K8_First)
+        fSkipMask |= RT_BIT(21)  /* MtrrTom2En */
+                  |  RT_BIT(20)  /* MtrrVarDramEn */
+                  |  RT_BIT(19)  /* MtrrFixDramModEn */
+                  |  RT_BIT(18)  /* MtrrFixDramEn */
+                  |  RT_BIT(17)  /* SysUcLockEn */
+                  |  RT_BIT(16); /* ChgToDirtyDis */
+    if (g_enmMicroarch >= kCpumMicroarch_AMD_K8_First && g_enmMicroarch < kCpumMicroarch_AMD_15h_First)
+        fSkipMask |= RT_BIT(10)  /* SetDirtyEnO */
+                  |  RT_BIT(9);  /* SetDirtyEnS */
+    if (   CPUMMICROARCH_IS_AMD_FAM_8H(g_enmMicroarch)
+        || CPUMMICROARCH_IS_AMD_FAM_10H(g_enmMicroarch))
+        fSkipMask |= RT_BIT(8);  /* SetDirtyEnE */
+    if (   CPUMMICROARCH_IS_AMD_FAM_8H(g_enmMicroarch)
+        || CPUMMICROARCH_IS_AMD_FAM_11H(g_enmMicroarch) )
+        fSkipMask |= RT_BIT(7)   /* SysVicLimit */
+                  |  RT_BIT(6)   /* SysVicLimit */
+                  |  RT_BIT(5)   /* SysVicLimit */
+                  |  RT_BIT(4)   /* SysAckLimit */
+                  |  RT_BIT(3)   /* SysAckLimit */
+                  |  RT_BIT(2)   /* SysAckLimit */
+                  |  RT_BIT(1)   /* SysAckLimit */
+                  |  RT_BIT(0)   /* SysAckLimit */;
+
+    return reportMsr_GenFunctionEx(uMsr, NULL, uValue, fSkipMask, annotateValue(uValue));
+}
+
+
+/**
+ * Deals carefully with the HWCR register.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR number.
+ * @param   uValue              The current value.
+ */
+static int reportMsr_AmdK8HwCr(uint32_t uMsr, uint64_t uValue)
+{
+    /* Bits that are always left alone. */
+    uint64_t fSkipMask = RT_BIT(8)   /* #IGNNE port emulation */
+                       | RT_BIT(4)   /* INVD to WBINVD */
+                       | RT_BIT(3)   /* TLBCACHEDIS */
+                       | RT_BIT(0);  /* SMMLOCK */
+
+    /* Trouble on Opteron 2384, skip some of the known bits. */
+    if (g_enmMicroarch >= kCpumMicroarch_AMD_K10 && !CPUMMICROARCH_IS_AMD_FAM_11H(g_enmMicroarch))
+        fSkipMask |= /*RT_BIT(10)*/ 0  /* MonMwaitUserEn */
+                  |  RT_BIT(9);  /* MonMwaitDis */
+    if (   CPUMMICROARCH_IS_AMD_FAM_8H(g_enmMicroarch)
+        || CPUMMICROARCH_IS_AMD_FAM_11H(g_enmMicroarch) )
+        fSkipMask |= RT_BIT(7)   /* DisLock */
+                  |  RT_BIT(6);  /* FFDis (TLB flush filter) */
+    if (   CPUMMICROARCH_IS_AMD_FAM_8H(g_enmMicroarch)
+        || CPUMMICROARCH_IS_AMD_FAM_10H(g_enmMicroarch)
+        || CPUMMICROARCH_IS_AMD_FAM_11H(g_enmMicroarch) )
+        fSkipMask |= RT_BIT(1);  /* SLOWFENCE */
+
+    return reportMsr_GenFunctionEx(uMsr, NULL, uValue, fSkipMask, annotateValue(uValue));
+}
+
+
+/**
+ * Deals carefully with a IORRBasei register.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR number.
+ * @param   uValue              The current value.
+ */
+static int reportMsr_AmdK8IorrBaseN(uint32_t uMsr, uint64_t uValue)
+{
+    /* Leave the known bits alone, as harm seems to come from messing with them. */
+    uint64_t const fPhysMask = (RT_BIT_64(vbCpuRepGetPhysAddrWidth()) - 1) & X86_PAGE_4K_BASE_MASK;
+    uint64_t const fSkipMask = fPhysMask | RT_BIT(4) | RT_BIT(3);
+    return reportMsr_GenFunctionEx(uMsr, NULL, (uMsr - 0xc0010016) / 2, fSkipMask, annotateValue(uValue));
+}
+
+
+/**
+ * Deals carefully with a IORRMaski register.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR number.
+ * @param   uValue              The current value.
+ */
+static int reportMsr_AmdK8IorrMaskN(uint32_t uMsr, uint64_t uValue)
+{
+    /* Leave the known bits alone, as harm seems to come from messing with them. */
+    uint64_t const fPhysMask = (RT_BIT_64(vbCpuRepGetPhysAddrWidth()) - 1) & X86_PAGE_4K_BASE_MASK;
+    uint64_t const fSkipMask = fPhysMask | RT_BIT(11);
+    return reportMsr_GenFunctionEx(uMsr, NULL, (uMsr - 0xc0010017) / 2, fSkipMask, annotateValue(uValue));
+}
+
+
+/**
+ * Deals carefully with the TOP_MEM and TOP_MEM2 registers.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR number (0xc001001a or 0xc001001d).
+ * @param   uValue              The current value.
+ */
+static int reportMsr_AmdK8TopMemN(uint32_t uMsr, uint64_t uValue)
+{
+    /* Skip know bits here, as harm seems to come from messing with them. */
+    uint64_t fSkipMask = (RT_BIT_64(vbCpuRepGetPhysAddrWidth()) - 1) & ~(RT_BIT_64(23) - 1);
+    /* Passes 1 as the value for TOP_MEM2 (0xc001001d), 0 for TOP_MEM. */
+    return reportMsr_GenFunctionEx(uMsr, NULL, uMsr == 0xc001001d, fSkipMask, annotateValue(uValue));
+}
+
+
+/**
+ * Deals with the AMD P-state config range.
+ *
+ * @returns VBox status code.
+ * @param   paMsrs              Pointer to the first MSR.
+ * @param   cMsrs               The number of MSRs in the array @a paMsr.
+ * @param   pidxLoop            Index variable that should be advanced to the
+ *                              last MSR entry in the range.
+ */
+static int reportMsr_AmdFam10hPStateN(VBCPUREPMSR const *paMsrs, uint32_t cMsrs, uint32_t *pidxLoop)
+{
+    uint32_t uMsr = paMsrs[0].uMsr;
+    AssertRelease(uMsr == 0xc0010064);
+
+    /* Count them (at most 8 P-state registers). */
+    uint32_t cRegs = 1;
+    while (   cRegs < 8
+           && cRegs < cMsrs
+           && paMsrs[cRegs].uMsr == uMsr + cRegs)
+        cRegs++;
+
+    /* Figure out which bits we should skip when probing.  This is based on
+       specs and may need adjusting for real life when handy. */
+    uint64_t fSkipMask = RT_BIT_64(63);             /* PstateEn */
+    fSkipMask |= RT_BIT_64(41) | RT_BIT_64(40);     /* IddDiv */
+    fSkipMask |= UINT64_C(0x000000ff00000000);      /* IddValue */
+    if (CPUMMICROARCH_IS_AMD_FAM_10H(g_enmMicroarch))
+        fSkipMask |= UINT32_C(0xfe000000);          /* NbVid - Northbridge VID */
+    if (   CPUMMICROARCH_IS_AMD_FAM_10H(g_enmMicroarch)
+        || CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch))
+        fSkipMask |= RT_BIT_32(22);                 /* NbDid or NbPstate. */
+    if (g_enmMicroarch >= kCpumMicroarch_AMD_15h_Piledriver) /* ?? - listed in 10-1Fh models BKDG as well as Fam16h */
+        fSkipMask |= RT_BIT_32(16);                 /* CpuVid[7] */
+    fSkipMask |= UINT32_C(0x0000fe00);              /* CpuVid[6:0] */
+    fSkipMask |= UINT32_C(0x000001c0);              /* CpuDid */
+    fSkipMask |= UINT32_C(0x0000003f);              /* CpuFid */
+
+    /* Probe and report them one by one since we're passing values instead of
+       register indexes to the functions. */
+    for (uint32_t i = 0; i < cRegs; i++)
+    {
+        uint64_t fIgnMask = 0;
+        uint64_t fGpMask = 0;
+        int rc = msrProberModifyBitChanges(uMsr + i, &fIgnMask, &fGpMask, fSkipMask);
+        if (RT_FAILURE(rc))
+            return rc;
+        printMsrFunctionExtended(uMsr + i, "AmdFam10hPStateN", NULL, paMsrs[i].uValue, fIgnMask, fGpMask,
+                                 annotateValue(paMsrs[i].uValue));
+    }
+
+    /* Advance. */
+    *pidxLoop += cRegs - 1;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Deals carefully with a COFVID control register.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR number.
+ * @param   uValue              The current value.
+ */
+static int reportMsr_AmdFam10hCofVidControl(uint32_t uMsr, uint64_t uValue)
+{
+    /* Skip known bits here, as harm seems to come from messing with them. */
+    uint64_t fSkipMask = 0;
+    if (CPUMMICROARCH_IS_AMD_FAM_10H(g_enmMicroarch))
+        fSkipMask |= UINT32_C(0xfe000000);          /* NbVid - Northbridge VID */
+    else if (g_enmMicroarch >= kCpumMicroarch_AMD_15h_First) /* Listed in preliminary Fam16h BKDG. */
+        fSkipMask |= UINT32_C(0xff000000);          /* NbVid - Northbridge VID - includes bit 24 for Fam15h and Fam16h. Odd... */
+    if (   CPUMMICROARCH_IS_AMD_FAM_10H(g_enmMicroarch)
+        || g_enmMicroarch >= kCpumMicroarch_AMD_15h_First) /* Listed in preliminary Fam16h BKDG. */
+        fSkipMask |= RT_BIT_32(22);                 /* NbDid or NbPstate. */
+    if (g_enmMicroarch >= kCpumMicroarch_AMD_15h_Piledriver) /* ?? - listed in 10-1Fh models BKDG as well as Fam16h */
+        fSkipMask |= RT_BIT_32(20);                 /* CpuVid[7] */
+    fSkipMask |= UINT32_C(0x00070000);              /* PstatId */
+    fSkipMask |= UINT32_C(0x0000fe00);              /* CpuVid[6:0] */
+    fSkipMask |= UINT32_C(0x000001c0);              /* CpuDid */
+    fSkipMask |= UINT32_C(0x0000003f);              /* CpuFid */
+
+    return reportMsr_GenFunctionEx(uMsr, NULL, uValue, fSkipMask, annotateValue(uValue));
+}
+
+
+/**
+ * Deals with the AMD [|L2I_|NB_]PERF_CT[LR] mixed ranges.
+ *
+ * Mixed here refers to the control and counter being in mixed in pairs as
+ * opposed to them being two separate parallel arrays like in the 0xc0010000
+ * area.
+ *
+ * @returns VBox status code.
+ * @param   paMsrs              Pointer to the first MSR.
+ * @param   cMsrs               The number of MSRs in the array @a paMsr.
+ * @param   cMax                The max number of MSRs (not counters).
+ * @param   pidxLoop            Index variable that should be advanced to the
+ *                              last MSR entry in the range.
+ */
+static int reportMsr_AmdGenPerfMixedRange(VBCPUREPMSR const *paMsrs, uint32_t cMsrs, uint32_t cMax, uint32_t *pidxLoop)
+{
+    uint32_t uMsr = paMsrs[0].uMsr;
+
+    /* Count them. */
+    uint32_t cRegs = 1;
+    while (   cRegs < cMax
+           && cRegs < cMsrs
+           && paMsrs[cRegs].uMsr == uMsr + cRegs)
+        cRegs++;
+    /* CTL/CTR come in pairs, so an odd count indicates trouble. */
+    if (cRegs & 1)
+        return RTMsgErrorRc(VERR_INVALID_PARAMETER, "PERF range at %#x is odd: cRegs=%#x\n", uMsr, cRegs);
+
+    /* Report them as individual entries, using default names and such. */
+    for (uint32_t i = 0; i < cRegs; i++)
+    {
+        uint64_t fIgnMask = 0;
+        uint64_t fGpMask  = 0;
+        int rc = msrProberModifyBitChanges(uMsr + i, &fIgnMask, &fGpMask, 0);
+        if (RT_FAILURE(rc))
+            return rc;
+        /* i / 2 yields the counter index since each counter has two MSRs. */
+        printMsrFunctionExtendedIdxVal(uMsr + i, NULL, NULL, i / 2, fIgnMask, fGpMask, annotateValue(paMsrs[i].uValue));
+    }
+
+    /* Advance. */
+    *pidxLoop += cRegs - 1;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Deals carefully with the instruction cache config register (0xc0011021).
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR number.
+ * @param   uValue              The current value.
+ */
+static int reportMsr_AmdK7InstrCacheCfg(uint32_t uMsr, uint64_t uValue)
+{
+    /* Leave the known bits alone as harm seems to come from messing with them. */
+    uint64_t fDontTouch = RT_BIT_64(9);                 /* DIS_SPEC_TLB_RLD */
+    if (CPUMMICROARCH_IS_AMD_FAM_10H(g_enmMicroarch))
+        fDontTouch |= RT_BIT_64(14);                    /* DIS_IND */
+    if (CPUMMICROARCH_IS_AMD_FAM_16H(g_enmMicroarch))
+        fDontTouch |= RT_BIT_64(26);                    /* DIS_WIDEREAD_PWR_SAVE */
+    if (CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch))
+        fDontTouch |= UINT64_C(0x1e)                    /* DisIcWayFilter */
+                    | RT_BIT_64(39)                     /* DisLoopPredictor */
+                    | RT_BIT_64(27)                     /* Unknown killer bit, possibly applicable to other microarchs. */
+                    | RT_BIT_64(28);                    /* Unknown killer bit, possibly applicable to other microarchs. */
+    return reportMsr_GenFunctionEx(uMsr, NULL, uValue, fDontTouch, annotateValue(uValue));
+}
+
+
+/**
+ * Deals carefully with a CU_CFG register.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR number.
+ * @param   uValue              The current value.
+ */
+static int reportMsr_AmdFam15hCombUnitCfg(uint32_t uMsr, uint64_t uValue)
+{
+    /* Leave the known bits alone as harm seems to come from messing with them. */
+    uint64_t fDontTouch = RT_BIT_64(23)     /* L2WayLock */
+                        | RT_BIT_64(22)     /* L2FirstLockWay */
+                        | RT_BIT_64(21)     /* L2FirstLockWay */
+                        | RT_BIT_64(20)     /* L2FirstLockWay */
+                        | RT_BIT_64(19)     /* L2FirstLockWay */
+                        | RT_BIT_64(10);    /* DcacheAggressivePriority */
+    /* Killer field. Seen bit 46 set, 45 clear. Messing with either means reboot/BSOD. */
+    fDontTouch |= RT_BIT_64(46) | RT_BIT_64(45);
+    return reportMsr_GenFunctionEx(uMsr, NULL, uValue, fDontTouch, annotateValue(uValue));
+}
+
+
+/**
+ * Deals carefully with a EX_CFG register.
+ *
+ * @returns VBox status code.
+ * @param   uMsr                The MSR number.
+ * @param   uValue              The current value.
+ */
+static int reportMsr_AmdFam15hExecUnitCfg(uint32_t uMsr, uint64_t uValue)
+{
+    /* LateSbzResync (bit 54) is documented; bit 35 is an undocumented killer bit.
+       Leave both alone as harm seems to come from messing with them. */
+    uint64_t const fDontTouch = RT_BIT_64(54) | RT_BIT_64(35);
+    return reportMsr_GenFunctionEx(uMsr, NULL, uValue, fDontTouch, annotateValue(uValue));
+}
+
+
+
+/**
+ * Produces the MSR database table, dispatching each detected MSR to the
+ * appropriate specialized reportMsr_* worker or the generic handler.
+ *
+ * @returns VBox status code.
+ * @param   paMsrs              The array of detected MSRs.
+ * @param   cMsrs               The number of entries in @a paMsrs.
+ */
+static int produceMsrReport(VBCPUREPMSR *paMsrs, uint32_t cMsrs)
+{
+    vbCpuRepDebug("produceMsrReport\n");
+    RTThreadSleep(500);
+
+    for (uint32_t i = 0; i < cMsrs; i++)
+    {
+        uint32_t    uMsr       = paMsrs[i].uMsr;
+        uint32_t    fFlags     = paMsrs[i].fFlags;
+        uint64_t    uValue     = paMsrs[i].uValue;
+        int         rc;
+#if 1
+        //if (uMsr < 0xc0000000)
+        //    continue;
+        if (uMsr >= 0xc0010000)
+        {
+            vbCpuRepDebug("produceMsrReport: uMsr=%#x (%s)...\n", uMsr, getMsrNameHandled(uMsr));
+            RTThreadSleep(1000);
+        }
+#endif
+        /*
+         * Deal with write only regs first to avoid having to avoid them all the time.
+         */
+        if (fFlags & VBCPUREPMSR_F_WRITE_ONLY)
+        {
+            if (uMsr == 0x00000079)
+                rc = printMsrWriteOnly(uMsr, NULL, NULL);
+            else
+                rc = reportMsr_Generic(uMsr, fFlags, uValue);
+        }
+        /*
+         * This shall be sorted by uMsr as much as possible.
+         */
+        else if (uMsr == 0x00000000 && g_enmVendor == CPUMCPUVENDOR_AMD && g_enmMicroarch >= kCpumMicroarch_AMD_K8_First)
+            rc = printMsrAlias(uMsr, 0x00000402, NULL);
+        else if (uMsr == 0x00000001 && g_enmVendor == CPUMCPUVENDOR_AMD && g_enmMicroarch >= kCpumMicroarch_AMD_K8_First)
+            rc = printMsrAlias(uMsr, 0x00000401, NULL); /** @todo not 101% correct on Fam15h and later, 0xc0010015[McstatusWrEn] effect differs. */
+        else if (uMsr == 0x0000001b)
+            rc = reportMsr_Ia32ApicBase(uMsr, uValue);
+        else if (uMsr == 0x00000040)
+            rc = reportMsr_GenRangeFunction(&paMsrs[i], cMsrs - i, 8 /*cMax*/, "IntelLastBranchFromToN", &i);
+        else if (uMsr == 0x000000c1)
+            rc = reportMsr_GenRangeFunction(&paMsrs[i], cMsrs - i,
+                                            g_enmMicroarch >= kCpumMicroarch_Intel_Core7_First ? 8 : 4 /*cMax*/,
+                                            NULL, &i);
+        else if (uMsr == 0x00000186 && !g_fIntelNetBurst)
+            rc = reportMsr_GenRangeFunction(&paMsrs[i], cMsrs - i, 8 /*cMax*/, "Ia32PerfEvtSelN", &i);
+        else if (uMsr == 0x000001a0)
+            rc = reportMsr_Ia32MiscEnable(uMsr, uValue);
+        else if (uMsr >= 0x000001a6 && uMsr <= 0x000001a7)
+            rc = reportMsr_GenRangeFunction(&paMsrs[i], cMsrs - i, 2 /*cMax*/, "IntelI7MsrOffCoreResponseN", &i);
+        else if (uMsr == 0x00000200)
+            rc = reportMsr_Ia32MtrrPhysBaseMaskN(&paMsrs[i], cMsrs - i, &i);
+        else if (uMsr >= 0x00000250 && uMsr <= 0x00000279)
+            rc = reportMsr_Ia32MtrrFixedOrPat(uMsr);
+        else if (uMsr >= 0x00000280 && uMsr <= 0x00000295)
+            rc = reportMsr_GenRangeFunctionEx(&paMsrs[i], cMsrs - i, 22 /*cMax*/, NULL, 0x00000280, true /*fEarlyEndOk*/, 0, &i);
+        else if (uMsr == 0x000002ff)
+            rc = reportMsr_Ia32MtrrDefType(uMsr);
+        else if (uMsr >= 0x00000309 && uMsr <= 0x0000030b)
+            rc = reportMsr_GenRangeFunctionEx(&paMsrs[i], cMsrs - i, 3 /*cMax*/, NULL, 0x00000309, true /*fEarlyEndOk*/, 0, &i);
+        else if (uMsr == 0x000003f8 || uMsr == 0x000003fc || uMsr == 0x0000060a)
+            rc = reportMsr_GenRangeFunctionEx(&paMsrs[i], cMsrs - i, 4, NULL, uMsr - 3, true, 0, &i);
+        else if (uMsr == 0x000003f9 || uMsr == 0x000003fd || uMsr == 0x0000060b)
+            rc = reportMsr_GenRangeFunctionEx(&paMsrs[i], cMsrs - i, 8, NULL, uMsr - 6, true, 0, &i);
+        else if (uMsr == 0x000003fa || uMsr == 0x000003fe || uMsr == 0x0000060c)
+            rc = reportMsr_GenRangeFunctionEx(&paMsrs[i], cMsrs - i, 8, NULL, uMsr - 7, true, 0, &i);
+        else if (uMsr >= 0x00000400 && uMsr <= 0x0000047f)
+            rc = reportMsr_Ia32McCtlStatusAddrMiscN(&paMsrs[i], cMsrs - i, &i);
+        else if (uMsr == 0x000004c1)
+            rc = reportMsr_GenRangeFunction(&paMsrs[i], cMsrs - i, 8, NULL, &i);
+        else if (uMsr == 0x00000680 || uMsr == 0x000006c0)
+            rc = reportMsr_GenRangeFunctionEx(&paMsrs[i], cMsrs - i, 16, NULL, uMsr, false, UINT64_C(0xffff800000000000), &i);
+        else if (uMsr >= 0x00000800 && uMsr <= 0x000008ff)
+            rc = reportMsr_GenX2Apic(&paMsrs[i], cMsrs - i, &i);
+        else if (uMsr == 0x00002000 && g_enmVendor == CPUMCPUVENDOR_INTEL)
+            rc = reportMsr_GenFunctionEx(uMsr, "IntelP6CrN", 0, X86_CR0_PE | X86_CR0_PG,
+                                         annotateIfMissingBits(uValue, X86_CR0_PE | X86_CR0_PG | X86_CR0_ET)); /* Was PE|PE|ET; the duplicated PE looked like a typo for PG (matches the skip mask above). */
+        else if (uMsr == 0x00002002 && g_enmVendor == CPUMCPUVENDOR_INTEL)
+            rc = reportMsr_GenFunctionEx(uMsr, "IntelP6CrN", 2, 0, annotateValue(uValue));
+        else if (uMsr == 0x00002003 && g_enmVendor == CPUMCPUVENDOR_INTEL)
+        {
+            uint64_t fCr3Mask = (RT_BIT_64(vbCpuRepGetPhysAddrWidth()) - 1) & (X86_CR3_PAE_PAGE_MASK | X86_CR3_AMD64_PAGE_MASK);
+            if (!vbCpuRepSupportsPae())
+                fCr3Mask &= X86_CR3_PAGE_MASK | X86_CR3_AMD64_PAGE_MASK;
+            rc = reportMsr_GenFunctionEx(uMsr, "IntelP6CrN", 3, fCr3Mask, annotateValue(uValue));
+        }
+        else if (uMsr == 0x00002004 && g_enmVendor == CPUMCPUVENDOR_INTEL)
+            rc = reportMsr_GenFunctionEx(uMsr, "IntelP6CrN", 4,
+                                         X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE | X86_CR4_SMXE,
+                                         annotateValue(uValue));
+        else if (uMsr == 0xc0000080)
+            rc = reportMsr_Amd64Efer(uMsr, uValue);
+        else if (uMsr == 0xc0000082 || uMsr == 0xc0000083 || uMsr == 0xc0000100 || uMsr == 0xc0000101 || uMsr == 0xc0000102)
+            rc = reportMsr_GenFunctionEx(uMsr, NULL, 0, UINT64_C(0xffff800000000000), annotateValue(uValue)); /* Canonical address hack. */
+        else if (uMsr >= 0xc0000408 && uMsr <= 0xc000040f)
+            rc = reportMsr_AmdFam10hMc4MiscN(&paMsrs[i], cMsrs - i, &i);
+        else if (uMsr == 0xc0010000)
+            rc = reportMsr_AmdK8PerfCtlN(&paMsrs[i], cMsrs - i, &i);
+        else if (uMsr == 0xc0010004)
+            rc = reportMsr_AmdK8PerfCtrN(&paMsrs[i], cMsrs - i, &i);
+        else if (uMsr == 0xc0010010)
+            rc = reportMsr_AmdK8SysCfg(uMsr, uValue);
+        else if (uMsr == 0xc0010015)
+            rc = reportMsr_AmdK8HwCr(uMsr, uValue);
+        else if (uMsr == 0xc0010016 || uMsr == 0xc0010018)
+            rc = reportMsr_AmdK8IorrBaseN(uMsr, uValue);
+        else if (uMsr == 0xc0010017 || uMsr == 0xc0010019)
+            rc = reportMsr_AmdK8IorrMaskN(uMsr, uValue);
+        else if (uMsr == 0xc001001a || uMsr == 0xc001001d)
+            rc = reportMsr_AmdK8TopMemN(uMsr, uValue);
+        else if (uMsr == 0xc0010030)
+            rc = reportMsr_GenRangeFunction(&paMsrs[i], cMsrs - i, 6, "AmdK8CpuNameN", &i);
+        else if (uMsr >= 0xc0010044 && uMsr <= 0xc001004a)
+            rc = reportMsr_GenRangeFunctionEx(&paMsrs[i], cMsrs - i, 7, "AmdK8McCtlMaskN", 0xc0010044, true /*fEarlyEndOk*/, 0, &i);
+        else if (uMsr == 0xc0010050)
+            rc = reportMsr_GenRangeFunction(&paMsrs[i], cMsrs - i, 4, "AmdK8SmiOnIoTrapN", &i);
+        else if (uMsr == 0xc0010064)
+            rc = reportMsr_AmdFam10hPStateN(&paMsrs[i], cMsrs - i, &i);
+        else if (uMsr == 0xc0010070)
+            rc = reportMsr_AmdFam10hCofVidControl(uMsr, uValue);
+        else if (uMsr == 0xc0010118 || uMsr == 0xc0010119)
+            rc = printMsrFunction(uMsr, NULL, NULL, annotateValue(uValue)); /* RAZ, write key. */
+        else if (uMsr == 0xc0010200)
+            rc = reportMsr_AmdGenPerfMixedRange(&paMsrs[i], cMsrs - i, 12, &i);
+        else if (uMsr == 0xc0010230)
+            rc = reportMsr_AmdGenPerfMixedRange(&paMsrs[i], cMsrs - i, 8, &i);
+        else if (uMsr == 0xc0010240)
+            rc = reportMsr_AmdGenPerfMixedRange(&paMsrs[i], cMsrs - i, 8, &i);
+        else if (uMsr == 0xc0011019 && g_enmMicroarch >= kCpumMicroarch_AMD_15h_Piledriver)
+            rc = reportMsr_GenRangeFunctionEx(&paMsrs[i], cMsrs - i, 3, "AmdK7DrXAddrMaskN", 0xc0011019 - 1,
+                                              false /*fEarlyEndOk*/, 0, &i);
+        else if (uMsr == 0xc0011021)
+            rc = reportMsr_AmdK7InstrCacheCfg(uMsr, uValue);
+        else if (uMsr == 0xc0011023 && CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch))
+            rc = reportMsr_AmdFam15hCombUnitCfg(uMsr, uValue);
+        else if (uMsr == 0xc0011027)
+            rc = reportMsr_GenRangeFunctionEx(&paMsrs[i], cMsrs - i, 1, "AmdK7DrXAddrMaskN", 0xc0011027,
+                                              false /*fEarlyEndOk*/, 0, &i);
+        else if (uMsr == 0xc001102c && CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch))
+            rc = reportMsr_AmdFam15hExecUnitCfg(uMsr, uValue);
+        /* generic handling. */
+        else
+            rc = reportMsr_Generic(uMsr, fFlags, uValue);
+
+        if (RT_FAILURE(rc))
+            return rc;
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Custom MSR hacking & probing.
+ *
+ * Called when the '-d' option is given.
+ *
+ * @returns VBox status code.
+ */
+static int hackingMsrs(void)
+{
+#if 0
+    vbCpuRepDebug("\nhackingMsrs:\n"); RTStrmFlush(g_pDebugOut); RTThreadSleep(2000);
+
+    uint32_t uMsr = 0xc0000081;
+    vbCpuRepDebug("%#x: msrProberModifyNoChange -> %RTbool\n", uMsr, msrProberModifyNoChange(uMsr));
+    RTThreadSleep(3000);
+
+    vbCpuRepDebug("%#x: msrProberModifyBit 30 -> %d\n", uMsr, msrProberModifyBit(uMsr, 30));
+    RTThreadSleep(3000);
+
+    vbCpuRepDebug("%#x: msrProberModifyZero -> %RTbool\n", uMsr, msrProberModifyZero(uMsr));
+    RTThreadSleep(3000);
+
+    /* NOTE(review): stops at bit 62; confirm bit 63 is deliberately skipped. */
+    for (uint32_t i = 0; i < 63; i++)
+    {
+        /* Fixed: the format string has three specifiers but only one argument
+           was passed, which is undefined behavior with varargs. */
+        vbCpuRepDebug("%#x: bit=%02u -> %d\n", uMsr, i, msrProberModifyBit(uMsr, i));
+        RTThreadSleep(500);
+    }
+#else
+
+    uint32_t uMsr = 0xc0010015;
+    uint64_t uValue = 0;
+    msrProberRead(uMsr, &uValue);
+    reportMsr_AmdK8HwCr(uMsr, uValue);
+#endif
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Probes the MSRs and produces the MSR range table, or runs the hacking mode.
+ *
+ * @returns VBox status code.
+ * @param   fHacking    Whether to run hackingMsrs() ('-d') instead of the
+ *                      normal report.
+ * @param   pszNameC    The C-sanitized CPU name, optional (NULL in MSRs-only
+ *                      mode).
+ * @param   pszCpuDesc  The CPU description string, optional.
+ * @param   pszMsrMask  Output buffer for the MSR mask expression, optional.
+ * @param   cbMsrMask   Size of the @a pszMsrMask buffer (0 if none).
+ */
+static int probeMsrs(bool fHacking, const char *pszNameC, const char *pszCpuDesc,
+                     char *pszMsrMask, size_t cbMsrMask)
+{
+    /* Initialize the mask. */
+    if (pszMsrMask && cbMsrMask)
+        RTStrCopy(pszMsrMask, cbMsrMask, "UINT32_MAX /** @todo */");
+
+    /*
+     * Are MSRs supported by the CPU?
+     */
+    if (   !ASMIsValidStdRange(ASMCpuId_EAX(0))
+        || !(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_MSR) )
+    {
+        vbCpuRepDebug("Skipping MSR probing, CPUID indicates there isn't any MSR support.\n");
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Initialize the support library and check if we can read MSRs.
+     */
+    int rc = SUPR3Init(NULL);
+    if (RT_FAILURE(rc))
+    {
+        vbCpuRepDebug("warning: Unable to initialize the support library (%Rrc), skipping MSR detection.\n", rc);
+        return VINF_SUCCESS;
+    }
+    uint64_t uValue;
+    bool     fGp;
+    rc = SUPR3MsrProberRead(MSR_IA32_TSC, NIL_RTCPUID, &uValue, &fGp);
+    if (RT_FAILURE(rc))
+    {
+        vbCpuRepDebug("warning: MSR probing not supported by the support driver (%Rrc), skipping MSR detection.\n", rc);
+        return VINF_SUCCESS;
+    }
+    vbCpuRepDebug("MSR_IA32_TSC: %#llx fGp=%RTbool\n", uValue, fGp);
+    rc = SUPR3MsrProberRead(0xdeadface, NIL_RTCPUID, &uValue, &fGp);
+    vbCpuRepDebug("0xdeadface: %#llx fGp=%RTbool rc=%Rrc\n", uValue, fGp, rc);
+
+    /*
+     * Initialize globals we use.
+     */
+    uint32_t uEax, uEbx, uEcx, uEdx;
+    ASMCpuIdExSlow(0, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
+    if (!ASMIsValidStdRange(uEax))
+        return RTMsgErrorRc(VERR_NOT_SUPPORTED, "Invalid std CPUID range: %#x\n", uEax);
+    g_enmVendor = CPUMR3CpuIdDetectVendorEx(uEax, uEbx, uEcx, uEdx);
+
+    ASMCpuIdExSlow(1, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
+    g_enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx(g_enmVendor,
+                                                     ASMGetCpuFamily(uEax),
+                                                     ASMGetCpuModel(uEax, g_enmVendor == CPUMCPUVENDOR_INTEL),
+                                                     ASMGetCpuStepping(uEax));
+    g_fIntelNetBurst = CPUMMICROARCH_IS_INTEL_NETBURST(g_enmMicroarch);
+
+    /*
+     * Do the probing.
+     */
+    if (fHacking)
+        rc = hackingMsrs();
+    else
+    {
+        /* Determine the MSR mask. Guard the buffer writes: the mask buffer is
+           NULL/0 when invoked in MSRs-only mode (see main()). */
+        uint32_t fMsrMask = determineMsrAndMask();
+        if (pszMsrMask && cbMsrMask)
+        {
+            if (fMsrMask == UINT32_MAX)
+                RTStrCopy(pszMsrMask, cbMsrMask, "UINT32_MAX");
+            else
+                RTStrPrintf(pszMsrMask, cbMsrMask, "UINT32_C(%#x)", fMsrMask);
+        }
+
+        /* Detect MSR. */
+        VBCPUREPMSR    *paMsrs;
+        uint32_t        cMsrs;
+        rc = findMsrs(&paMsrs, &cMsrs, fMsrMask);
+        if (RT_FAILURE(rc))
+            return rc;
+
+        /* Probe the MSRs and spit out the database table. The name/description
+           strings may be NULL in MSRs-only mode; don't feed NULL to %s. */
+        vbCpuRepPrintf("\n"
+                       "#ifndef CPUM_DB_STANDALONE\n"
+                       "/**\n"
+                       " * MSR ranges for %s.\n"
+                       " */\n"
+                       "static CPUMMSRRANGE const g_aMsrRanges_%s[] = \n{\n",
+                       pszCpuDesc ? pszCpuDesc : "probed CPU",
+                       pszNameC ? pszNameC : "probed");
+        rc = produceMsrReport(paMsrs, cMsrs);
+        vbCpuRepPrintf("};\n"
+                       "#endif /* !CPUM_DB_STANDALONE */\n"
+                       "\n"
+                       );
+
+        RTMemFree(paMsrs);
+        paMsrs = NULL;
+    }
+    return rc;
+}
+
+
+/**
+ * Collects the CPUID leaves and prints them as a CPUMCPUIDLEAF array.
+ *
+ * @returns VBox status code.
+ * @param   pszNameC            The C-sanitized CPU name (array name suffix).
+ * @param   pszCpuDesc          The CPU description (doc comment).
+ */
+static int produceCpuIdArray(const char *pszNameC, const char *pszCpuDesc)
+{
+    /*
+     * Collect the data.
+     */
+    PCPUMCPUIDLEAF  paLeaves;
+    uint32_t        cLeaves;
+    int rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves);
+    if (RT_FAILURE(rc))
+        return RTMsgErrorRc(rc, "CPUMR3CollectCpuIdInfo failed: %Rrc\n", rc);
+
+    /*
+     * Dump the array.
+     */
+    vbCpuRepPrintf("\n"
+                   "#ifndef CPUM_DB_STANDALONE\n"
+                   "/**\n"
+                   " * CPUID leaves for %s.\n"
+                   " */\n"
+                   "static CPUMCPUIDLEAF const g_aCpuIdLeaves_%s[] = \n{\n",
+                   pszCpuDesc,
+                   pszNameC);
+    for (uint32_t i = 0; i < cLeaves; i++)
+    {
+        vbCpuRepPrintf("    { %#010x, %#010x, ", paLeaves[i].uLeaf, paLeaves[i].uSubLeaf);
+        if (paLeaves[i].fSubLeafMask == UINT32_MAX)
+            vbCpuRepPrintf("UINT32_MAX, ");
+        else
+            vbCpuRepPrintf("%#010x, ", paLeaves[i].fSubLeafMask);
+        vbCpuRepPrintf("%#010x, %#010x, %#010x, %#010x, ",
+                     paLeaves[i].uEax, paLeaves[i].uEbx, paLeaves[i].uEcx, paLeaves[i].uEdx);
+        if (paLeaves[i].fFlags == 0)
+            vbCpuRepPrintf("0 },\n");
+        else
+        {
+            vbCpuRepPrintf("0");
+            uint32_t fFlags = paLeaves[i].fFlags;
+            if (paLeaves[i].fFlags & CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED)
+            {
+                vbCpuRepPrintf(" | CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED");
+                fFlags &= ~CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED;
+            }
+            if (fFlags)
+            {
+                RTMemFree(paLeaves);
+                /* Fixed: rc is VINF_SUCCESS at this point, so returning it
+                   would report an error yet signal success to the caller. */
+                return RTMsgErrorRc(VERR_INTERNAL_ERROR, "Unknown CPUID flags %#x\n", fFlags);
+            }
+            vbCpuRepPrintf(" },\n");
+        }
+    }
+    vbCpuRepPrintf("};\n"
+                   "#endif /* !CPUM_DB_STANDALONE */\n"
+                   "\n");
+    RTMemFree(paLeaves);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Translates a CPU vendor enum value into a short display name.
+ *
+ * @returns Read-only vendor name string ("invalid-cpu-vendor" for any value
+ *          without a mapping).
+ * @param   enmCpuVendor        The vendor to translate.
+ */
+static const char *cpuVendorToString(CPUMCPUVENDOR enmCpuVendor)
+{
+    const char *pszVendor = "invalid-cpu-vendor";
+    switch (enmCpuVendor)
+    {
+        case CPUMCPUVENDOR_INTEL:   pszVendor = "Intel"; break;
+        case CPUMCPUVENDOR_AMD:     pszVendor = "AMD";   break;
+        case CPUMCPUVENDOR_VIA:     pszVendor = "VIA";   break;
+        case CPUMCPUVENDOR_CYRIX:   pszVendor = "Cyrix"; break;
+        default:                    break;
+    }
+    return pszVendor;
+}
+
+
+/**
+ * Produces the complete CPU database entry: file header, CPUID leaf array,
+ * MSR ranges (best effort) and the CPUMDBENTRY record.
+ *
+ * @returns VBox status code.
+ */
+static int produceCpuReport(void)
+{
+    /*
+     * Figure the cpu vendor.
+     */
+    if (!ASMHasCpuId())
+        return RTMsgErrorRc(VERR_NOT_SUPPORTED, "No CPUID support.\n");
+    uint32_t uEax, uEbx, uEcx, uEdx;
+    ASMCpuIdExSlow(0, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
+    if (!ASMIsValidStdRange(uEax))
+        return RTMsgErrorRc(VERR_NOT_SUPPORTED, "Invalid std CPUID range: %#x\n", uEax);
+
+    CPUMCPUVENDOR enmVendor = CPUMR3CpuIdDetectVendorEx(uEax, uEbx, uEcx, uEdx);
+    if (enmVendor == CPUMCPUVENDOR_UNKNOWN)
+        return RTMsgErrorRc(VERR_NOT_IMPLEMENTED, "Unknown CPU vendor: %.4s%.4s%.4s\n", &uEbx, &uEdx, &uEcx);
+    vbCpuRepDebug("CPU Vendor: %s - %.4s%.4s%.4s\n", CPUMR3CpuVendorName(enmVendor), &uEbx, &uEdx, &uEcx);
+
+    /*
+     * Determine the micro arch.
+     */
+    ASMCpuIdExSlow(1, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
+    CPUMMICROARCH enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx(enmVendor,
+                                                                 ASMGetCpuFamily(uEax),
+                                                                 ASMGetCpuModel(uEax, enmVendor == CPUMCPUVENDOR_INTEL),
+                                                                 ASMGetCpuStepping(uEax));
+
+    /*
+     * Generate a name.
+     */
+    char  szName[16*3+1];
+    char  szNameC[16*3+1];
+    char  szNameRaw[16*3+1];
+    char *pszName    = szName;
+    char *pszCpuDesc = (char *)"";
+
+    ASMCpuIdExSlow(0x80000000, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
+    if (ASMIsValidExtRange(uEax) && uEax >= UINT32_C(0x80000004))
+    {
+        /* Get the raw name and strip leading spaces. */
+        ASMCpuIdExSlow(0x80000002, 0, 0, 0, &szNameRaw[0 +  0], &szNameRaw[4 +  0], &szNameRaw[8 +  0], &szNameRaw[12 +  0]);
+        ASMCpuIdExSlow(0x80000003, 0, 0, 0, &szNameRaw[0 + 16], &szNameRaw[4 + 16], &szNameRaw[8 + 16], &szNameRaw[12 + 16]);
+        ASMCpuIdExSlow(0x80000004, 0, 0, 0, &szNameRaw[0 + 32], &szNameRaw[4 + 32], &szNameRaw[8 + 32], &szNameRaw[12 + 32]);
+        szNameRaw[48] = '\0';
+        pszCpuDesc = RTStrStrip(szNameRaw);
+        vbCpuRepDebug("Name2: %s\n", pszCpuDesc);
+
+        /* Reduce the name. */
+        pszName = strcpy(szName, pszCpuDesc);
+
+        /* Drop everything from the frequency suffix onwards. */
+        static const char * const s_apszSuffixes[] =
+        {
+            "CPU @",
+        };
+        for (uint32_t i = 0; i < RT_ELEMENTS(s_apszSuffixes); i++)
+        {
+            char *pszHit = strstr(pszName, s_apszSuffixes[i]);
+            if (pszHit)
+                RT_BZERO(pszHit, strlen(pszHit));
+        }
+
+        /* Remove noise words and trademark markers. */
+        static const char * const s_apszWords[] =
+        {
+            "(TM)", "(tm)", "(R)", "(r)", "Processor", "CPU", "@",
+        };
+        for (uint32_t i = 0; i < RT_ELEMENTS(s_apszWords); i++)
+        {
+            const char *pszWord = s_apszWords[i];
+            size_t      cchWord = strlen(pszWord);
+            char       *pszHit;
+            while ((pszHit = strstr(pszName, pszWord)) != NULL)
+                memmove(pszHit, pszHit + cchWord, strlen(pszHit + cchWord) + 1);
+        }
+
+        /* Collapse runs of blanks into single spaces. */
+        RTStrStripR(pszName);
+        for (char *psz = pszName; *psz; psz++)
+            if (RT_C_IS_BLANK(*psz))
+            {
+                size_t cchBlanks = 1;
+                while (RT_C_IS_BLANK(psz[cchBlanks]))
+                    cchBlanks++;
+                *psz = ' ';
+                if (cchBlanks > 1)
+                    memmove(psz + 1, psz + cchBlanks, strlen(psz + cchBlanks) + 1);
+            }
+        pszName = RTStrStripL(pszName);
+        vbCpuRepDebug("Name: %s\n", pszName);
+
+        /* Make it C/C++ acceptable. */
+        strcpy(szNameC, pszName);
+        for (char *psz = szNameC; *psz; psz++)
+            if (!RT_C_IS_ALNUM(*psz) && *psz != '_')
+                *psz = '_';
+        vbCpuRepDebug("NameC: %s\n", szNameC);
+    }
+    else
+    {
+        /* No brand string leaves; synthesize a name from vendor/family/model/stepping. */
+        ASMCpuIdExSlow(1, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
+        RTStrPrintf(szNameC, sizeof(szNameC), "%s_%u_%u_%u", cpuVendorToString(enmVendor), ASMGetCpuFamily(uEax),
+                    ASMGetCpuModel(uEax, enmVendor == CPUMCPUVENDOR_INTEL), ASMGetCpuStepping(uEax));
+        pszCpuDesc = pszName = szNameC;
+        vbCpuRepDebug("Name/NameC: %s\n", szNameC);
+    }
+
+    /*
+     * Print a file header, if we're not outputting to stdout (assumption being
+     * that stdout is used while hacking the reporter and too much output is
+     * unwanted).
+     */
+    if (g_pReportOut)
+    {
+        RTTIMESPEC Now;
+        char       szNow[64];
+        RTTimeSpecToString(RTTimeNow(&Now), szNow, sizeof(szNow));
+        char *pchDot = strchr(szNow, '.');
+        if (pchDot)
+            strcpy(pchDot, "Z");
+
+        vbCpuRepPrintf("/* $" "Id" "$ */\n"
+                       "/** @file\n"
+                       " * CPU database entry \"%s\".\n"
+                       " * Generated at %s by VBoxCpuReport v%sr%s on %s.%s.\n"
+                       " */\n"
+                       "\n"
+                       "/*\n"
+                       " * Copyright (C) 2013 Oracle Corporation\n"
+                       " *\n"
+                       " * This file is part of VirtualBox Open Source Edition (OSE), as\n"
+                       " * available from http://www.virtualbox.org. This file is free software;\n"
+                       " * you can redistribute it and/or modify it under the terms of the GNU\n"
+                       " * General Public License (GPL) as published by the Free Software\n"
+                       " * Foundation, in version 2 as it comes in the \"COPYING\" file of the\n"
+                       " * VirtualBox OSE distribution. VirtualBox OSE is distributed in the\n"
+                       " * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.\n"
+                       " */\n"
+                       "\n"
+                       "#ifndef VBOX_CPUDB_%s\n"
+                       "#define VBOX_CPUDB_%s\n"
+                       "\n",
+                       pszName,
+                       szNow, RTBldCfgVersion(), RTBldCfgRevisionStr(), RTBldCfgTarget(), RTBldCfgTargetArch(),
+                       szNameC, szNameC);
+    }
+
+    /*
+     * Extract CPUID based data.
+     */
+    int rc = produceCpuIdArray(szNameC, pszCpuDesc);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    CPUMUKNOWNCPUID enmUnknownMethod;
+    CPUMCPUID       DefUnknown;
+    rc = CPUMR3CpuIdDetectUnknownLeafMethod(&enmUnknownMethod, &DefUnknown);
+    if (RT_FAILURE(rc))
+        return RTMsgErrorRc(rc, "CPUMR3DetectCpuIdUnknownMethod failed: %Rrc\n", rc);
+    vbCpuRepDebug("enmUnknownMethod=%s\n", CPUMR3CpuIdUnknownLeafMethodName(enmUnknownMethod));
+
+    /*
+     * Do the MSRs, if we can. (Best effort: a failure still leaves a usable
+     * szMsrMask default, so the return code is deliberately not checked.)
+     */
+    char szMsrMask[64];
+    probeMsrs(false /*fHacking*/, szNameC, pszCpuDesc, szMsrMask, sizeof(szMsrMask));
+
+    /*
+     * Emit the CPUMDBENTRY record.
+     */
+    ASMCpuIdExSlow(1, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
+    vbCpuRepPrintf("\n"
+                   "/**\n"
+                   " * Database entry for %s.\n"
+                   " */\n"
+                   "static CPUMDBENTRY const g_Entry_%s = \n"
+                   "{\n"
+                   "    /*.pszName          = */ \"%s\",\n"
+                   "    /*.pszFullName      = */ \"%s\",\n"
+                   "    /*.enmVendor        = */ CPUMCPUVENDOR_%s,\n"
+                   "    /*.uFamily          = */ %u,\n"
+                   "    /*.uModel           = */ %u,\n"
+                   "    /*.uStepping        = */ %u,\n"
+                   "    /*.enmMicroarch     = */ kCpumMicroarch_%s,\n"
+                   "    /*.fFlags           = */ 0,\n"
+                   "    /*.cMaxPhysAddrWidth= */ %u,\n"
+                   "    /*.paCpuIdLeaves    = */ NULL_ALONE(g_aCpuIdLeaves_%s),\n"
+                   "    /*.cCpuIdLeaves     = */ ZERO_ALONE(RT_ELEMENTS(g_aCpuIdLeaves_%s)),\n"
+                   "    /*.enmUnknownCpuId  = */ CPUMUKNOWNCPUID_%s,\n"
+                   "    /*.DefUnknownCpuId  = */ { %#010x, %#010x, %#010x, %#010x },\n"
+                   "    /*.fMsrMask         = */ %s,\n"
+                   "    /*.cMsrRanges       = */ ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_%s)),\n"
+                   "    /*.paMsrRanges      = */ NULL_ALONE(g_aMsrRanges_%s),\n"
+                   "};\n"
+                   "\n"
+                   "#endif /* !VBOX_CPUDB_%s */\n" /* Fixed: was VBOX_DB_%s, mismatching the opening #ifndef VBOX_CPUDB_ guard. */
+                   "\n",
+                   pszCpuDesc,
+                   szNameC,
+                   pszName,
+                   pszCpuDesc,
+                   CPUMR3CpuVendorName(enmVendor),
+                   ASMGetCpuFamily(uEax),
+                   ASMGetCpuModel(uEax, enmVendor == CPUMCPUVENDOR_INTEL),
+                   ASMGetCpuStepping(uEax),
+                   CPUMR3MicroarchName(enmMicroarch),
+                   vbCpuRepGetPhysAddrWidth(),
+                   szNameC,
+                   szNameC,
+                   CPUMR3CpuIdUnknownLeafMethodName(enmUnknownMethod),
+                   DefUnknown.eax,
+                   DefUnknown.ebx,
+                   DefUnknown.ecx,
+                   DefUnknown.edx,
+                   szMsrMask,
+                   szNameC,
+                   szNameC,
+                   szNameC
+                   );
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Program entry point: parses options, redirects output and runs the
+ * requested report/probe operation.
+ */
+int main(int argc, char **argv)
+{
+    int rc = RTR3InitExe(argc, &argv, 0 /*fFlags*/);
+    if (RT_FAILURE(rc))
+        return RTMsgInitFailure(rc);
+
+    /*
+     * Argument parsing?
+     */
+    static const RTGETOPTDEF s_aOptions[] =
+    {
+        { "--msrs-only", 'm', RTGETOPT_REQ_NOTHING },
+        { "--msrs-dev",  'd', RTGETOPT_REQ_NOTHING },
+        { "--output",    'o', RTGETOPT_REQ_STRING  },
+    };
+    RTGETOPTSTATE State;
+    RTGetOptInit(&State, argc, argv, &s_aOptions[0], RT_ELEMENTS(s_aOptions), 1, RTGETOPTINIT_FLAGS_OPTS_FIRST);
+
+    enum
+    {
+        kCpuReportOp_Normal,
+        kCpuReportOp_MsrsOnly,
+        kCpuReportOp_MsrsHacking
+    } enmOp = kCpuReportOp_Normal;
+    g_pReportOut = NULL;
+    g_pDebugOut  = g_pStdErr;
+    const char *pszOutput = NULL;
+
+    int iOpt;
+    RTGETOPTUNION ValueUnion;
+    while ((iOpt = RTGetOpt(&State, &ValueUnion)) != 0)
+    {
+        switch (iOpt)
+        {
+            case 'm':
+                enmOp = kCpuReportOp_MsrsOnly;
+                break;
+
+            case 'd':
+                enmOp = kCpuReportOp_MsrsHacking;
+                break;
+
+            case 'o':
+                pszOutput = ValueUnion.psz;
+                break;
+
+            case 'h':
+                /* Fixed: the usage text omitted the implemented -o/--output option. */
+                RTPrintf("Usage: VBoxCpuReport [-m|--msrs-only] [-d|--msrs-dev] [-o|--output <file>] [-h|--help] [-V|--version]\n");
+                RTPrintf("Internal tool for gathering information to the VMM CPU database.\n");
+                return RTEXITCODE_SUCCESS;
+            case 'V':
+                RTPrintf("%sr%s\n", RTBldCfgVersion(), RTBldCfgRevisionStr());
+                return RTEXITCODE_SUCCESS;
+            default:
+                return RTGetOptPrintError(iOpt, &ValueUnion);
+        }
+    }
+
+    /*
+     * Do the requested job.
+     */
+    rc = VERR_INTERNAL_ERROR;
+    switch (enmOp)
+    {
+        case kCpuReportOp_Normal:
+            /* switch output file. */
+            if (pszOutput)
+            {
+                /* Keep a .old backup of any existing regular output file. */
+                if (RTFileExists(pszOutput) && !RTSymlinkExists(pszOutput))
+                {
+                    char szOld[RTPATH_MAX];
+                    rc = RTStrCopy(szOld, sizeof(szOld), pszOutput);
+                    if (RT_SUCCESS(rc))
+                        rc = RTStrCat(szOld, sizeof(szOld), ".old");
+                    if (RT_SUCCESS(rc))
+                        RTFileRename(pszOutput, szOld, RTFILEMOVE_FLAGS_REPLACE);
+                }
+                rc = RTStrmOpen(pszOutput, "w", &g_pReportOut);
+                if (RT_FAILURE(rc))
+                {
+                    RTMsgError("Error opening '%s': %Rrc", pszOutput, rc);
+                    break;
+                }
+            }
+            rc = produceCpuReport();
+            break;
+        case kCpuReportOp_MsrsOnly:
+        case kCpuReportOp_MsrsHacking:
+            rc = probeMsrs(enmOp == kCpuReportOp_MsrsHacking, NULL, NULL, NULL, 0);
+            break;
+    }
+    return RT_SUCCESS(rc) ? RTEXITCODE_SUCCESS : RTEXITCODE_FAILURE;
+}
+
+
