Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 54713)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 54714)
@@ -272,4 +272,7 @@
 /**
  * CPUID leaf.
+ *
+ * @remarks This structure is used by the patch manager and is therefore
+ *          more or less set in stone.
  */
 typedef struct CPUMCPUIDLEAF
@@ -294,4 +297,5 @@
     uint32_t    fFlags;
 } CPUMCPUIDLEAF;
+AssertCompileSize(CPUMCPUIDLEAF, 32);
 /** Pointer to a CPUID leaf. */
 typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF;
@@ -309,4 +313,5 @@
 /**
  * Method used to deal with unknown CPUID leafs.
+ * @remarks Used in patch code.
  */
 typedef enum CPUMUKNOWNCPUID
@@ -1271,13 +1276,17 @@
 
 # if defined(VBOX_WITH_RAW_MODE) || defined(DOXYGEN_RUNNING)
-/** @name APIs for Patch Manager CPUID legacy tables
+/** @name APIs for the CPUID raw-mode patch.
  * @{ */
-VMMR3_INT_DECL(uint32_t)                CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM);
-VMMR3_INT_DECL(uint32_t)                CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM);
-VMMR3_INT_DECL(uint32_t)                CPUMR3GetGuestCpuIdPatmCentaurMax(PVM pVM);
-VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))  CPUMR3GetGuestCpuIdPatmStdRCPtr(PVM pVM);
-VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))  CPUMR3GetGuestCpuIdPatmExtRCPtr(PVM pVM);
-VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))  CPUMR3GetGuestCpuIdPatmCentaurRCPtr(PVM pVM);
-VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))  CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM);
+VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))     CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM);
+VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayRCPtr(PVM pVM);
+VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(PVM pVM);
+VMMR3_INT_DECL(CPUMUKNOWNCPUID)            CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(PVM pVM);
+/* Legacy: */
+VMMR3_INT_DECL(uint32_t)                   CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM);
+VMMR3_INT_DECL(uint32_t)                   CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM);
+VMMR3_INT_DECL(uint32_t)                   CPUMR3GetGuestCpuIdPatmCentaurMax(PVM pVM);
+VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))     CPUMR3GetGuestCpuIdPatmStdRCPtr(PVM pVM);
+VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))     CPUMR3GetGuestCpuIdPatmExtRCPtr(PVM pVM);
+VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))     CPUMR3GetGuestCpuIdPatmCentaurRCPtr(PVM pVM);
 /** @} */
 # endif
Index: /trunk/include/VBox/vmm/cpum.mac
===================================================================
--- /trunk/include/VBox/vmm/cpum.mac	(revision 54713)
+++ /trunk/include/VBox/vmm/cpum.mac	(revision 54714)
@@ -4,5 +4,5 @@
 
 ;
-; Copyright (C) 2006-2012 Oracle Corporation
+; Copyright (C) 2006-2015 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
@@ -26,4 +26,42 @@
 %ifndef ___VBox_vmm_cpum_mac__
 %define ___VBox_vmm_cpum_mac__
+
+%include "iprt/asmdefs.mac"
+
+;;
+; CPUID leaf.
+; @remarks This structure is used by the patch manager and can only be extended
+;          by adding to the end of it.
+struc CPUMCPUIDLEAF
+    .uLeaf              resd    1
+    .uSubLeaf           resd    1
+    .fSubLeafMask       resd    1
+    .uEax               resd    1
+    .uEbx               resd    1
+    .uEcx               resd    1
+    .uEdx               resd    1
+    .fFlags             resd    1
+endstruc
+%define CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED RT_BIT_32(0)
+
+;;
+; For the default CPUID leaf value.
+; @remarks This is used by the patch manager and cannot be modified in any way.
+struc CPUMCPUID
+    .uEax               resd    1
+    .uEbx               resd    1
+    .uEcx               resd    1
+    .uEdx               resd    1
+endstruc
+
+
+;; @name Method used to deal with unknown CPUID leafs.
+;; @{
+%define CPUMUKNOWNCPUID_DEFAULTS                1
+%define CPUMUKNOWNCPUID_LAST_STD_LEAF           2
+%define CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX  3
+%define CPUMUKNOWNCPUID_PASSTHRU                4
+;; @}
+
 
 ;;
Index: /trunk/include/VBox/vmm/cpumctx.h
===================================================================
--- /trunk/include/VBox/vmm/cpumctx.h	(revision 54713)
+++ /trunk/include/VBox/vmm/cpumctx.h	(revision 54714)
@@ -4,5 +4,5 @@
 
 /*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2015 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ -468,8 +468,8 @@
 typedef struct CPUMCPUID
 {
-    uint32_t eax;
-    uint32_t ebx;
-    uint32_t ecx;
-    uint32_t edx;
+    uint32_t uEax;
+    uint32_t uEbx;
+    uint32_t uEcx;
+    uint32_t uEdx;
 } CPUMCPUID;
 /** Pointer to a CPUID leaf. */
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 54713)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 54714)
@@ -1328,6 +1328,6 @@
     PVM             pVM          = pVCpu->CTX_SUFF(pVM);
     uint64_t const  uOldEfer     = pVCpu->cpum.s.Guest.msrEFER;
-    uint32_t const  fExtFeatures = pVM->cpum.s.aGuestCpuIdPatmExt[0].eax >= 0x80000001
-                                 ? pVM->cpum.s.aGuestCpuIdPatmExt[1].edx
+    uint32_t const  fExtFeatures = pVM->cpum.s.aGuestCpuIdPatmExt[0].uEax >= 0x80000001
+                                 ? pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx
                                  : 0;
     uint64_t        fMask        = 0;
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 54713)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 54714)
@@ -1179,6 +1179,6 @@
                     if (uSubLeaf < paLeaves[i].uSubLeaf)
                         while (   i > 0
-                               && uLeaf    == paLeaves[i].uLeaf
-                               && uSubLeaf  < paLeaves[i].uSubLeaf)
+                               && uLeaf    == paLeaves[i - 1].uLeaf
+                               && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
                             i--;
                     else
@@ -1217,5 +1217,5 @@
         pCpuId = &pVM->cpum.s.aGuestCpuIdPatmExt[iLeaf - UINT32_C(0x80000000)];
     else if (   iLeaf - UINT32_C(0x40000000) < 0x100   /** @todo Fix this later: Hyper-V says 0x400000FF is the last valid leaf. */
-             && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdPatmStd[1].ecx & X86_CPUID_FEATURE_ECX_HVP)) /* Only report if HVP bit set. */
+             && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdPatmStd[1].uEcx & X86_CPUID_FEATURE_ECX_HVP)) /* Only report if HVP bit set. */
     {
         PCPUMCPUIDLEAF pHyperLeaf = cpumCpuIdGetLeaf(pVM, iLeaf, 0 /* uSubLeaf */);
@@ -1240,8 +1240,8 @@
     uint32_t cCurrentCacheIndex = *pEcx;
 
-    *pEax = pCpuId->eax;
-    *pEbx = pCpuId->ebx;
-    *pEcx = pCpuId->ecx;
-    *pEdx = pCpuId->edx;
+    *pEax = pCpuId->uEax;
+    *pEbx = pCpuId->uEbx;
+    *pEcx = pCpuId->uEcx;
+    *pEdx = pCpuId->uEdx;
 
     if (    iLeaf == 1)
@@ -1328,10 +1328,10 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
+                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
 
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
             if (   pLeaf
                 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
-                pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
+                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
 
             pVM->cpum.s.GuestFeatures.fApic = 1;
@@ -1345,5 +1345,5 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
+                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
             pVM->cpum.s.GuestFeatures.fX2Apic = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
@@ -1363,5 +1363,5 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
+                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
             pVM->cpum.s.GuestFeatures.fSysEnter = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
@@ -1390,5 +1390,5 @@
 
             /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
-            pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
+            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
             pVM->cpum.s.GuestFeatures.fSysCall = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
@@ -1408,10 +1408,10 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
+                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
 
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
             if (    pLeaf
                 &&  pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
-                pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
+                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
 
             pVM->cpum.s.GuestFeatures.fPae = 1;
@@ -1433,5 +1433,5 @@
 
             /* Valid for both Intel and AMD. */
-            pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
+            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
             pVM->cpum.s.GuestFeatures.fLongMode = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
@@ -1452,5 +1452,5 @@
 
             /* Valid for both Intel and AMD. */
-            pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
+            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
             pVM->cpum.s.GuestFeatures.fNoExecute = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
@@ -1472,5 +1472,5 @@
 
             /* Valid for both Intel and AMD. */
-            pVM->cpum.s.aGuestCpuIdPatmExt[1].ecx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
+            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
             pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
@@ -1485,10 +1485,10 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
+                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
 
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
             if (   pLeaf
                 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
-                pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
+                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
 
             pVM->cpum.s.GuestFeatures.fPat = 1;
@@ -1512,5 +1512,5 @@
 
             /* Valid for both Intel and AMD. */
-            pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
+            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
             pVM->cpum.s.HostFeatures.fRdTscP = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
@@ -1523,5 +1523,5 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
+                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
             pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
@@ -1542,5 +1542,5 @@
 
             /* Valid for both Intel and AMD. */
-            pVM->cpum.s.aGuestCpuIdPatmStd[5].ecx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
+            pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
             pVM->cpum.s.GuestFeatures.fMWaitExtensions = 1;
             LogRel(("CPUM: SetGuestCpuIdFeature: Enabled MWAIT Extensions.\n"));
@@ -1607,10 +1607,10 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
+                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
 
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
             if (   pLeaf
                 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
-                pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
+                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
 
             pVM->cpum.s.GuestFeatures.fApic = 0;
@@ -1621,5 +1621,5 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
+                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
             pVM->cpum.s.GuestFeatures.fX2Apic = 0;
             Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
@@ -1629,10 +1629,10 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
+                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
 
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
             if (   pLeaf
                 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
-                pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
+                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
 
             pVM->cpum.s.GuestFeatures.fPae = 0;
@@ -1643,10 +1643,10 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
+                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
 
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
             if (   pLeaf
                 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
-                pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
+                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
 
             pVM->cpum.s.GuestFeatures.fPat = 0;
@@ -1657,5 +1657,5 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
+                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
             pVM->cpum.s.GuestFeatures.fLongMode = 0;
             break;
@@ -1664,5 +1664,5 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmExt[1].ecx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
+                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
             pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
             break;
@@ -1671,5 +1671,5 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
+                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
             pVM->cpum.s.GuestFeatures.fRdTscP = 0;
             Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
@@ -1679,5 +1679,5 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
+                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
             pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
             break;
@@ -1686,5 +1686,5 @@
             pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005), 0);
             if (pLeaf)
-                pVM->cpum.s.aGuestCpuIdPatmStd[5].ecx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
+                pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
             pVM->cpum.s.GuestFeatures.fMWaitExtensions = 0;
             Log(("CPUM: ClearGuestCpuIdFeature: Disabled MWAIT Extensions!\n"));
Index: /trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp	(revision 54713)
+++ /trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp	(revision 54714)
@@ -80,6 +80,6 @@
 {
     uint32_t uLeaf;  /**< Leaf to check. */
-    uint32_t ecx;    /**< which bits in ecx to unify between CPUs. */
-    uint32_t edx;    /**< which bits in edx to unify between CPUs. */
+    uint32_t uEcx;   /**< which bits in ecx to unify between CPUs. */
+    uint32_t uEdx;   /**< which bits in edx to unify between CPUs. */
 }
 const g_aCpuidUnifyBits[] =
@@ -170,6 +170,6 @@
         ASMCpuIdExSlow(uLeaf, 0, 0, 0, &eax, &ebx, &ecx, &edx);
 
-        ASMAtomicAndU32(&pLegacyLeaf->ecx, ecx | ~g_aCpuidUnifyBits[i].ecx);
-        ASMAtomicAndU32(&pLegacyLeaf->edx, edx | ~g_aCpuidUnifyBits[i].edx);
+        ASMAtomicAndU32(&pLegacyLeaf->uEcx, ecx | ~g_aCpuidUnifyBits[i].uEcx);
+        ASMAtomicAndU32(&pLegacyLeaf->uEdx, edx | ~g_aCpuidUnifyBits[i].uEdx);
     }
 }
@@ -300,6 +300,6 @@
                     continue;
 
-                pLeaf->uEcx = pLegacyLeaf->ecx;
-                pLeaf->uEdx = pLegacyLeaf->edx;
+                pLeaf->uEcx = pLegacyLeaf->uEcx;
+                pLeaf->uEdx = pLegacyLeaf->uEdx;
             }
         }
Index: /trunk/src/VBox/VMM/VMMR3/CPUM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 54713)
+++ /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 54714)
@@ -727,7 +727,7 @@
     if (!fHWVirtExEnabled)
     {
-        Assert(   pVM->cpum.s.aGuestCpuIdPatmStd[4].eax == 0
-               || pVM->cpum.s.aGuestCpuIdPatmStd[0].eax < 0x4);
-        pVM->cpum.s.aGuestCpuIdPatmStd[4].eax = 0;
+        Assert(   pVM->cpum.s.aGuestCpuIdPatmStd[4].uEax == 0
+               || pVM->cpum.s.aGuestCpuIdPatmStd[0].uEax < 0x4);
+        pVM->cpum.s.aGuestCpuIdPatmStd[4].uEax = 0;
     }
 }
@@ -1232,5 +1232,5 @@
          * features in the future.
          */
-        AssertRelease(!(pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx &
+        AssertRelease(!(pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx &
                               (   X86_CPUID_FEATURE_ECX_DTES64
                                |  X86_CPUID_FEATURE_ECX_VMX
@@ -1875,5 +1875,5 @@
     CPUMCPUID   Host;
     CPUMCPUID   Guest;
-    unsigned    cStdMax = pVM->cpum.s.aGuestCpuIdPatmStd[0].eax;
+    unsigned    cStdMax = pVM->cpum.s.aGuestCpuIdPatmStd[0].uEax;
 
     uint32_t    cStdHstMax;
@@ -1891,20 +1891,20 @@
         {
             Guest = pVM->cpum.s.aGuestCpuIdPatmStd[i];
-            ASMCpuIdExSlow(i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
+            ASMCpuIdExSlow(i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
 
             pHlp->pfnPrintf(pHlp,
                             "Gst: %08x  %08x %08x %08x %08x%s\n"
                             "Hst:           %08x %08x %08x %08x\n",
-                            i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
+                            i, Guest.uEax, Guest.uEbx, Guest.uEcx, Guest.uEdx,
                             i <= cStdMax ? "" : "*",
-                            Host.eax, Host.ebx, Host.ecx, Host.edx);
+                            Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
         }
         else
         {
-            ASMCpuIdExSlow(i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
+            ASMCpuIdExSlow(i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
 
             pHlp->pfnPrintf(pHlp,
                             "Hst: %08x  %08x %08x %08x %08x\n",
-                            i, Host.eax, Host.ebx, Host.ecx, Host.edx);
+                            i, Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
         }
     }
@@ -1919,5 +1919,5 @@
                         "Name:                            %.04s%.04s%.04s\n"
                         "Supports:                        0-%x\n",
-                        &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
+                        &Guest.uEbx, &Guest.uEdx, &Guest.uEcx, Guest.uEax);
     }
 
@@ -1925,7 +1925,7 @@
      * Get Features.
      */
-    bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdPatmStd[0].ebx,
-                                        pVM->cpum.s.aGuestCpuIdPatmStd[0].ecx,
-                                        pVM->cpum.s.aGuestCpuIdPatmStd[0].edx);
+    bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdPatmStd[0].uEbx,
+                                        pVM->cpum.s.aGuestCpuIdPatmStd[0].uEcx,
+                                        pVM->cpum.s.aGuestCpuIdPatmStd[0].uEdx);
     if (cStdMax >= 1 && iVerbosity)
     {
@@ -1933,5 +1933,5 @@
 
         Guest = pVM->cpum.s.aGuestCpuIdPatmStd[1];
-        uint32_t uEAX = Guest.eax;
+        uint32_t uEAX = Guest.uEax;
 
         pHlp->pfnPrintf(pHlp,
@@ -1948,11 +1948,11 @@
                         ASMGetCpuStepping(uEAX),
                         (uEAX >> 12) & 3, s_apszTypes[(uEAX >> 12) & 3],
-                        (Guest.ebx >> 24) & 0xff,
-                        (Guest.ebx >> 16) & 0xff,
-                        (Guest.ebx >>  8) & 0xff,
-                        (Guest.ebx >>  0) & 0xff);
+                        (Guest.uEbx >> 24) & 0xff,
+                        (Guest.uEbx >> 16) & 0xff,
+                        (Guest.uEbx >>  8) & 0xff,
+                        (Guest.uEbx >>  0) & 0xff);
         if (iVerbosity == 1)
         {
-            uint32_t uEDX = Guest.edx;
+            uint32_t uEDX = Guest.uEdx;
             pHlp->pfnPrintf(pHlp, "Features EDX:                   ");
             if (uEDX & RT_BIT(0))   pHlp->pfnPrintf(pHlp, " FPU");
@@ -1990,5 +1990,5 @@
             pHlp->pfnPrintf(pHlp, "\n");
 
-            uint32_t uECX = Guest.ecx;
+            uint32_t uECX = Guest.uEcx;
             pHlp->pfnPrintf(pHlp, "Features ECX:                   ");
             if (uECX & RT_BIT(0))   pHlp->pfnPrintf(pHlp, " SSE3");
@@ -2028,10 +2028,10 @@
         else
         {
-            ASMCpuIdExSlow(1, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
-
-            X86CPUIDFEATEDX EdxHost  = *(PX86CPUIDFEATEDX)&Host.edx;
-            X86CPUIDFEATECX EcxHost  = *(PX86CPUIDFEATECX)&Host.ecx;
-            X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest.edx;
-            X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest.ecx;
+            ASMCpuIdExSlow(1, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+
+            X86CPUIDFEATEDX EdxHost  = *(PX86CPUIDFEATEDX)&Host.uEdx;
+            X86CPUIDFEATECX EcxHost  = *(PX86CPUIDFEATECX)&Host.uEcx;
+            X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest.uEdx;
+            X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest.uEcx;
 
             pHlp->pfnPrintf(pHlp, "Mnemonic - Description                 = guest (host)\n");
@@ -2112,5 +2112,5 @@
      * Implemented after AMD specs.
      */
-    unsigned    cExtMax = pVM->cpum.s.aGuestCpuIdPatmExt[0].eax & 0xffff;
+    unsigned    cExtMax = pVM->cpum.s.aGuestCpuIdPatmExt[0].uEax & 0xffff;
 
     pHlp->pfnPrintf(pHlp,
@@ -2122,8 +2122,8 @@
     {
         Guest = pVM->cpum.s.aGuestCpuIdPatmExt[i];
-        ASMCpuIdExSlow(0x80000000 | i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
+        ASMCpuIdExSlow(0x80000000 | i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
 
         if (   i == 7
-            && (Host.edx & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR))
+            && (Host.uEdx & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR))
         {
             fSupportsInvariantTsc = true;
@@ -2132,7 +2132,7 @@
                         "Gst: %08x  %08x %08x %08x %08x%s\n"
                         "Hst:           %08x %08x %08x %08x\n",
-                        0x80000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
+                        0x80000000 | i, Guest.uEax, Guest.uEbx, Guest.uEcx, Guest.uEdx,
                         i <= cExtMax ? "" : "*",
-                        Host.eax, Host.ebx, Host.ecx, Host.edx);
+                        Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
     }
 
@@ -2146,5 +2146,5 @@
                         "Ext Name:                        %.4s%.4s%.4s\n"
                         "Ext Supports:                    0x80000000-%#010x\n",
-                        &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
+                        &Guest.uEbx, &Guest.uEdx, &Guest.uEcx, Guest.uEax);
     }
 
@@ -2152,5 +2152,5 @@
     {
         Guest = pVM->cpum.s.aGuestCpuIdPatmExt[1];
-        uint32_t uEAX = Guest.eax;
+        uint32_t uEAX = Guest.uEax;
         pHlp->pfnPrintf(pHlp,
                         "Family:                          %d  \tExtended: %d \tEffective: %d\n"
@@ -2161,9 +2161,9 @@
                         (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
                         ASMGetCpuStepping(uEAX),
-                        Guest.ebx & 0xfff);
+                        Guest.uEbx & 0xfff);
 
         if (iVerbosity == 1)
         {
-            uint32_t uEDX = Guest.edx;
+            uint32_t uEDX = Guest.uEdx;
             pHlp->pfnPrintf(pHlp, "Features EDX:                   ");
             if (uEDX & RT_BIT(0))   pHlp->pfnPrintf(pHlp, " FPU");
@@ -2201,5 +2201,5 @@
             pHlp->pfnPrintf(pHlp, "\n");
 
-            uint32_t uECX = Guest.ecx;
+            uint32_t uECX = Guest.uEcx;
             pHlp->pfnPrintf(pHlp, "Features ECX:                   ");
             if (uECX & RT_BIT(0))   pHlp->pfnPrintf(pHlp, " LAHF/SAHF");
@@ -2224,8 +2224,8 @@
         else
         {
-            ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
-
-            uint32_t uEdxGst = Guest.edx;
-            uint32_t uEdxHst = Host.edx;
+            ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+
+            uint32_t uEdxGst = Guest.uEdx;
+            uint32_t uEdxHst = Host.uEdx;
             pHlp->pfnPrintf(pHlp, "Mnemonic - Description                 = guest (host)\n");
             pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip                  = %d (%d)\n",  !!(uEdxGst & RT_BIT( 0)),  !!(uEdxHst & RT_BIT( 0)));
@@ -2262,6 +2262,6 @@
             pHlp->pfnPrintf(pHlp, "31 - AMD 3DNow!                        = %d (%d)\n",  !!(uEdxGst & RT_BIT(31)),  !!(uEdxHst & RT_BIT(31)));
 
-            uint32_t uEcxGst = Guest.ecx;
-            uint32_t uEcxHst = Host.ecx;
+            uint32_t uEcxGst = Guest.uEcx;
+            uint32_t uEcxHst = Host.uEcx;
             pHlp->pfnPrintf(pHlp, "LahfSahf - LAHF/SAHF in 64-bit mode    = %d (%d)\n",  !!(uEcxGst & RT_BIT( 0)),  !!(uEcxHst & RT_BIT( 0)));
             pHlp->pfnPrintf(pHlp, "CmpLegacy - Core MP legacy mode (depr) = %d (%d)\n",  !!(uEcxGst & RT_BIT( 1)),  !!(uEcxHst & RT_BIT( 1)));
@@ -2286,21 +2286,21 @@
         char szString[4*4*3+1] = {0};
         uint32_t *pu32 = (uint32_t *)szString;
-        *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].eax;
-        *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].ebx;
-        *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].ecx;
-        *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].edx;
+        *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].uEax;
+        *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].uEbx;
+        *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].uEcx;
+        *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].uEdx;
         if (cExtMax >= 3)
         {
-            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].eax;
-            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].ebx;
-            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].ecx;
-            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].edx;
+            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].uEax;
+            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].uEbx;
+            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].uEcx;
+            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].uEdx;
         }
         if (cExtMax >= 4)
         {
-            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].eax;
-            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].ebx;
-            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].ecx;
-            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].edx;
+            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].uEax;
+            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].uEbx;
+            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].uEcx;
+            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].uEdx;
         }
         pHlp->pfnPrintf(pHlp, "Full Name:                       %s\n", szString);
@@ -2309,8 +2309,8 @@
     if (iVerbosity && cExtMax >= 5)
     {
-        uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[5].eax;
-        uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[5].ebx;
-        uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[5].ecx;
-        uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[5].edx;
+        uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[5].uEax;
+        uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[5].uEbx;
+        uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[5].uEcx;
+        uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[5].uEdx;
         char sz1[32];
         char sz2[32];
@@ -2347,7 +2347,7 @@
     if (iVerbosity && cExtMax >= 6)
     {
-        uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[6].eax;
-        uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[6].ebx;
-        uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[6].edx;
+        uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[6].uEax;
+        uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[6].uEbx;
+        uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[6].uEdx;
 
         pHlp->pfnPrintf(pHlp,
@@ -2374,5 +2374,5 @@
     if (iVerbosity && cExtMax >= 7)
     {
-        uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[7].edx;
+        uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[7].uEdx;
 
         pHlp->pfnPrintf(pHlp, "Host Invariant-TSC support:      %RTbool\n", fSupportsInvariantTsc);
@@ -2396,6 +2396,6 @@
     if (iVerbosity && cExtMax >= 8)
     {
-        uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[8].eax;
-        uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[8].ecx;
+        uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[8].uEax;
+        uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[8].uEcx;
 
         pHlp->pfnPrintf(pHlp,
@@ -2420,11 +2420,11 @@
     RT_ZERO(Host);
     if (cStdHstMax >= 1)
-        ASMCpuIdExSlow(1, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
-    bool fHostHvp  = RT_BOOL(Host.ecx & X86_CPUID_FEATURE_ECX_HVP);
+        ASMCpuIdExSlow(1, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+    bool fHostHvp  = RT_BOOL(Host.uEcx & X86_CPUID_FEATURE_ECX_HVP);
     bool fGuestHvp = false;
     if (cStdMax >= 1)
     {
         Guest     = pVM->cpum.s.aGuestCpuIdPatmStd[1];
-        fGuestHvp = RT_BOOL(Guest.ecx & X86_CPUID_FEATURE_ECX_HVP);
+        fGuestHvp = RT_BOOL(Guest.uEcx & X86_CPUID_FEATURE_ECX_HVP);
     }
 
@@ -2447,9 +2447,9 @@
         RT_ZERO(Host);
         if (fHostHvp)
-            ASMCpuIdExSlow(uHyperLeaf, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
+            ASMCpuIdExSlow(uHyperLeaf, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
 
         CPUMCPUIDLEAF  GuestLeaf;
         uint32_t const cHyperGstMax = pHyperLeafGst ? pHyperLeafGst->uEax : 0;
-        uint32_t const cHyperHstMax = Host.eax;
+        uint32_t const cHyperHstMax = Host.uEax;
         uint32_t const cHyperMax    = RT_MAX(cHyperHstMax, cHyperGstMax);
         for (uint32_t i = uHyperLeaf; i <= cHyperMax; i++)
@@ -2458,5 +2458,5 @@
             RT_ZERO(GuestLeaf);
             if (i <= cHyperHstMax)
-                ASMCpuIdExSlow(i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
+                ASMCpuIdExSlow(i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
             CPUMR3CpuIdGetLeaf(pVM, &GuestLeaf, i, 0 /* uSubLeaf */);
             if (!fHostHvp)
@@ -2473,5 +2473,5 @@
                                 i, GuestLeaf.uEax, GuestLeaf.uEbx, GuestLeaf.uEcx, GuestLeaf.uEdx,
                                 i <= cHyperGstMax ? "" : "*",
-                                Host.eax, Host.ebx, Host.ecx, Host.edx, i <= cHyperHstMax ? "" : "*");
+                                Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx, i <= cHyperHstMax ? "" : "*");
             }
         }
@@ -2481,5 +2481,5 @@
      * Centaur.
      */
-    unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdPatmCentaur[0].eax & 0xffff;
+    unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdPatmCentaur[0].uEax & 0xffff;
 
     pHlp->pfnPrintf(pHlp,
@@ -2490,12 +2490,12 @@
     {
         Guest = pVM->cpum.s.aGuestCpuIdPatmCentaur[i];
-        ASMCpuIdExSlow(0xc0000000 | i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
+        ASMCpuIdExSlow(0xc0000000 | i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
 
         pHlp->pfnPrintf(pHlp,
                         "Gst: %08x  %08x %08x %08x %08x%s\n"
                         "Hst:           %08x %08x %08x %08x\n",
-                        0xc0000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
+                        0xc0000000 | i, Guest.uEax, Guest.uEbx, Guest.uEcx, Guest.uEdx,
                         i <= cCentaurMax ? "" : "*",
-                        Host.eax, Host.ebx, Host.ecx, Host.edx);
+                        Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
     }
 
@@ -2508,12 +2508,12 @@
         pHlp->pfnPrintf(pHlp,
                         "Centaur Supports:                0xc0000000-%#010x\n",
-                        Guest.eax);
+                        Guest.uEax);
     }
 
     if (iVerbosity && cCentaurMax >= 1)
     {
-        ASMCpuIdExSlow(0xc0000001, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
-        uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdPatmCentaur[1].edx;
-        uint32_t uEdxHst = Host.edx;
+        ASMCpuIdExSlow(0xc0000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
+        uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdPatmCentaur[1].uEdx;
+        uint32_t uEdxHst = Host.uEdx;
 
         if (iVerbosity == 1)
Index: /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 54713)
+++ /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 54714)
@@ -541,8 +541,8 @@
     if (pLeaf)
     {
-        pLegacy->eax = pLeaf->uEax;
-        pLegacy->ebx = pLeaf->uEbx;
-        pLegacy->ecx = pLeaf->uEcx;
-        pLegacy->edx = pLeaf->uEdx;
+        pLegacy->uEax = pLeaf->uEax;
+        pLegacy->uEbx = pLeaf->uEbx;
+        pLegacy->uEcx = pLeaf->uEcx;
+        pLegacy->uEdx = pLeaf->uEdx;
         return true;
     }
@@ -1195,8 +1195,8 @@
      */
     *penmUnknownMethod = CPUMUKNOWNCPUID_DEFAULTS;
-    pDefUnknown->eax = 0;
-    pDefUnknown->ebx = 0;
-    pDefUnknown->ecx = 0;
-    pDefUnknown->edx = 0;
+    pDefUnknown->uEax = 0;
+    pDefUnknown->uEbx = 0;
+    pDefUnknown->uEcx = 0;
+    pDefUnknown->uEdx = 0;
 
     /*
@@ -1258,8 +1258,8 @@
         else
             *penmUnknownMethod = CPUMUKNOWNCPUID_LAST_STD_LEAF;
-        pDefUnknown->eax = auLast[0];
-        pDefUnknown->ebx = auLast[1];
-        pDefUnknown->ecx = auLast[2];
-        pDefUnknown->edx = auLast[3];
+        pDefUnknown->uEax = auLast[0];
+        pDefUnknown->uEbx = auLast[1];
+        pDefUnknown->uEcx = auLast[2];
+        pDefUnknown->uEdx = auLast[3];
         return VINF_SUCCESS;
     }
@@ -1691,5 +1691,5 @@
         int rc = CFGMR3QueryU32(pLeafNode, "eax", &u32);
         if (RT_SUCCESS(rc))
-            pLeaf->eax = u32;
+            pLeaf->uEax = u32;
         else
             AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
@@ -1697,5 +1697,5 @@
         rc = CFGMR3QueryU32(pLeafNode, "ebx", &u32);
         if (RT_SUCCESS(rc))
-            pLeaf->ebx = u32;
+            pLeaf->uEbx = u32;
         else
             AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
@@ -1703,5 +1703,5 @@
         rc = CFGMR3QueryU32(pLeafNode, "ecx", &u32);
         if (RT_SUCCESS(rc))
-            pLeaf->ecx = u32;
+            pLeaf->uEcx = u32;
         else
             AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
@@ -1709,5 +1709,5 @@
         rc = CFGMR3QueryU32(pLeafNode, "edx", &u32);
         if (RT_SUCCESS(rc))
-            pLeaf->edx = u32;
+            pLeaf->uEdx = u32;
         else
             AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
@@ -1753,5 +1753,5 @@
     /* Using the ECX variant for all of them can't hurt... */
     for (uint32_t i = 0; i < cLeaves; i++)
-        ASMCpuIdExSlow(uStart + i, 0, 0, 0, &paLeaves[i].eax, &paLeaves[i].ebx, &paLeaves[i].ecx, &paLeaves[i].edx);
+        ASMCpuIdExSlow(uStart + i, 0, 0, 0, &paLeaves[i].uEax, &paLeaves[i].uEbx, &paLeaves[i].uEcx, &paLeaves[i].uEdx);
 
     /* Load CPUID leaf override; we currently don't care if the user
@@ -1816,8 +1816,8 @@
             if (pLeaf)
             {
-                pLegacyLeaf->eax = pLeaf->uEax;
-                pLegacyLeaf->ebx = pLeaf->uEbx;
-                pLegacyLeaf->ecx = pLeaf->uEcx;
-                pLegacyLeaf->edx = pLeaf->uEdx;
+                pLegacyLeaf->uEax = pLeaf->uEax;
+                pLegacyLeaf->uEbx = pLeaf->uEbx;
+                pLegacyLeaf->uEcx = pLeaf->uEcx;
+                pLegacyLeaf->uEdx = pLeaf->uEdx;
             }
             else
@@ -2603,5 +2603,5 @@
     CPUMCPUID   aRawStd[16];
     for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++)
-        ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
+        ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx);
     SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd));
     SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd));
@@ -2609,5 +2609,5 @@
     CPUMCPUID   aRawExt[32];
     for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++)
-        ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
+        ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx);
     SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
     SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
@@ -2634,8 +2634,8 @@
                 NewLeaf.uSubLeaf        = 0;
                 NewLeaf.fSubLeafMask    = 0;
-                NewLeaf.uEax            = CpuId.eax;
-                NewLeaf.uEbx            = CpuId.ebx;
-                NewLeaf.uEcx            = CpuId.ecx;
-                NewLeaf.uEdx            = CpuId.edx;
+                NewLeaf.uEax            = CpuId.uEax;
+                NewLeaf.uEbx            = CpuId.uEbx;
+                NewLeaf.uEcx            = CpuId.uEcx;
+                NewLeaf.uEdx            = CpuId.uEdx;
                 NewLeaf.fFlags          = 0;
                 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &NewLeaf);
@@ -2873,5 +2873,5 @@
     AssertRCReturn(rc, rc);
     for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
-        ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
+        ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx);
 
     CPUMCPUID   aRawExt[32];
@@ -2883,5 +2883,5 @@
     AssertRCReturn(rc, rc);
     for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
-        ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
+        ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx);
 
     /*
@@ -2890,10 +2890,10 @@
     CPUMCPUID   aHostRawStd[16];
     for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++)
-        ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i].eax, &aHostRawStd[i].ebx, &aHostRawStd[i].ecx, &aHostRawStd[i].edx);
+        ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i].uEax, &aHostRawStd[i].uEbx, &aHostRawStd[i].uEcx, &aHostRawStd[i].uEdx);
 
     CPUMCPUID   aHostRawExt[32];
     for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++)
         ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0,
-                       &aHostRawExt[i].eax, &aHostRawExt[i].ebx, &aHostRawExt[i].ecx, &aHostRawExt[i].edx);
+                       &aHostRawExt[i].uEax, &aHostRawExt[i].uEbx, &aHostRawExt[i].uEcx, &aHostRawExt[i].uEdx);
 
     /*
@@ -2926,92 +2926,92 @@
     {
         /* CPUID(0) */
-        CPUID_CHECK_RET(   aHostRawStd[0].ebx == aRawStd[0].ebx
-                        && aHostRawStd[0].ecx == aRawStd[0].ecx
-                        && aHostRawStd[0].edx == aRawStd[0].edx,
+        CPUID_CHECK_RET(   aHostRawStd[0].uEbx == aRawStd[0].uEbx
+                        && aHostRawStd[0].uEcx == aRawStd[0].uEcx
+                        && aHostRawStd[0].uEdx == aRawStd[0].uEdx,
                         (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"),
-                         &aHostRawStd[0].ebx, &aHostRawStd[0].edx, &aHostRawStd[0].ecx,
-                         &aRawStd[0].ebx, &aRawStd[0].edx, &aRawStd[0].ecx));
-        CPUID_CHECK2_WRN("Std CPUID max leaf",   aHostRawStd[0].eax, aRawStd[0].eax);
-        CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3);
-        CPUID_CHECK2_WRN("Reserved bits 31:28",  aHostRawExt[1].eax >> 28,       aRawExt[1].eax >> 28);
-
-        bool const fIntel = ASMIsIntelCpuEx(aRawStd[0].ebx, aRawStd[0].ecx, aRawStd[0].edx);
+                         &aHostRawStd[0].uEbx, &aHostRawStd[0].uEdx, &aHostRawStd[0].uEcx,
+                         &aRawStd[0].uEbx, &aRawStd[0].uEdx, &aRawStd[0].uEcx));
+        CPUID_CHECK2_WRN("Std CPUID max leaf",   aHostRawStd[0].uEax, aRawStd[0].uEax);
+        CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].uEax >> 14) & 3, (aRawExt[1].uEax >> 14) & 3);
+        CPUID_CHECK2_WRN("Reserved bits 31:28",  aHostRawExt[1].uEax >> 28,       aRawExt[1].uEax >> 28);
+
+        bool const fIntel = ASMIsIntelCpuEx(aRawStd[0].uEbx, aRawStd[0].uEcx, aRawStd[0].uEdx);
 
         /* CPUID(1).eax */
-        CPUID_CHECK2_RET("CPU family",          ASMGetCpuFamily(aHostRawStd[1].eax),        ASMGetCpuFamily(aRawStd[1].eax));
-        CPUID_CHECK2_RET("CPU model",           ASMGetCpuModel(aHostRawStd[1].eax, fIntel), ASMGetCpuModel(aRawStd[1].eax, fIntel));
-        CPUID_CHECK2_WRN("CPU type",            (aHostRawStd[1].eax >> 12) & 3,             (aRawStd[1].eax >> 12) & 3 );
+        CPUID_CHECK2_RET("CPU family",          ASMGetCpuFamily(aHostRawStd[1].uEax),        ASMGetCpuFamily(aRawStd[1].uEax));
+        CPUID_CHECK2_RET("CPU model",           ASMGetCpuModel(aHostRawStd[1].uEax, fIntel), ASMGetCpuModel(aRawStd[1].uEax, fIntel));
+        CPUID_CHECK2_WRN("CPU type",            (aHostRawStd[1].uEax >> 12) & 3,             (aRawStd[1].uEax >> 12) & 3 );
 
         /* CPUID(1).ebx - completely ignore CPU count and APIC ID. */
-        CPUID_CHECK2_RET("CPU brand ID",         aHostRawStd[1].ebx & 0xff,                 aRawStd[1].ebx & 0xff);
-        CPUID_CHECK2_WRN("CLFLUSH chunk count", (aHostRawStd[1].ebx >> 8) & 0xff,           (aRawStd[1].ebx >> 8) & 0xff);
+        CPUID_CHECK2_RET("CPU brand ID",         aHostRawStd[1].uEbx & 0xff,                 aRawStd[1].uEbx & 0xff);
+        CPUID_CHECK2_WRN("CLFLUSH chunk count", (aHostRawStd[1].uEbx >> 8) & 0xff,           (aRawStd[1].uEbx >> 8) & 0xff);
 
         /* CPUID(1).ecx */
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL);
-        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS);
-        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_VMX);
-        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_SMX);
-        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_EST);
-        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TM2);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3);
-        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID);
-        CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16);
-        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);
-        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM);
-        CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/);
-        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_PCID);
-        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DCA);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2);
-        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT);
-        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TSCDEADL);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_F16C);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_RDRAND);
-        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_HVP);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE3);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCLMUL);
+        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_DTES64);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_MONITOR);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CPLDS);
+        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_VMX);
+        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_SMX);
+        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_EST);
+        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_TM2);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSSE3);
+        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_CNTXID);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, RT_BIT_32(11) /*reserved*/ );
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_FMA);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CX16);
+        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_TPRUPDATE);
+        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_PDCM);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, RT_BIT_32(16) /*reserved*/);
+        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_PCID);
+        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_DCA);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_1);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_2);
+        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_X2APIC);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_MOVBE);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_POPCNT);
+        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_TSCDEADL);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AES);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_XSAVE);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AVX);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_F16C);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_RDRAND);
+        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_HVP);
 
         /* CPUID(1).edx */
-        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);
-        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);
-        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);
-        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC);
-        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAE);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);
-        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);
-        CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);
-        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);
-        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH);
-        CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_DS);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_ACPI);
-        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX);
-        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR);
-        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE);
-        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SS);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_HTT);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_TM);
-        CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/);
-        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PBE);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FPU);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_VME);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DE);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TSC);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MSR);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PAE);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCE);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CX8);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_APIC);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, RT_BIT_32(10) /*reserved*/);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_SEP);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MTRR);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PGE);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCA);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CMOV);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PAT);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE36);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSN);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CLFSH);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, RT_BIT_32(20) /*reserved*/);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_DS);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_ACPI);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MMX);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FXSR);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE2);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_SS);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_HTT);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_TM);
+        CPUID_RAW_FEATURE_RET(Std, uEdx, RT_BIT_32(30) /*JMPE/IA64*/);
+        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PBE);
 
         /* CPUID(2) - config, mostly about caches. ignore. */
@@ -3027,14 +3027,14 @@
 
         /* CPUID(d) - XCR0 stuff - takes ECX as input. We only warn about the main level (ECX=0) for now. */
-        CPUID_CHECK_WRN(   aRawStd[0].eax     <  UINT32_C(0x0000000d)
-                        || aHostRawStd[0].eax >= UINT32_C(0x0000000d),
+        CPUID_CHECK_WRN(   aRawStd[0].uEax     <  UINT32_C(0x0000000d)
+                        || aHostRawStd[0].uEax >= UINT32_C(0x0000000d),
                         ("CPUM: Standard leaf D was present on saved state host, not present on current.\n"));
-        if (   aRawStd[0].eax     >= UINT32_C(0x0000000d)
-            && aHostRawStd[0].eax >= UINT32_C(0x0000000d))
+        if (   aRawStd[0].uEax     >= UINT32_C(0x0000000d)
+            && aHostRawStd[0].uEax >= UINT32_C(0x0000000d))
         {
-            CPUID_CHECK2_WRN("Valid low XCR0 bits",             aHostRawStd[0xd].eax, aRawStd[0xd].eax);
-            CPUID_CHECK2_WRN("Valid high XCR0 bits",            aHostRawStd[0xd].edx, aRawStd[0xd].edx);
-            CPUID_CHECK2_WRN("Current XSAVE/XRSTOR area size",  aHostRawStd[0xd].ebx, aRawStd[0xd].ebx);
-            CPUID_CHECK2_WRN("Max XSAVE/XRSTOR area size",      aHostRawStd[0xd].ecx, aRawStd[0xd].ecx);
+            CPUID_CHECK2_WRN("Valid low XCR0 bits",             aHostRawStd[0xd].uEax, aRawStd[0xd].uEax);
+            CPUID_CHECK2_WRN("Valid high XCR0 bits",            aHostRawStd[0xd].uEdx, aRawStd[0xd].uEdx);
+            CPUID_CHECK2_WRN("Current XSAVE/XRSTOR area size",  aHostRawStd[0xd].uEbx, aRawStd[0xd].uEbx);
+            CPUID_CHECK2_WRN("Max XSAVE/XRSTOR area size",      aHostRawStd[0xd].uEcx, aRawStd[0xd].uEcx);
         }
 
@@ -3042,96 +3042,96 @@
            Note! Intel have/is marking many of the fields here as reserved. We
                  will verify them as if it's an AMD CPU. */
-        CPUID_CHECK_RET(   (aHostRawExt[0].eax >= UINT32_C(0x80000001) && aHostRawExt[0].eax <= UINT32_C(0x8000007f))
-                        || !(aRawExt[0].eax    >= UINT32_C(0x80000001) && aRawExt[0].eax     <= UINT32_C(0x8000007f)),
+        CPUID_CHECK_RET(   (aHostRawExt[0].uEax >= UINT32_C(0x80000001) && aHostRawExt[0].uEax <= UINT32_C(0x8000007f))
+                        || !(aRawExt[0].uEax    >= UINT32_C(0x80000001) && aRawExt[0].uEax     <= UINT32_C(0x8000007f)),
                         (N_("Extended leaves was present on saved state host, but is missing on the current\n")));
-        if (aRawExt[0].eax >= UINT32_C(0x80000001) && aRawExt[0].eax     <= UINT32_C(0x8000007f))
+        if (aRawExt[0].uEax >= UINT32_C(0x80000001) && aRawExt[0].uEax     <= UINT32_C(0x8000007f))
         {
-            CPUID_CHECK_RET(   aHostRawExt[0].ebx == aRawExt[0].ebx
-                            && aHostRawExt[0].ecx == aRawExt[0].ecx
-                            && aHostRawExt[0].edx == aRawExt[0].edx,
+            CPUID_CHECK_RET(   aHostRawExt[0].uEbx == aRawExt[0].uEbx
+                            && aHostRawExt[0].uEcx == aRawExt[0].uEcx
+                            && aHostRawExt[0].uEdx == aRawExt[0].uEdx,
                             (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"),
-                             &aHostRawExt[0].ebx, &aHostRawExt[0].edx, &aHostRawExt[0].ecx,
-                             &aRawExt[0].ebx,     &aRawExt[0].edx,     &aRawExt[0].ecx));
-            CPUID_CHECK2_WRN("Ext CPUID max leaf",   aHostRawExt[0].eax, aRawExt[0].eax);
+                             &aHostRawExt[0].uEbx, &aHostRawExt[0].uEdx, &aHostRawExt[0].uEcx,
+                             &aRawExt[0].uEbx,     &aRawExt[0].uEdx,     &aRawExt[0].uEcx));
+            CPUID_CHECK2_WRN("Ext CPUID max leaf",   aHostRawExt[0].uEax, aRawExt[0].uEax);
 
             /* CPUID(0x80000001).eax - same as CPUID(0).eax. */
-            CPUID_CHECK2_RET("CPU family",          ASMGetCpuFamily(aHostRawExt[1].eax),        ASMGetCpuFamily(aRawExt[1].eax));
-            CPUID_CHECK2_RET("CPU model",           ASMGetCpuModel(aHostRawExt[1].eax, fIntel), ASMGetCpuModel(aRawExt[1].eax, fIntel));
-            CPUID_CHECK2_WRN("CPU type",            (aHostRawExt[1].eax >> 12) & 3, (aRawExt[1].eax >> 12) & 3 );
-            CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3 );
-            CPUID_CHECK2_WRN("Reserved bits 31:28",  aHostRawExt[1].eax >> 28, aRawExt[1].eax >> 28);
+            CPUID_CHECK2_RET("CPU family",          ASMGetCpuFamily(aHostRawExt[1].uEax),        ASMGetCpuFamily(aRawExt[1].uEax));
+            CPUID_CHECK2_RET("CPU model",           ASMGetCpuModel(aHostRawExt[1].uEax, fIntel), ASMGetCpuModel(aRawExt[1].uEax, fIntel));
+            CPUID_CHECK2_WRN("CPU type",            (aHostRawExt[1].uEax >> 12) & 3, (aRawExt[1].uEax >> 12) & 3 );
+            CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].uEax >> 14) & 3, (aRawExt[1].uEax >> 14) & 3 );
+            CPUID_CHECK2_WRN("Reserved bits 31:28",  aHostRawExt[1].uEax >> 28, aRawExt[1].uEax >> 28);
 
             /* CPUID(0x80000001).ebx - Brand ID (maybe), just warn if things differs. */
-            CPUID_CHECK2_WRN("CPU BrandID",          aHostRawExt[1].ebx & 0xffff, aRawExt[1].ebx & 0xffff);
-            CPUID_CHECK2_WRN("Reserved bits 16:27", (aHostRawExt[1].ebx >> 16) & 0xfff, (aRawExt[1].ebx >> 16) & 0xfff);
-            CPUID_CHECK2_WRN("PkgType",             (aHostRawExt[1].ebx >> 28) &   0xf, (aRawExt[1].ebx >> 28) &   0xf);
+            CPUID_CHECK2_WRN("CPU BrandID",          aHostRawExt[1].uEbx & 0xffff, aRawExt[1].uEbx & 0xffff);
+            CPUID_CHECK2_WRN("Reserved bits 16:27", (aHostRawExt[1].uEbx >> 16) & 0xfff, (aRawExt[1].uEbx >> 16) & 0xfff);
+            CPUID_CHECK2_WRN("PkgType",             (aHostRawExt[1].uEbx >> 28) &   0xf, (aRawExt[1].uEbx >> 28) &   0xf);
 
             /* CPUID(0x80000001).ecx */
-            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
-            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL);
-            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM);
-            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);
-            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L);
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM);
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW);
-            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS);
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5);
-            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);
-            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT);
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));
-            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));
+            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
+            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CMPL);
+            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SVM);
+            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);
+            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CR8L);
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_ABM);
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_OSVW);
+            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_IBS);
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE5);
+            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);
+            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_WDT);
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(14));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(15));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(16));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(17));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(18));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(19));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(20));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(21));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(22));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(23));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(24));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(25));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(26));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(27));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(28));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(29));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(30));
+            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(31));
 
             /* CPUID(0x80000001).edx */
-            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FPU);
-            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_VME);
-            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_DE);
-            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE);
-            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_TSC);
-            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MSR);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAE);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCE);
-            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CX8);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_APIC);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(10) /*reserved*/);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SEP);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MTRR);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PGE);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCA);
-            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CMOV);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAT);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE36);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(18) /*reserved*/);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(19) /*reserved*/);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(21) /*reserved*/);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
-            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MMX);
-            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FXSR);
-            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(28) /*reserved*/);
-            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
-            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
-            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
+            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FPU);
+            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_VME);
+            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_DE);
+            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE);
+            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_TSC);
+            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MSR);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PAE);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MCE);
+            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_CX8);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_APIC);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(10) /*reserved*/);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_SEP);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MTRR);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PGE);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MCA);
+            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_CMOV);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PAT);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE36);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(18) /*reserved*/);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(19) /*reserved*/);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_NX);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(21) /*reserved*/);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
+            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MMX);
+            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FXSR);
+            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(28) /*reserved*/);
+            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
+            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
+            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
 
             /** @todo verify the rest as well. */
@@ -3158,70 +3158,70 @@
 
     /* CPUID(1).ecx */
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3);    // -> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL);  // -> EMU?
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64);  // -> EMU?
-    CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS);   // -> EMU?
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_VMX);     // -> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SMX);     // -> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_EST);     // -> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TM2);     // -> EMU?
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3);   // -> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID);  // -> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA);     // -> EMU? what's this?
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16);    // -> EMU?
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM);    // -> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/);
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCID);
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DCA);     // -> EMU?
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1);  // -> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2);  // -> EMU
-    CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE);   // -> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT);  // -> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TSCDEADL);
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES);     // -> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE);   // -> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE); // -> EMU
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX);     // -> EMU?
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_F16C);
-    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_RDRAND);
-    CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_HVP);     // Normally not set by host
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE3);    // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCLMUL);  // -> EMU?
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DTES64);  // -> EMU?
+    CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_MONITOR);
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CPLDS);   // -> EMU?
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_VMX);     // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SMX);     // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_EST);     // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TM2);     // -> EMU?
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSSE3);   // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CNTXID);  // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, RT_BIT_32(11) /*reserved*/ );
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_FMA);     // -> EMU? what's this?
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CX16);    // -> EMU?
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PDCM);    // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, RT_BIT_32(16) /*reserved*/);
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCID);
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DCA);     // -> EMU?
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_1);  // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_2);  // -> EMU
+    CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_X2APIC);
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_MOVBE);   // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_POPCNT);  // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TSCDEADL);
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AES);     // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_XSAVE);   // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE); // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AVX);     // -> EMU?
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_F16C);
+    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_RDRAND);
+    CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_HVP);     // Normally not set by host
 
     /* CPUID(1).edx */
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE);      // -> EMU?
-    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC);     // -> EMU
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR);     // -> EMU
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PAE);
-    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8);     // -> EMU?
-    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);
-    CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);
-    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP);
-    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);
-    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);
-    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV);    // -> EMU
-    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);
-    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);
-    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH);   // -> EMU
-    CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DS);      // -> EMU?
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_ACPI);    // -> EMU?
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX);     // -> EMU
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR);    // -> EMU
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE);     // -> EMU
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2);    // -> EMU
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SS);      // -> EMU?
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_HTT);     // -> EMU?
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TM);      // -> EMU?
-    CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/);   // -> EMU
-    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PBE);     // -> EMU?
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FPU);
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_VME);
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DE);      // -> EMU?
+    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE);
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TSC);     // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MSR);     // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PAE);
+    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCE);
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CX8);     // -> EMU?
+    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_APIC);
+    CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(10) /*reserved*/);
+    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_SEP);
+    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MTRR);
+    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PGE);
+    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCA);
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CMOV);    // -> EMU
+    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PAT);
+    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE36);
+    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSN);
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CLFSH);   // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(20) /*reserved*/);
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DS);      // -> EMU?
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_ACPI);    // -> EMU?
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MMX);     // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FXSR);    // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE);     // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE2);    // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SS);      // -> EMU?
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_HTT);     // -> EMU?
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TM);      // -> EMU?
+    CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(30) /*JMPE/IA64*/);   // -> EMU
+    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PBE);     // -> EMU?
 
     /* CPUID(0x80000000). */
@@ -3231,74 +3231,74 @@
     {
         /** @todo deal with no 0x80000001 on the host. */
-        bool const fHostAmd  = ASMIsAmdCpuEx(aHostRawStd[0].ebx, aHostRawStd[0].ecx, aHostRawStd[0].edx);
-        bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0].ebx, aGuestCpuIdExt[0].ecx, aGuestCpuIdExt[0].edx);
+        bool const fHostAmd  = ASMIsAmdCpuEx(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx);
+        bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx);
 
         /* CPUID(0x80000001).ecx */
-        CPUID_GST_FEATURE_WRN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);   // -> EMU
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL);    // -> EMU
-        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM);     // -> EMU
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ???
-        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L);    // -> EMU
-        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM);     // -> EMU
-        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);   // -> EMU
-        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU
-        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU
-        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW);    // -> EMU?
-        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS);     // -> EMU
-        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5);    // -> EMU
-        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);  // -> EMU
-        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT);     // -> EMU
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));
-        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));
+        CPUID_GST_FEATURE_WRN(Ext, uEcx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);   // -> EMU
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CMPL);    // -> EMU
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SVM);     // -> EMU
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ???
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CR8L);    // -> EMU
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_ABM);     // -> EMU
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);   // -> EMU
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_OSVW);    // -> EMU?
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_IBS);     // -> EMU
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE5);    // -> EMU
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);  // -> EMU
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_WDT);     // -> EMU
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(14));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(15));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(16));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(17));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(18));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(19));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(20));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(21));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(22));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(23));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(24));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(25));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(26));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(27));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(28));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(29));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(30));
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(31));
 
         /* CPUID(0x80000001).edx */
-        CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_FPU,   X86_CPUID_FEATURE_EDX_FPU);     // -> EMU
-        CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_VME,   X86_CPUID_FEATURE_EDX_VME);     // -> EMU
-        CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_DE,    X86_CPUID_FEATURE_EDX_DE);      // -> EMU
-        CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_PSE,   X86_CPUID_FEATURE_EDX_PSE);
-        CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_TSC,   X86_CPUID_FEATURE_EDX_TSC);     // -> EMU
-        CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_MSR,   X86_CPUID_FEATURE_EDX_MSR);     // -> EMU
-        CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_PAE,   X86_CPUID_FEATURE_EDX_PAE);
-        CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_MCE,   X86_CPUID_FEATURE_EDX_MCE);
-        CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_CX8,   X86_CPUID_FEATURE_EDX_CX8);     // -> EMU?
-        CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_APIC,  X86_CPUID_FEATURE_EDX_APIC);
-        CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(10) /*reserved*/);
-        CPUID_GST_FEATURE_IGN(    Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL);                              // On Intel: long mode only.
-        CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_MTRR,  X86_CPUID_FEATURE_EDX_MTRR);
-        CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_PGE,   X86_CPUID_FEATURE_EDX_PGE);
-        CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_MCA,   X86_CPUID_FEATURE_EDX_MCA);
-        CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_CMOV,  X86_CPUID_FEATURE_EDX_CMOV);    // -> EMU
-        CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_PAT,   X86_CPUID_FEATURE_EDX_PAT);
-        CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36);
-        CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(18) /*reserved*/);
-        CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(19) /*reserved*/);
-        CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);
-        CPUID_GST_FEATURE_WRN(    Ext, edx, RT_BIT_32(21) /*reserved*/);
-        CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
-        CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_MMX,   X86_CPUID_FEATURE_EDX_MMX);     // -> EMU
-        CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_FXSR,  X86_CPUID_FEATURE_EDX_FXSR);    // -> EMU
-        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
-        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
-        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
-        CPUID_GST_FEATURE_IGN(    Ext, edx, RT_BIT_32(28) /*reserved*/);
-        CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
-        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
-        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
+        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_FPU,   X86_CPUID_FEATURE_EDX_FPU);     // -> EMU
+        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_VME,   X86_CPUID_FEATURE_EDX_VME);     // -> EMU
+        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_DE,    X86_CPUID_FEATURE_EDX_DE);      // -> EMU
+        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE,   X86_CPUID_FEATURE_EDX_PSE);
+        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_TSC,   X86_CPUID_FEATURE_EDX_TSC);     // -> EMU
+        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_MSR,   X86_CPUID_FEATURE_EDX_MSR);     // -> EMU
+        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_PAE,   X86_CPUID_FEATURE_EDX_PAE);
+        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_MCE,   X86_CPUID_FEATURE_EDX_MCE);
+        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_CX8,   X86_CPUID_FEATURE_EDX_CX8);     // -> EMU?
+        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_APIC,  X86_CPUID_FEATURE_EDX_APIC);
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(10) /*reserved*/);
+        CPUID_GST_FEATURE_IGN(    Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL);                              // On Intel: long mode only.
+        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_MTRR,  X86_CPUID_FEATURE_EDX_MTRR);
+        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_PGE,   X86_CPUID_FEATURE_EDX_PGE);
+        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_MCA,   X86_CPUID_FEATURE_EDX_MCA);
+        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_CMOV,  X86_CPUID_FEATURE_EDX_CMOV);    // -> EMU
+        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_PAT,   X86_CPUID_FEATURE_EDX_PAT);
+        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36);
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(18) /*reserved*/);
+        CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(19) /*reserved*/);
+        CPUID_GST_FEATURE_RET(    Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_NX);
+        CPUID_GST_FEATURE_WRN(    Ext, uEdx, RT_BIT_32(21) /*reserved*/);
+        CPUID_GST_FEATURE_RET(    Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
+        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_MMX,   X86_CPUID_FEATURE_EDX_MMX);     // -> EMU
+        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_FXSR,  X86_CPUID_FEATURE_EDX_FXSR);    // -> EMU
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
+        CPUID_GST_FEATURE_IGN(    Ext, uEdx, RT_BIT_32(28) /*reserved*/);
+        CPUID_GST_FEATURE_RET(    Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
+        CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
     }
 
@@ -3346,9 +3346,65 @@
 
 /**
+ * Gets a pointer to the default CPUID leaf.
+ *
+ * @returns Raw-mode pointer to the default CPUID leaf (read-only).
+ * @param   pVM         Pointer to the VM.
+ * @remark  Intended for PATM only.
+ */
+VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM)
+{
+    return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdPatmDef);
+}
+
+
+/**
+ * Gets a pointer to the CPUID leaf array.
+ *
+ * @returns Raw-mode pointer to the CPUID leaf array.
+ * @param   pVM         Pointer to the VM.
+ * @remark  Intended for PATM only.
+ */
+VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayRCPtr(PVM pVM)
+{
+    Assert(MMHyperRCToR3(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesRC) == pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
+    return pVM->cpum.s.GuestInfo.paCpuIdLeavesRC;
+}
+
+
+/**
+ * Gets a pointer to the end of the CPUID leaf array.
+ *
+ * @returns Raw-mode pointer to the end of CPUID leaf array (exclusive).
+ * @param   pVM         Pointer to the VM.
+ * @remark  Intended for PATM only.
+ */
+VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(PVM pVM)
+{
+    Assert(MMHyperRCToR3(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesRC) == pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
+    return pVM->cpum.s.GuestInfo.paCpuIdLeavesRC
+         + pVM->cpum.s.GuestInfo.cCpuIdLeaves * sizeof(CPUMCPUIDLEAF);
+}
+
+
+/**
+ * Gets the unknown CPUID leaf method.
+ *
+ * @returns Unknown CPUID leaf method.
+ * @param   pVM         Pointer to the VM.
+ * @remark  Intended for PATM only.
+ */
+VMMR3_INT_DECL(CPUMUKNOWNCPUID) CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(PVM pVM)
+{
+    return pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod;
+}
+
+
+
+/**
  * Gets a number of standard CPUID leafs (PATM only).
  *
  * @returns Number of leafs.
  * @param   pVM         Pointer to the VM.
- * @remark  Intended for PATM.
+ * @remark  Intended for PATM - legacy, don't use in new code.
  */
 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM)
@@ -3363,5 +3419,5 @@
  * @returns Number of leafs.
  * @param   pVM         Pointer to the VM.
- * @remark  Intended for PATM.
+ * @remark  Intended for PATM - legacy, don't use in new code.
  */
 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM)
@@ -3376,5 +3432,5 @@
  * @returns Number of leafs.
  * @param   pVM         Pointer to the VM.
- * @remark  Intended for PATM.
+ * @remark  Intended for PATM - legacy, don't use in new code.
  */
 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmCentaurMax(PVM pVM)
@@ -3389,7 +3445,7 @@
  * CPUMR3GetGuestCpuIdStdMax() give the size of the array.
  *
- * @returns Pointer to the standard CPUID leaves (read-only).
+ * @returns Raw-mode pointer to the standard CPUID leaves (read-only).
  * @param   pVM         Pointer to the VM.
- * @remark  Intended for PATM.
+ * @remark  Intended for PATM - legacy, don't use in new code.
  */
 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmStdRCPtr(PVM pVM)
@@ -3404,7 +3460,7 @@
  * CPUMGetGuestCpuIdExtMax() give the size of the array.
  *
- * @returns Pointer to the extended CPUID leaves (read-only).
+ * @returns Raw-mode pointer to the extended CPUID leaves (read-only).
  * @param   pVM         Pointer to the VM.
- * @remark  Intended for PATM.
+ * @remark  Intended for PATM - legacy, don't use in new code.
  */
 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmExtRCPtr(PVM pVM)
@@ -3419,24 +3475,11 @@
  * CPUMGetGuestCpuIdCentaurMax() give the size of the array.
  *
- * @returns Pointer to the centaur CPUID leaves (read-only).
+ * @returns Raw-mode pointer to the centaur CPUID leaves (read-only).
  * @param   pVM         Pointer to the VM.
- * @remark  Intended for PATM.
+ * @remark  Intended for PATM - legacy, don't use in new code.
  */
 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmCentaurRCPtr(PVM pVM)
 {
     return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdPatmCentaur[0]);
-}
-
-
-/**
- * Gets a pointer to the default CPUID leaf.
- *
- * @returns Pointer to the default CPUID leaf (read-only).
- * @param   pVM         Pointer to the VM.
- * @remark  Intended for PATM.
- */
-VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM)
-{
-    return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdPatmDef);
 }
 
Index: /trunk/src/VBox/VMM/VMMR3/PATM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PATM.cpp	(revision 54713)
+++ /trunk/src/VBox/VMM/VMMR3/PATM.cpp	(revision 54714)
@@ -748,20 +748,24 @@
 
     /*
-     * Apply fixups
+     * Apply fixups.
      */
-    PRELOCREC pRec = 0;
-    AVLPVKEY  key  = 0;
-
-    while (true)
-    {
-        /* Get the record that's closest from above */
-        pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
-        if (pRec == 0)
+    AVLPVKEY key = NULL;
+    for (;;)
+    {
+        /* Get the record that's closest from above (after or equal to key). */
+        PRELOCREC pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
+        if (!pRec)
             break;
 
-        key = (AVLPVKEY)(pRec->pRelocPos + 1);   /* search for the next record during the next round. */
+        key = (uint8_t *)pRec->Core.Key + 1;   /* search for the next record during the next round. */
 
         switch (pRec->uType)
         {
+        case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
+            Assert(pRec->pDest == pRec->pSource);
+            Log(("Absolute patch template fixup type %#x at %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
+            *(RTRCUINTPTR *)pRec->pRelocPos += delta;
+            break;
+
         case FIXUP_ABSOLUTE:
             Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
@@ -2648,5 +2652,6 @@
             if (fAddFixup)
             {
-                if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
+                if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
+                                        pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
                 {
                     Log(("Relocation failed for the jump in the guest code!!\n"));
@@ -2664,5 +2669,6 @@
             if (fAddFixup)
             {
-                if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
+                if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
+                                        pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
                 {
                     Log(("Relocation failed for the jump in the guest code!!\n"));
@@ -2689,5 +2695,6 @@
         if (fAddFixup)
         {
-            if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
+            if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32,
+                                    PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
             {
                 Log(("Relocation failed for the jump in the guest code!!\n"));
@@ -2783,5 +2790,6 @@
     if (fAddFixup)
     {
-        if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
+        if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH,
+                                pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
         {
             Log(("Relocation failed for the jump in the guest code!!\n"));
Index: /trunk/src/VBox/VMM/VMMR3/PATMA.asm
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PATMA.asm	(revision 54713)
+++ /trunk/src/VBox/VMM/VMMR3/PATMA.asm	(revision 54714)
@@ -32,4 +32,5 @@
 %include "VBox/err.mac"
 %include "iprt/x86.mac"
+%include "VBox/vmm/cpum.mac"
 %include "VBox/vmm/vm.mac"
 %include "PATMA.mac"
@@ -1723,54 +1724,114 @@
 ;
 BEGIN_PATCH g_patmCpuidRecord, PATMCpuidReplacement
+    not     dword [esp-16]              ; probe stack before starting, just in case.
+    not     dword [esp-16]
     mov     dword [ss:PATM_INTERRUPTFLAG], 0
 PATCH_FIXUP PATM_INTERRUPTFLAG
     pushf
 
-    cmp     eax, PATM_CPUID_STD_MAX
-PATCH_FIXUP PATM_CPUID_STD_MAX
-    jb      cpuid_std
-    cmp     eax, 0x80000000
-    jb      cpuid_def
-    cmp     eax, PATM_CPUID_EXT_MAX
-PATCH_FIXUP PATM_CPUID_EXT_MAX
-    jb      cpuid_ext
-    cmp     eax, 0xc0000000
-    jb      cpuid_def
-    cmp     eax, PATM_CPUID_CENTAUR_MAX
-PATCH_FIXUP PATM_CPUID_CENTAUR_MAX
-    jb      cpuid_centaur
-
-cpuid_def:
-    mov     eax, PATM_CPUID_DEF_PTR
+;; @todo We could put all this stuff in a CPUM assembly function and simply call it.
+
+    ; Save the registers we use for passthru and sub-leaf matching (eax is not used).
+    push    edx
+    push    ecx
+    push    ebx
+
+    ;
+    ; Perform a linear search of the strictly sorted CPUID leaf array. 
+    ;
+    ; (Was going to do a binary search, but that ended up being complicated if 
+    ; we want a flexible leaf size. Linear search is probably good enough.)
+    ;
+    mov     ebx, PATM_CPUID_ARRAY_PTR
+PATCH_FIXUP PATM_CPUID_ARRAY_PTR
+    mov     edx, PATM_CPUID_ARRAY_END_PTR
+PATCH_FIXUP PATM_CPUID_ARRAY_END_PTR
+    cmp     ebx, edx
+    jae     cpuid_unknown
+
+cpuid_lookup_leaf:
+    cmp     eax, [ss:ebx + CPUMCPUIDLEAF.uLeaf]
+    jbe     cpuid_maybe_match_eax
+    add     ebx, PATM_CPUID_ARRAY_ENTRY_SIZE
+PATCH_FIXUP PATM_CPUID_ARRAY_ENTRY_SIZE
+    cmp     ebx, edx
+    jb      cpuid_lookup_leaf
+    jmp     cpuid_unknown
+
+cpuid_maybe_match_eax:    
+    jne     cpuid_unknown
+
+    ; Sub-leaf match too?
+    mov     ecx, [esp + 4]
+    and     ecx, [ss:ebx + CPUMCPUIDLEAF.fSubLeafMask]
+    cmp     ecx, [ss:ebx + CPUMCPUIDLEAF.uSubLeaf]
+    je      cpuid_fetch
+
+    ; Search forward until we've got a matching sub-leaf (or not). 
+cpuid_subleaf_lookup:
+    add     ebx, PATM_CPUID_ARRAY_ENTRY_SIZE
+PATCH_FIXUP PATM_CPUID_ARRAY_ENTRY_SIZE
+    cmp     ebx, edx
+    jae     cpuid_subleaf_not_found_sub_ebx
+    cmp     eax, [ss:ebx + CPUMCPUIDLEAF.uLeaf]    
+    jne     cpuid_subleaf_not_found_sub_ebx
+    cmp     ecx, [ss:ebx + CPUMCPUIDLEAF.uSubLeaf]    
+    ja      cpuid_subleaf_lookup
+    je      cpuid_fetch
+cpuid_subleaf_not_found_sub_ebx:
+    sub     ebx, PATM_CPUID_ARRAY_ENTRY_SIZE
+PATCH_FIXUP PATM_CPUID_ARRAY_ENTRY_SIZE
+    
+    ;
+    ; Out-of-range sub-leafs aren't quite as easy and pretty to emulate,
+    ; but we do an adequate job here.
+    ;    
+cpuid_subleaf_not_found:
+    mov     ecx, [esp + 4]
+    test    dword [ss:ebx + CPUMCPUIDLEAF.fFlags], CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED
+    jnz     cpuid_load_zeros_except_ecx
+cpuid_load_zeros:
+    xor     ecx, ecx
+cpuid_load_zeros_except_ecx:
+    xor     edx, edx
+    xor     eax, eax
+    xor     ebx, ebx
+    jmp     cpuid_done
+
+    ;
+    ; Different CPUs have different ways of dealing with unknown CPUID leaves.
+    ;
+cpuid_unknown:
+    mov     edx, PATM_CPUID_UNKNOWN_METHOD
+PATCH_FIXUP PATM_CPUID_UNKNOWN_METHOD
+    cmp     edx, CPUMUKNOWNCPUID_PASSTHRU
+    je      cpuid_unknown_passthru
+    ; Load the default cpuid leaf.
+cpuid_unknown_def_leaf:
+    mov     ebx, PATM_CPUID_DEF_PTR
 PATCH_FIXUP PATM_CPUID_DEF_PTR
-    jmp     cpuid_fetch
-
-cpuid_std:
-    mov     edx, PATM_CPUID_STD_PTR
-PATCH_FIXUP PATM_CPUID_STD_PTR
-    jmp     cpuid_calc
-
-cpuid_ext:
-    and     eax, 0ffh                   
-    mov     edx, PATM_CPUID_EXT_PTR
-PATCH_FIXUP PATM_CPUID_EXT_PTR
-    jmp     cpuid_calc
-
-cpuid_centaur:
-    and     eax, 0ffh                   
-    mov     edx, PATM_CPUID_CENTAUR_PTR
-PATCH_FIXUP PATM_CPUID_CENTAUR_PTR
-
-cpuid_calc:
-    lea     eax, [ss:eax * 4]              ; 4 entries...
-    lea     eax, [ss:eax * 4]              ; 4 bytes each
-    add     eax, edx
-
+    mov     edx, [ss:ebx + CPUMCPUID.uEdx]
+    mov     ecx, [ss:ebx + CPUMCPUID.uEcx]
+    mov     eax, [ss:ebx + CPUMCPUID.uEax]
+    mov     ebx, [ss:ebx + CPUMCPUID.uEbx]
+    jmp     cpuid_done
+    ; Pass thru the input values unmodified (eax is still virgin).
+cpuid_unknown_passthru:
+    mov     edx, [esp + 8]
+    mov     ecx, [esp + 4]
+    mov     ebx, [esp]
+    jmp     cpuid_done
+
+    ;
+    ; Normal return.
+    ;
 cpuid_fetch:
-    mov     edx, [ss:eax + 12]             ; CPUMCPUID layout assumptions!
-    mov     ecx, [ss:eax + 8]
-    mov     ebx, [ss:eax + 4]
-    mov     eax, [ss:eax]
-
+    mov     edx, [ss:ebx + CPUMCPUIDLEAF.uEdx]
+    mov     ecx, [ss:ebx + CPUMCPUIDLEAF.uEcx]
+    mov     eax, [ss:ebx + CPUMCPUIDLEAF.uEax]
+    mov     ebx, [ss:ebx + CPUMCPUIDLEAF.uEbx]
+             
+cpuid_done:
+    add     esp, 12
     popf
     mov     dword [ss:PATM_INTERRUPTFLAG], 1
Index: /trunk/src/VBox/VMM/VMMR3/PATMA.mac
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PATMA.mac	(revision 54713)
+++ /trunk/src/VBox/VMM/VMMR3/PATMA.mac	(revision 54714)
@@ -20,4 +20,5 @@
 
 ;; @name Patch Fixup Types
+; @remarks These fixups types are part of the saved state.
 ; @{
 %define PATM_VMFLAGS                            0xF1ABCD00
@@ -52,4 +53,9 @@
 %define PATM_CALL_RETURN_ADDR                   0xF1ABCD19
 %define PATM_CPUID_CENTAUR_PTR                  0xF1ABCD1a
+%define PATM_CPUID_ARRAY_PTR                    0xF1ABCD1b
+%define PATM_CPUID_ARRAY_END_PTR                0xF1ABCD1c
+%define PATM_CPUID_ARRAY_ENTRY_SIZE             0xF1ABCD1d
+%define PATM_CPUID_UNKNOWN_METHOD               0xF1ABCD1e
+
 
 ;/* Anything larger doesn't require a fixup */
Index: /trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp	(revision 54713)
+++ /trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp	(revision 54714)
@@ -94,5 +94,7 @@
     PRELOCREC pRec;
 
-    Assert(uType == FIXUP_ABSOLUTE || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest));
+    Assert(   uType == FIXUP_ABSOLUTE
+           || (uType == FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL && pSource == pDest && PATM_IS_FIXUP_TYPE(pSource))
+           || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest));
 
     LogFlow(("patmPatchAddReloc32 type=%d pRelocGC=%RRv source=%RRv dest=%RRv\n", uType, pRelocHC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemGC , pSource, pDest));
@@ -162,206 +164,219 @@
 #endif
 
-                /**
-                 * BE VERY CAREFUL WITH THESE FIXUPS. TAKE INTO ACCOUNT THAT PROBLEMS MAY ARISE WHEN RESTORING A SAVED STATE WITH
-                 * A DIFFERENT HYPERVISOR LAYOUT.
+                /*
+                 * BE VERY CAREFUL WITH THESE FIXUPS. TAKE INTO ACCOUNT THAT PROBLEMS MAY ARISE WHEN RESTORING
+                 * A SAVED STATE WITH A DIFFERENT HYPERVISOR LAYOUT.
                  */
                 switch (pAsmRecord->aRelocs[i].uType)
                 {
-                case PATM_VMFLAGS:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags);
-                    break;
-
-                case PATM_PENDINGACTION:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction);
-                    break;
-
-                case PATM_FIXUP:
-                    /* Offset in aRelocs[i].uInfo is from the base of the function. */
-                    dest = (RTGCUINTPTR32)pVM->patm.s.pPatchMemGC + pAsmRecord->aRelocs[i].uInfo
-                         + (RTGCUINTPTR32)(pPB - pVM->patm.s.pPatchMemHC);
-                    break;
+                    /*
+                     * PATMGCSTATE member fixups.
+                     */
+                    case PATM_VMFLAGS:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags);
+                        break;
+                    case PATM_PENDINGACTION:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction);
+                        break;
+                    case PATM_STACKPTR:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp);
+                        break;
+                    case PATM_INTERRUPTFLAG:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF);
+                        break;
+                    case PATM_INHIBITIRQADDR:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts);
+                        break;
+                    case PATM_TEMP_EAX:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX);
+                        break;
+                    case PATM_TEMP_ECX:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX);
+                        break;
+                    case PATM_TEMP_EDI:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI);
+                        break;
+                    case PATM_TEMP_EFLAGS:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags);
+                        break;
+                    case PATM_TEMP_RESTORE_FLAGS:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags);
+                        break;
+                    case PATM_CALL_PATCH_TARGET_ADDR:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallPatchTargetAddr);
+                        break;
+                    case PATM_CALL_RETURN_ADDR:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallReturnAddr);
+                        break;
 #ifdef VBOX_WITH_STATISTICS
-                case PATM_ALLPATCHCALLS:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls);
-                    break;
-
-                case PATM_IRETEFLAGS:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags);
-                    break;
-
-                case PATM_IRETCS:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS);
-                    break;
-
-                case PATM_IRETEIP:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP);
-                    break;
-
-                case PATM_PERPATCHCALLS:
-                    dest = patmPatchQueryStatAddress(pVM, pPatch);
-                    break;
+                    case PATM_ALLPATCHCALLS:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls);
+                        break;
+                    case PATM_IRETEFLAGS:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags);
+                        break;
+                    case PATM_IRETCS:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS);
+                        break;
+                    case PATM_IRETEIP:
+                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP);
+                        break;
 #endif
-                case PATM_STACKPTR:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp);
-                    break;
-
-                /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd
-                 * part to store the original return addresses.
-                 */
-                case PATM_STACKBASE:
-                    dest = pVM->patm.s.pGCStackGC;
-                    break;
-
-                case PATM_STACKBASE_GUEST:
-                    dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE;
-                    break;
-
-                case PATM_RETURNADDR:   /* absolute guest address; no fixup required */
-                    Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
-                    dest = pCallInfo->pReturnGC;
-                    break;
-
-                case PATM_PATCHNEXTBLOCK:  /* relative address of instruction following this block */
-                    Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
-
-                    /** @note hardcoded assumption that we must return to the instruction following this block */
-                    dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->cbFunction;
-                    break;
-
-                case PATM_CALLTARGET:   /* relative to patch address; no fixup required */
-                    Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
-
-                    /* Address must be filled in later. (see patmr3SetBranchTargets)  */
-                    patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL);
-                    dest = PATM_ILLEGAL_DESTINATION;
-                    break;
-
-                case PATM_PATCHBASE:    /* Patch GC base address */
-                    dest = pVM->patm.s.pPatchMemGC;
-                    break;
-
-                case PATM_CPUID_STD_PTR:
-                    dest = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
-                    break;
-
-                case PATM_CPUID_EXT_PTR:
-                    dest = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
-                    break;
-
-                case PATM_CPUID_CENTAUR_PTR:
-                    dest = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
-                    break;
-
-                case PATM_CPUID_DEF_PTR:
-                    dest = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
-                    break;
-
-                case PATM_CPUID_STD_MAX:
-                    dest = CPUMR3GetGuestCpuIdPatmStdMax(pVM);
-                    break;
-
-                case PATM_CPUID_EXT_MAX:
-                    dest = CPUMR3GetGuestCpuIdPatmExtMax(pVM);
-                    break;
-
-                case PATM_CPUID_CENTAUR_MAX:
-                    dest = CPUMR3GetGuestCpuIdPatmCentaurMax(pVM);
-                    break;
-
-                case PATM_INTERRUPTFLAG:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF);
-                    break;
-
-                case PATM_INHIBITIRQADDR:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts);
-                    break;
-
-                case PATM_NEXTINSTRADDR:
-                    Assert(pCallInfo);
-                    /* pNextInstrGC can be 0 if several instructions, that inhibit irqs, follow each other */
-                    dest = pCallInfo->pNextInstrGC;
-                    break;
-
-                case PATM_CURINSTRADDR:
-                    Assert(pCallInfo);
-                    dest = pCallInfo->pCurInstrGC;
-                    break;
-
-                case PATM_VM_FORCEDACTIONS:
-                    /* @todo dirty assumptions when correcting this fixup during saved state loading. */
-                    dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
-                    break;
-
-                case PATM_TEMP_EAX:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX);
-                    break;
-                case PATM_TEMP_ECX:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX);
-                    break;
-                case PATM_TEMP_EDI:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI);
-                    break;
-                case PATM_TEMP_EFLAGS:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags);
-                    break;
-                case PATM_TEMP_RESTORE_FLAGS:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags);
-                    break;
-                case PATM_CALL_PATCH_TARGET_ADDR:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallPatchTargetAddr);
-                    break;
-                case PATM_CALL_RETURN_ADDR:
-                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallReturnAddr);
-                    break;
-
-                /* Relative address of global patm lookup and call function. */
-                case PATM_LOOKUP_AND_CALL_FUNCTION:
-                {
-                    RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
-                    Assert(pVM->patm.s.pfnHelperCallGC);
-                    Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
-
-                    /* Relative value is target minus address of instruction after the actual call instruction. */
-                    dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall;
-                    break;
-                }
-
-                case PATM_RETURN_FUNCTION:
-                {
-                    RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
-                    Assert(pVM->patm.s.pfnHelperRetGC);
-                    Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
-
-                    /* Relative value is target minus address of instruction after the actual call instruction. */
-                    dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall;
-                    break;
-                }
-
-                case PATM_IRET_FUNCTION:
-                {
-                    RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
-                    Assert(pVM->patm.s.pfnHelperIretGC);
-                    Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
-
-                    /* Relative value is target minus address of instruction after the actual call instruction. */
-                    dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall;
-                    break;
-                }
-
-                case PATM_LOOKUP_AND_JUMP_FUNCTION:
-                {
-                    RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
-                    Assert(pVM->patm.s.pfnHelperJumpGC);
-                    Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
-
-                    /* Relative value is target minus address of instruction after the actual call instruction. */
-                    dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall;
-                    break;
-                }
-
-                default:
-                    dest = PATM_ILLEGAL_DESTINATION;
-                    AssertRelease(0);
-                    break;
+
+
+                    case PATM_FIXUP:
+                        /* Offset in aRelocs[i].uInfo is from the base of the function. */
+                        dest = (RTGCUINTPTR32)pVM->patm.s.pPatchMemGC + pAsmRecord->aRelocs[i].uInfo
+                             + (RTGCUINTPTR32)(pPB - pVM->patm.s.pPatchMemHC);
+                        break;
+
+#ifdef VBOX_WITH_STATISTICS
+                    case PATM_PERPATCHCALLS:
+                        dest = patmPatchQueryStatAddress(pVM, pPatch);
+                        break;
+#endif
+
+                    /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd
+                     * part to store the original return addresses.
+                     */
+                    case PATM_STACKBASE:
+                        dest = pVM->patm.s.pGCStackGC;
+                        break;
+
+                    case PATM_STACKBASE_GUEST:
+                        dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE;
+                        break;
+
+                    case PATM_RETURNADDR:   /* absolute guest address; no fixup required */
+                        Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
+                        dest = pCallInfo->pReturnGC;
+                        break;
+
+                    case PATM_PATCHNEXTBLOCK:  /* relative address of instruction following this block */
+                        Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
+
+                        /** @note hardcoded assumption that we must return to the instruction following this block */
+                        dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->cbFunction;
+                        break;
+
+                    case PATM_CALLTARGET:   /* relative to patch address; no fixup required */
+                        Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
+
+                        /* Address must be filled in later. (see patmr3SetBranchTargets)  */
+                        patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL);
+                        dest = PATM_ILLEGAL_DESTINATION;
+                        break;
+
+                    case PATM_PATCHBASE:    /* Patch GC base address */
+                        dest = pVM->patm.s.pPatchMemGC;
+                        break;
+
+                    case PATM_NEXTINSTRADDR:
+                        Assert(pCallInfo);
+                        /* pNextInstrGC can be 0 if several instructions, that inhibit irqs, follow each other */
+                        dest = pCallInfo->pNextInstrGC;
+                        break;
+
+                    case PATM_CURINSTRADDR:
+                        Assert(pCallInfo);
+                        dest = pCallInfo->pCurInstrGC;
+                        break;
+
+                    /* Relative address of global patm lookup and call function. */
+                    case PATM_LOOKUP_AND_CALL_FUNCTION:
+                    {
+                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
+                                                + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
+                        Assert(pVM->patm.s.pfnHelperCallGC);
+                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
+
+                        /* Relative value is target minus address of instruction after the actual call instruction. */
+                        dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall;
+                        break;
+                    }
+
+                    case PATM_RETURN_FUNCTION:
+                    {
+                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
+                                                + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
+                        Assert(pVM->patm.s.pfnHelperRetGC);
+                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
+
+                        /* Relative value is target minus address of instruction after the actual call instruction. */
+                        dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall;
+                        break;
+                    }
+
+                    case PATM_IRET_FUNCTION:
+                    {
+                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
+                                                + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
+                        Assert(pVM->patm.s.pfnHelperIretGC);
+                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
+
+                        /* Relative value is target minus address of instruction after the actual call instruction. */
+                        dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall;
+                        break;
+                    }
+
+                    case PATM_LOOKUP_AND_JUMP_FUNCTION:
+                    {
+                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
+                                                + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
+                        Assert(pVM->patm.s.pfnHelperJumpGC);
+                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
+
+                        /* Relative value is target minus address of instruction after the actual call instruction. */
+                        dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall;
+                        break;
+                    }
+
+                    case PATM_CPUID_STD_MAX: /* saved state only */
+                        dest = CPUMR3GetGuestCpuIdPatmStdMax(pVM);
+                        break;
+                    case PATM_CPUID_EXT_MAX: /* saved state only */
+                        dest = CPUMR3GetGuestCpuIdPatmExtMax(pVM);
+                        break;
+                    case PATM_CPUID_CENTAUR_MAX: /* saved state only */
+                        dest = CPUMR3GetGuestCpuIdPatmCentaurMax(pVM);
+                        break;
+
+                    /*
+                     * The following fixups need to be recalculated when loading saved state.
+                     * Note! Earlier saved state versions had different hacks for detecting these.
+                     */
+                    case PATM_VM_FORCEDACTIONS:
+                        dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
+                        break;
+                    case PATM_CPUID_DEF_PTR:
+                        dest = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
+                        break;
+                    case PATM_CPUID_ARRAY_PTR:
+                        dest = CPUMR3GetGuestCpuIdPatmArrayRCPtr(pVM);
+                        break;
+                    case PATM_CPUID_ARRAY_END_PTR:
+                        dest = CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(pVM);
+                        break;
+                    case PATM_CPUID_ARRAY_ENTRY_SIZE:
+                        dest = sizeof(CPUMCPUIDLEAF);
+                        break;
+                    case PATM_CPUID_UNKNOWN_METHOD:
+                        dest = CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(pVM);
+                        break;
+
+                    case PATM_CPUID_STD_PTR: /* saved state only */
+                        dest = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
+                        break;
+                    case PATM_CPUID_EXT_PTR: /* saved state only */
+                        dest = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
+                        break;
+                    case PATM_CPUID_CENTAUR_PTR: /* saved state only */
+                        dest = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
+                        break;
+
+                    default:
+                        dest = PATM_ILLEGAL_DESTINATION;
+                        AssertReleaseFailed();
+                        break;
                 }
 
@@ -369,5 +384,6 @@
                 if (pAsmRecord->aRelocs[i].uType < PATM_NO_FIXUP)
                 {
-                    patmPatchAddReloc32(pVM, pPatch, &pPB[j], FIXUP_ABSOLUTE);
+                    patmPatchAddReloc32(pVM, pPatch, &pPB[j], FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL,
+                                        pAsmRecord->aRelocs[i].uType /*pSources*/, pAsmRecord->aRelocs[i].uType /*pDest*/);
                 }
                 break;
Index: /trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp	(revision 54713)
+++ /trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp	(revision 54714)
@@ -561,18 +561,4 @@
     rec.Core.Key = 0;
 
-    if (rec.uType == FIXUP_ABSOLUTE)
-    {
-        /* Core.Key abused to store the fixup type. */
-        if (*pFixup == pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions))
-            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPU_FF_ACTION;
-        else if (*pFixup == CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM))
-            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_DEFAULT;
-        else if (*pFixup == CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM))
-            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_STANDARD;
-        else if (*pFixup == CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM))
-            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_EXTENDED;
-        else if (*pFixup == CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM))
-            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_CENTAUR;
-    }
 
     /* Save the lookup record. */
@@ -1117,6 +1103,13 @@
     {
     case FIXUP_ABSOLUTE:
+    case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
     {
-        if (pRec->pSource && !PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pRec->pSource))
+        Assert(   pRec->uType != PATM_SAVED_STATE_VERSION_NO_RAW_MEM
+               || (pRec->pSource == pRec->pDest && PATM_IS_FIXUP_TYPE(pRec->pSource)) );
+
+        /* bird: What is this for exactly?  Only the MMIO fixups used to have pSource set. */
+        if (    pRec->pSource
+            && !PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pRec->pSource)
+            && pRec->uType != FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL)
             break;
 
@@ -1255,10 +1248,12 @@
             *pFixup = (uFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
         }
-        /* Boldly ASSUMES:
+        /*
+         * For PATM_SAVED_STATE_VERSION_FIXUP_HACK and earlier boldly ASSUME:
          * 1. That pCPUMCtxGC is in the VM structure and that its location is
          *    at the first page of the same 4 MB chunk.
          * 2. That the forced actions were in the first 32 bytes of the VM
          *    structure.
-         * 3. That the CPUM leafs are less than 8KB into the structure. */
+         * 3. That the CPUM leafs are less than 8KB into the structure.
+         */
         else if (   uVersion <= PATM_SAVED_STATE_VERSION_FIXUP_HACK
                  && uFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(32))
@@ -1266,4 +1261,6 @@
             LogFlow(("Changing fLocalForcedActions fixup from %RRv to %RRv\n", uFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions)));
             *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
+            pRec->pSource = pRec->pDest = PATM_VM_FORCEDACTIONS;
+            pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
         }
         else if (   uVersion <= PATM_SAVED_STATE_VERSION_FIXUP_HACK
@@ -1272,18 +1269,26 @@
             static int cCpuidFixup = 0;
 
-            /* very dirty assumptions about the cpuid patch and cpuid ordering. */
+            /* Very dirty assumptions about the cpuid patch and cpuid ordering. */
             switch (cCpuidFixup & 3)
             {
             case 0:
                 *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
+                pRec->pSource = pRec->pDest = PATM_CPUID_DEF_PTR;
+                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                 break;
             case 1:
                 *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
+                pRec->pSource = pRec->pDest = PATM_CPUID_STD_PTR;
+                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                 break;
             case 2:
                 *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
+                pRec->pSource = pRec->pDest = PATM_CPUID_EXT_PTR;
+                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                 break;
             case 3:
                 *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
+                pRec->pSource = pRec->pDest = PATM_CPUID_CENTAUR_PTR;
+                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                 break;
             }
@@ -1291,5 +1296,10 @@
             cCpuidFixup++;
         }
-        else if (uVersion >= PATM_SAVED_STATE_VERSION_MEM)
+        /*
+         * For PATM_SAVED_STATE_VERSION_MEM thru PATM_SAVED_STATE_VERSION_NO_RAW_MEM
+         * we abused Core.Key to store the type for fixups needing correcting on load.
+         */
+        else if (   uVersion >= PATM_SAVED_STATE_VERSION_MEM
+                 && uVersion <= PATM_SAVED_STATE_VERSION_NO_RAW_MEM)
         {
             /* Core.Key abused to store the type of fixup. */
@@ -1298,20 +1308,30 @@
             case PATM_FIXUP_CPU_FF_ACTION:
                 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
+                pRec->pSource = pRec->pDest = PATM_VM_FORCEDACTIONS;
+                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                 LogFlow(("Changing cpu ff action fixup from %x to %x\n", uFixup, *pFixup));
                 break;
             case PATM_FIXUP_CPUID_DEFAULT:
                 *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
+                pRec->pSource = pRec->pDest = PATM_CPUID_DEF_PTR;
+                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                 LogFlow(("Changing cpuid def fixup from %x to %x\n", uFixup, *pFixup));
                 break;
             case PATM_FIXUP_CPUID_STANDARD:
                 *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
+                pRec->pSource = pRec->pDest = PATM_CPUID_STD_PTR;
+                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                 LogFlow(("Changing cpuid std fixup from %x to %x\n", uFixup, *pFixup));
                 break;
             case PATM_FIXUP_CPUID_EXTENDED:
                 *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
+                pRec->pSource = pRec->pDest = PATM_CPUID_EXT_PTR;
+                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                 LogFlow(("Changing cpuid ext fixup from %x to %x\n", uFixup, *pFixup));
                 break;
             case PATM_FIXUP_CPUID_CENTAUR:
                 *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
+                pRec->pSource = pRec->pDest = PATM_CPUID_CENTAUR_PTR;
+                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                 LogFlow(("Changing cpuid centaur fixup from %x to %x\n", uFixup, *pFixup));
                 break;
@@ -1319,4 +1339,43 @@
                 AssertMsgFailed(("Unexpected fixup value %p\n", (uintptr_t)pRec->Core.Key));
                 break;
+            }
+        }
+        /*
+         * After PATM_SAVED_STATE_VERSION_NO_RAW_MEM we changed the fixup type
+         * and instead put the patch fixup code in the source and target addresses.
+         */
+        else if (   uVersion > PATM_SAVED_STATE_VERSION_NO_RAW_MEM
+                 && pRec->uType == FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL)
+        {
+            Assert(pRec->pSource == pRec->pDest); Assert(PATM_IS_FIXUP_TYPE(pRec->pSource));
+            switch (pRec->pSource)
+            {
+                case PATM_VM_FORCEDACTIONS:
+                    *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
+                    break;
+                case PATM_CPUID_DEF_PTR:
+                    *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
+                    break;
+                case PATM_CPUID_ARRAY_PTR:
+                    *pFixup = CPUMR3GetGuestCpuIdPatmArrayRCPtr(pVM);
+                    break;
+                case PATM_CPUID_ARRAY_END_PTR:
+                    *pFixup = CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(pVM);
+                    break;
+                case PATM_CPUID_ARRAY_ENTRY_SIZE:
+                    *pFixup = sizeof(CPUMCPUIDLEAF);
+                    break;
+                case PATM_CPUID_UNKNOWN_METHOD:
+                    *pFixup = CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(pVM);
+                    break;
+                case PATM_CPUID_STD_PTR: /* Saved again patches only. */
+                    *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
+                    break;
+                case PATM_CPUID_EXT_PTR: /* Saved again patches only. */
+                    *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
+                    break;
+                case PATM_CPUID_CENTAUR_PTR: /* Saved again patches only. */
+                    *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
+                    break;
             }
         }
Index: /trunk/src/VBox/VMM/include/PATMA.h
===================================================================
--- /trunk/src/VBox/VMM/include/PATMA.h	(revision 54713)
+++ /trunk/src/VBox/VMM/include/PATMA.h	(revision 54714)
@@ -20,4 +20,5 @@
 
 /** @name Patch Fixup Types
+ * @remarks These fixup types are part of the saved state.
  * @{ */
 #define PATM_VMFLAGS                            0xF1ABCD00
@@ -34,6 +35,6 @@
 #define PATM_FIXUP                              0xF1ABCD07
 #define PATM_PENDINGACTION                      0xF1ABCD08
-#define PATM_CPUID_STD_PTR                      0xF1ABCD09
-#define PATM_CPUID_EXT_PTR                      0xF1ABCD0a
+#define PATM_CPUID_STD_PTR                      0xF1ABCD09  /**< Legacy, saved state only. */
+#define PATM_CPUID_EXT_PTR                      0xF1ABCD0a  /**< Legacy, saved state only. */
 #define PATM_CPUID_DEF_PTR                      0xF1ABCD0b
 #define PATM_STACKBASE                          0xF1ABCD0c  /**< Stack to store our private patch return addresses */
@@ -51,5 +52,9 @@
 #define PATM_CALL_PATCH_TARGET_ADDR             0xF1ABCD18
 #define PATM_CALL_RETURN_ADDR                   0xF1ABCD19
-#define PATM_CPUID_CENTAUR_PTR                  0xF1ABCD1a
+#define PATM_CPUID_CENTAUR_PTR                  0xF1ABCD1a  /**< Legacy, saved state only. */
+#define PATM_CPUID_ARRAY_PTR                    0xF1ABCD1b
+#define PATM_CPUID_ARRAY_END_PTR                0xF1ABCD1c
+#define PATM_CPUID_ARRAY_ENTRY_SIZE             0xF1ABCD1d
+#define PATM_CPUID_UNKNOWN_METHOD               0xF1ABCD1e
 
 /* Anything larger doesn't require a fixup */
@@ -67,4 +72,8 @@
 #define PATM_IRET_FUNCTION                      0xF1ABCE0A  /**< Relative address of global PATM iret function. */
 #define PATM_CPUID_CENTAUR_MAX                  0xF1ABCE0B
+
+/** Identifies a patch fixup type value (with reasonable accuracy). */
+#define PATM_IS_FIXUP_TYPE(a_uValue) \
+    ( ((a_uValue) & UINT32_C(0xfffffC00)) == UINT32_C(0xF1ABCC00) && ((a_uValue) & UINT32_C(0xff)) < UINT32_C(0x30) )
 /** @} */
 
Index: /trunk/src/VBox/VMM/include/PATMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/PATMInternal.h	(revision 54713)
+++ /trunk/src/VBox/VMM/include/PATMInternal.h	(revision 54714)
@@ -32,6 +32,8 @@
 /** @name Saved state version numbers.
  * @{ */
+/** New fixup type FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL. */
+#define PATM_SAVED_STATE_VERSION                    57
 /** Uses normal structure serialization with markers and everything. */
-#define PATM_SAVED_STATE_VERSION                    56
+#define PATM_SAVED_STATE_VERSION_NO_RAW_MEM         56
 /** Last version which saves structures as raw memory. */
 #define PATM_SAVED_STATE_VERSION_MEM                55
@@ -98,7 +100,20 @@
 #define PATM_MAX_INVALID_WRITES            16384
 
+/** @name FIXUP_XXX - RELOCREC::uType values.
+ * @{ */
+/** Absolute fixup.  With one exception (MMIO cache), this does not take any
+ * source or destination.  @sa FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL.  */
 #define FIXUP_ABSOLUTE                     0
 #define FIXUP_REL_JMPTOPATCH               1
 #define FIXUP_REL_JMPTOGUEST               2
+/** Absolute fixup in patch assembly code template.
+ *
+ * The source and destination addresses are both set to the patch fixup type (see
+ * PATM_IS_FIXUP_TYPE and friends in PATMA.h).  This is a recent addition (CPUID
+ * subleaf code), so when loading older saved states this is usually represented
+ * as FIXUP_ABSOLUTE. */
+#define FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL   3
+/** @} */
+
 
 #define PATM_ILLEGAL_DESTINATION           0xDEADBEEF
Index: /trunk/src/VBox/VMM/tools/VBoxCpuReport.cpp
===================================================================
--- /trunk/src/VBox/VMM/tools/VBoxCpuReport.cpp	(revision 54713)
+++ /trunk/src/VBox/VMM/tools/VBoxCpuReport.cpp	(revision 54714)
@@ -4673,8 +4673,8 @@
                    szNameC,
                    CPUMR3CpuIdUnknownLeafMethodName(enmUnknownMethod),
-                   DefUnknown.eax,
-                   DefUnknown.ebx,
-                   DefUnknown.ecx,
-                   DefUnknown.edx,
+                   DefUnknown.uEax,
+                   DefUnknown.uEbx,
+                   DefUnknown.uEcx,
+                   DefUnknown.uEdx,
                    szMsrMask,
                    szNameC,
