Changeset 55229 in vbox
- Timestamp: Apr 14, 2015 6:35:43 AM
- Location: trunk
- Files: 15 edited
  - include/VBox/vmm/cpum.h (modified) (3 diffs)
  - include/VBox/vmm/cpum.mac (modified) (1 diff)
  - include/VBox/vmm/cpumctx.h (modified) (2 diffs)
  - include/VBox/vmm/vm.h (modified) (1 diff)
  - src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (modified) (6 diffs)
  - src/VBox/VMM/VMMAll/IEMAll.cpp (modified) (5 diffs)
  - src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (modified) (6 diffs)
  - src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h (modified) (11 diffs)
  - src/VBox/VMM/VMMR3/CPUM.cpp (modified) (1 diff)
  - src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp (modified) (15 diffs)
  - src/VBox/VMM/VMMR3/IEMR3.cpp (modified) (1 diff)
  - src/VBox/VMM/include/CPUMInternal.h (modified) (4 diffs)
  - src/VBox/VMM/include/CPUMInternal.mac (modified) (4 diffs)
  - src/VBox/VMM/include/IEMInternal.h (modified) (2 diffs)
  - src/VBox/VMM/testcase/tstIEMCheckMc.cpp (modified) (1 diff)
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
- … marks unchanged code omitted between hunks
trunk/include/VBox/vmm/cpum.h (r55062 → r55229)

     /** The MWait Extensions bits (Std) */
     CPUMCPUIDFEATURE_MWAIT_EXTS,
+    /** The CR4.OSXSAVE bit CPUID mirroring, only use from CPUMSetGuestCR4. */
+    CPUMCPUIDFEATURE_OSXSAVE,
     /** 32bit hackishness. */
     CPUMCPUIDFEATURE_32BIT_HACK = 0x7fffffff
…
 /** The leaf contains an APIC ID that needs changing to that of the current CPU. */
 #define CPUMCPUIDLEAF_F_CONTAINS_APIC_ID        RT_BIT_32(1)
+/** The leaf contains an OSXSAVE which needs individual handling on each CPU. */
+#define CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE        RT_BIT_32(2)
 /** Mask of the valid flags. */
-#define CPUMCPUIDLEAF_F_VALID_MASK              UINT32_C(0x3)
+#define CPUMCPUIDLEAF_F_VALID_MASK              UINT32_C(0x7)
 /** @} */
…
 
 
+/**
+ * CPU features and quirks.
+ * This is mostly exploded CPUID info.
+ */
+typedef struct CPUMFEATURES
+{
+    /** The CPU vendor (CPUMCPUVENDOR). */
+    uint8_t         enmCpuVendor;
+    /** The CPU family. */
+    uint8_t         uFamily;
+    /** The CPU model. */
+    uint8_t         uModel;
+    /** The CPU stepping. */
+    uint8_t         uStepping;
+    /** The microarchitecture. */
+#ifndef VBOX_FOR_DTRACE_LIB
+    CPUMMICROARCH   enmMicroarch;
+#else
+    uint32_t        enmMicroarch;
+#endif
+    /** The maximum physical address with of the CPU. */
+    uint8_t         cMaxPhysAddrWidth;
+    /** Alignment padding. */
+    uint8_t         abPadding[1];
+    /** Max size of the extended state (or FPU state if no XSAVE). */
+    uint16_t        cbMaxExtendedState;
+
+    /** Supports MSRs. */
+    uint32_t        fMsr : 1;
+    /** Supports the page size extension (4/2 MB pages). */
+    uint32_t        fPse : 1;
+    /** Supports 36-bit page size extension (4 MB pages can map memory above
+     * 4GB). */
+    uint32_t        fPse36 : 1;
+    /** Supports physical address extension (PAE). */
+    uint32_t        fPae : 1;
+    /** Page attribute table (PAT) support (page level cache control). */
+    uint32_t        fPat : 1;
+    /** Supports the FXSAVE and FXRSTOR instructions. */
+    uint32_t        fFxSaveRstor : 1;
+    /** Supports the XSAVE and XRSTOR instructions. */
+    uint32_t        fXSaveRstor : 1;
+    /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
+    uint32_t        fOpSysXSaveRstor : 1;
+    /** Supports MMX. */
+    uint32_t        fMmx : 1;
+    /** Supports AMD extensions to MMX instructions. */
+    uint32_t        fAmdMmxExts : 1;
+    /** Supports SSE. */
+    uint32_t        fSse : 1;
+    /** Supports SSE2. */
+    uint32_t        fSse2 : 1;
+    /** Supports SSE3. */
+    uint32_t        fSse3 : 1;
+    /** Supports SSSE3. */
+    uint32_t        fSsse3 : 1;
+    /** Supports SSE4.1. */
+    uint32_t        fSse41 : 1;
+    /** Supports SSE4.2. */
+    uint32_t        fSse42 : 1;
+    /** Supports AVX. */
+    uint32_t        fAvx : 1;
+    /** Supports AVX2. */
+    uint32_t        fAvx2 : 1;
+    /** Supports AVX512 foundation. */
+    uint32_t        fAvx512Foundation : 1;
+    /** Supports RDTSC. */
+    uint32_t        fTsc : 1;
+    /** Intel SYSENTER/SYSEXIT support */
+    uint32_t        fSysEnter : 1;
+    /** First generation APIC. */
+    uint32_t        fApic : 1;
+    /** Second generation APIC. */
+    uint32_t        fX2Apic : 1;
+    /** Hypervisor present. */
+    uint32_t        fHypervisorPresent : 1;
+    /** MWAIT & MONITOR instructions supported. */
+    uint32_t        fMonitorMWait : 1;
+    /** MWAIT Extensions present. */
+    uint32_t        fMWaitExtensions : 1;
+
+    /** Supports AMD 3DNow instructions. */
+    uint32_t        f3DNow : 1;
+    /** Supports the 3DNow/AMD64 prefetch instructions (could be nops). */
+    uint32_t        f3DNowPrefetch : 1;
+
+    /** AMD64: Supports long mode. */
+    uint32_t        fLongMode : 1;
+    /** AMD64: SYSCALL/SYSRET support. */
+    uint32_t        fSysCall : 1;
+    /** AMD64: No-execute page table bit. */
+    uint32_t        fNoExecute : 1;
+    /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
+    uint32_t        fLahfSahf : 1;
+    /** AMD64: Supports RDTSCP. */
+    uint32_t        fRdTscP : 1;
+    /** AMD64: Supports MOV CR8 in 32-bit code (lock prefix hack). */
+    uint32_t        fMovCr8In32Bit : 1;
+
+    /** Indicates that FPU instruction and data pointers may leak.
+     * This generally applies to recent AMD CPUs, where the FPU IP and DP pointer
+     * is only saved and restored if an exception is pending. */
+    uint32_t        fLeakyFxSR : 1;
+
+    /** Alignment padding / reserved for future use. */
+    uint32_t        fPadding : 29;
+    uint32_t        auPadding[3];
+} CPUMFEATURES;
+#ifndef VBOX_FOR_DTRACE_LIB
+AssertCompileSize(CPUMFEATURES, 32);
+#endif
+/** Pointer to a CPU feature structure. */
+typedef CPUMFEATURES *PCPUMFEATURES;
+/** Pointer to a const CPU feature structure. */
+typedef CPUMFEATURES const *PCCPUMFEATURES;
+
+
 /** @name Guest Register Getters.
  * @{ */
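The CPUMFEATURES structure added above (and removed from CPUMInternal.h further down in this changeset) is "exploded CPUID info": each interesting CPUID bit becomes a named one-bit field that consumers test directly instead of masking raw CPUID registers. The following is a minimal, self-contained sketch of that pattern only; the struct, helper, and values are hypothetical stand-ins, not VirtualBox code:

    /* Illustrative sketch of the "exploded CPUID" pattern this changeset
       switches IEM over to.  Hypothetical names, not VirtualBox APIs. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct FEATURES { uint32_t fSse2 : 1; uint32_t fMmx : 1; } FEATURES;

    static FEATURES ExplodeCpuId(uint32_t uEdx)
    {
        FEATURES Feat = {0};
        Feat.fSse2 = (uEdx >> 26) & 1;   /* CPUID(1).EDX[26] = SSE2 */
        Feat.fMmx  = (uEdx >> 23) & 1;   /* CPUID(1).EDX[23] = MMX  */
        return Feat;
    }

    int main(void)
    {
        FEATURES Feat = ExplodeCpuId(UINT32_C(0x04800000)); /* SSE2 + MMX set */
        printf("SSE2=%u MMX=%u\n", Feat.fSse2, Feat.fMmx);  /* -> SSE2=1 MMX=1 */
        return 0;
    }

Exploding the leaves once at init time means later feature tests are a single bitfield read, with no repeated CPUID leaf lookups.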
trunk/include/VBox/vmm/cpum.mac (r55106 → r55229)

     .msrApicBase            resb    8
     alignb 8
-    .xcr0                   resq    1
+    .aXcr                   resq    2
     .fXStateMask            resq    1
     .pXStateR0              RTR0PTR_RES 1
trunk/include/VBox/vmm/cpumctx.h (r55117 → r55229)

     /** @} */

-    /** The XCR0 register. */
-    uint64_t                    xcr0;
+    /** The XCR0..XCR1 registers. */
+    uint64_t                    aXcr[2];
     /** The mask to pass to XSAVE/XRSTOR in EDX:EAX.  If zero we use
      * FXSAVE/FXRSTOR (since bit 0 will always be set, we only need to test it). */
…
     /** Size padding. */
-    uint32_t                    au32SizePadding[HC_ARCH_BITS == 32 ? 15 : 13];
+    uint32_t                    au32SizePadding[HC_ARCH_BITS == 32 ? 13 : 11];
 } CPUMCTX;
 #pragma pack()
trunk/include/VBox/vmm/vm.h (r55048 → r55229)

         struct CPUM s;
 #endif
+#ifdef ___VBox_vmm_cpum_h
+        /** Read only info exposed about the host and guest CPUs. */
+        struct
+        {
+            /** Padding for hidden fields. */
+            uint8_t                 abHidden0[64];
+            /** Host CPU feature information. */
+            CPUMFEATURES            HostFeatures;
+            /** Guest CPU feature information. */
+            CPUMFEATURES            GuestFeatures;
+        } const ro;
+#endif
         uint8_t     padding[1536];      /* multiple of 64 */
     } cpum;
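The new cpum.ro member overlays the private CPUM state inside the VM union: the padding hides the internal fields, and the AssertCompile2MemberOffsets statements added to CPUMAllRegs.cpp below pin HostFeatures/GuestFeatures to the same offsets in both views. A minimal sketch of that "public read-only window over private state" pattern, with hypothetical types and offsets (not the real VM layout):

    /* Sketch of the overlay pattern behind cpum.ro; all names hypothetical. */
    #include <stddef.h>
    #include <stdint.h>

    typedef struct FEAT { uint32_t au32[8]; } FEAT;          /* 32 bytes */

    struct Private  { uint8_t abInternal[64]; FEAT HostFeat; FEAT GuestFeat; };
    struct PublicRo { uint8_t abHidden[64];   FEAT HostFeat; FEAT GuestFeat; };

    union Overlay
    {
        struct Private  s;    /* full access for the owning module */
        struct PublicRo ro;   /* the read-only window everyone else sees */
        uint8_t         padding[128];
    };

    /* Stand-in for AssertCompile2MemberOffsets: break the build on mismatch. */
    typedef char AssertHostOffs [offsetof(struct Private, HostFeat)
                              == offsetof(struct PublicRo, HostFeat)  ? 1 : -1];
    typedef char AssertGuestOffs[offsetof(struct Private, GuestFeat)
                              == offsetof(struct PublicRo, GuestFeat) ? 1 : -1];

    int main(void) { return 0; }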
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r55062 → r55229)

 # pragma optimize("y", off)
 #endif
+
+AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures,  cpum.ro.HostFeatures);
+AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
…
 VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
 {
-    if (    (cr4                     & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
-        !=  (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
+    /*
+     * The CR4.OSXSAVE bit is reflected in CPUID(1).ECX[27].
+     */
+    if (    (cr4                     & X86_CR4_OSXSAVE)
+        !=  (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE) )
+    {
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
+        if (cr4 & X86_CR4_OSXSAVE)
+            CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_OSXSAVE);
+        else
+            CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_OSXSAVE);
+    }
+
+    if (    (cr4                     & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
+        !=  (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
         pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
+
     pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
     pVCpu->cpum.s.Guest.cr4 = cr4;
…
      * Deal with CPU specific information (currently only APIC ID).
      */
-    if (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC_ID)
+    if (pLeaf->fFlags & (CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE))
     {
         if (uLeaf == 1)
         {
-            /* Bits 31-24: Initial APIC ID */
+            /* EBX: Bits 31-24: Initial APIC ID. */
             Assert(pVCpu->idCpu <= 255);
             AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
             *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
+
+            /* ECX: Bit 27: CR4.OSXSAVE mirror. */
+            *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
+                  | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
         }
         else if (uLeaf == 0xb)
…
             break;
 
+        /*
+         * OSXSAVE - only used from CPUMSetGuestCR4.
+         */
+        case CPUMCPUIDFEATURE_OSXSAVE:
+            AssertLogRelReturnVoid(pVM->cpum.s.HostFeatures.fXSaveRstor && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor);
+
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
+            AssertLogRelReturnVoid(pLeaf);
+
+            /* UNI: Special case for single CPU to make life simple for CPUMPatchHlpCpuId. */
+            if (pVM->cCpus == 1)
+                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_OSXSAVE;
+            /* SMP: Set flag indicating OSXSAVE updating (superfluous because of the APIC ID, but that's fine). */
+            else
+                ASMAtomicOrU32(&pLeaf->fFlags, CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE);
+            break;
+
         default:
             AssertMsgFailed(("enmFeature=%d\n", enmFeature));
…
         case CPUMCPUIDFEATURE_MWAIT_EXTS:   return pVM->cpum.s.GuestFeatures.fMWaitExtensions;
 
+        case CPUMCPUIDFEATURE_OSXSAVE:
         case CPUMCPUIDFEATURE_INVALID:
         case CPUMCPUIDFEATURE_32BIT_HACK:
…
             Log(("CPUM: ClearGuestCpuIdFeature: Disabled MWAIT Extensions!\n"));
             break;
+
+        /*
+         * OSXSAVE - only used from CPUMSetGuestCR4.
+         */
+        case CPUMCPUIDFEATURE_OSXSAVE:
+            AssertLogRelReturnVoid(pVM->cpum.s.HostFeatures.fXSaveRstor && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor);
+
+            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
+            AssertLogRelReturnVoid(pLeaf);
+
+            /* UNI: Special case for single CPU to make life easy for CPUMPatchHlpCpuId. */
+            if (pVM->cCpus == 1)
+                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_OSXSAVE;
+            /* else: SMP: We never set the OSXSAVE bit and leaving the CONTAINS_OSXSAVE flag is fine. */
+            break;
+
 
         default:
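The hunks above make CPUID(1).ECX bit 27 (OSXSAVE) track the guest's CR4.OSXSAVE bit on every CPUID read. A hedged, standalone sketch of just that mirroring rule follows; the constants are written out numerically here and are stand-ins for the real X86_CR4_OSXSAVE and X86_CPUID_FEATURE_ECX_OSXSAVE definitions:

    /* Sketch of the CR4.OSXSAVE -> CPUID(1).ECX[27] mirror applied above. */
    #include <stdint.h>
    #include <assert.h>

    #define MY_CR4_OSXSAVE        (UINT64_C(1) << 18)  /* CR4 bit 18 */
    #define MY_CPUID_ECX_OSXSAVE  (UINT32_C(1) << 27)  /* CPUID(1).ECX bit 27 */

    static uint32_t MirrorOsXSave(uint32_t uLeafEcx, uint64_t uGuestCr4)
    {
        uLeafEcx &= ~MY_CPUID_ECX_OSXSAVE;             /* clear the mirror bit */
        if (uGuestCr4 & MY_CR4_OSXSAVE)                /* then copy CR4.OSXSAVE */
            uLeafEcx |= MY_CPUID_ECX_OSXSAVE;
        return uLeafEcx;
    }

    int main(void)
    {
        assert( (MirrorOsXSave(0, MY_CR4_OSXSAVE) & MY_CPUID_ECX_OSXSAVE));
        assert(!(MirrorOsXSave(~UINT32_C(0), 0)   & MY_CPUID_ECX_OSXSAVE));
        return 0;
    }

Recomputing the bit at read time (rather than rewriting the stored leaf) keeps the leaf array per-VM while the mirrored bit stays per-VCPU correct.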
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r55105 → r55229)

 /**
- * Tests if an AMD CPUID feature (extended) is marked present - ECX.
- */
-#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx)    iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
-
-/**
- * Tests if an AMD CPUID feature (extended) is marked present - EDX.
- */
-#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx)    iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
-
-/**
- * Tests if at least on of the specified AMD CPUID features (extended) are
- * marked present.
- */
-#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx)  iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
-
-/**
- * Checks if an Intel CPUID feature is present.
- */
-#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
-    (   ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
-     || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
-
-/**
- * Checks if an Intel CPUID feature is present.
- */
-#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(a_fEcx) \
-    ( iemRegIsIntelCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx)) )
-
-/**
- * Checks if an Intel CPUID feature is present in the host CPU.
- */
-#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx) \
-    ( (a_fEdx) & pIemCpu->fHostCpuIdStdFeaturesEdx )
+ * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
+ * @returns PCCPUMFEATURES
+ * @param   a_pIemCpu       The IEM state of the current CPU.
+ */
+#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
+
+/**
+ * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
+ * @returns PCCPUMFEATURES
+ * @param   a_pIemCpu       The IEM state of the current CPU.
+ */
+#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu)  (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
 
 /**
…
 }
 
-
-/**
- * Checks if an Intel CPUID feature bit is set.
- *
- * @returns true / false.
- *
- * @param   pIemCpu             The IEM per CPU data.
- * @param   fEdx                The EDX bit to test, or 0 if ECX.
- * @param   fEcx                The ECX bit to test, or 0 if EDX.
- * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
- *          IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
- */
-static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
-{
-    uint32_t uEax, uEbx, uEcx, uEdx;
-    CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, 0, &uEax, &uEbx, &uEcx, &uEdx);
-    return (fEcx && (uEcx & fEcx))
-        || (fEdx && (uEdx & fEdx));
-}
-
-
-/**
- * Checks if an AMD CPUID feature bit is set.
- *
- * @returns true / false.
- *
- * @param   pIemCpu             The IEM per CPU data.
- * @param   fEdx                The EDX bit to test, or 0 if ECX.
- * @param   fEcx                The ECX bit to test, or 0 if EDX.
- * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
- *          IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
- */
-static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
-{
-    uint32_t uEax, uEbx, uEcx, uEdx;
-    CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, 0, &uEax, &uEbx, &uEcx, &uEdx);
-    return (fEcx && (uEcx & fEcx))
-        || (fEdx && (uEdx & fEdx));
-}
-
 /** @} */
…
         if (   (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
             || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
-            || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2)) \
+            || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
             return iemRaiseUndefinedOpcode(pIemCpu); \
         if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
…
         if (   ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
-            || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MMX)) \
+            || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
             return iemRaiseUndefinedOpcode(pIemCpu); \
         if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
…
         if (   ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
-            || (   !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE) \
-                && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_AXMMX)) ) \
+            || (   !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
+                && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
             return iemRaiseUndefinedOpcode(pIemCpu); \
         if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r55105 → r55229)

             //if (xxx)
             //    fValid |= X86_CR4_VMXE;
-            //if (xxx)
-            //    fValid |= X86_CR4_OSXSAVE;
+            if (IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fXSaveRstor)
+                fValid |= X86_CR4_OSXSAVE;
             if (uNewCrX & ~(uint64_t)fValid)
             {
…
      * Check preconditions.
      */
-    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
+    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fTsc)
         return iemRaiseUndefinedOpcode(pIemCpu);
…
      * Check preconditions.
      */
-    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
+    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
         return iemRaiseUndefinedOpcode(pIemCpu);
     if (pIemCpu->uCpl != 0)
…
      * Check preconditions.
      */
-    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
+    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
         return iemRaiseUndefinedOpcode(pIemCpu);
     if (pIemCpu->uCpl != 0)
…
         return iemRaiseUndefinedOpcode(pIemCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
     }
-    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_FEATURE_ECX_MONITOR))
+    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
     {
         Log2(("monitor: Not in CPUID\n"));
…
         return iemRaiseUndefinedOpcode(pIemCpu);
     }
-    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_FEATURE_ECX_MONITOR))
+    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
     {
         Log2(("mwait: Not in CPUID\n"));
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h (r53423 → r55229)

 {
     /* AMD prefetch group, Intel implements this as NOP Ev (and so do we). */
-    if (!IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(X86_CPUID_EXT_FEATURE_EDX_LONG_MODE | X86_CPUID_AMD_FEATURE_EDX_3DNOW,
-                                               X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF))
+    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->f3DNowPrefetch)
     {
         IEMOP_MNEMONIC("GrpP");
…
 FNIEMOP_DEF(iemOp_3Dnow)
 {
-    if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_3DNOW))
+    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->f3DNow)
     {
         IEMOP_MNEMONIC("3Dnow");
…
     {
         /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
-        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
+        if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMovCr8In32Bit)
             return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
         iCrReg |= 8;
…
     {
         /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
-        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
+        if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMovCr8In32Bit)
             return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
         iCrReg |= 8;
…
 {
     IEMOP_MNEMONIC("fxsave m512");
-    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
+    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fFxSaveRstor)
         return IEMOP_RAISE_INVALID_OPCODE();
…
 {
     IEMOP_MNEMONIC("fxrstor m512");
-    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
+    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fFxSaveRstor)
         return IEMOP_RAISE_INVALID_OPCODE();
…
     IEMOP_MNEMONIC("lfence");
     IEMOP_HLP_NO_LOCK_PREFIX();
-    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
+    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2)
         return IEMOP_RAISE_INVALID_OPCODE();
 
     IEM_MC_BEGIN(0, 0);
-    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
+    if (IEM_GET_HOST_CPU_FEATURES(pIemCpu)->fSse2)
         IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_lfence);
     else
…
     IEMOP_MNEMONIC("mfence");
     IEMOP_HLP_NO_LOCK_PREFIX();
-    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
+    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2)
         return IEMOP_RAISE_INVALID_OPCODE();
 
     IEM_MC_BEGIN(0, 0);
-    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
+    if (IEM_GET_HOST_CPU_FEATURES(pIemCpu)->fSse2)
         IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_mfence);
     else
…
     IEMOP_MNEMONIC("sfence");
     IEMOP_HLP_NO_LOCK_PREFIX();
-    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
+    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2)
         return IEMOP_RAISE_INVALID_OPCODE();
 
     IEM_MC_BEGIN(0, 0);
-    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
+    if (IEM_GET_HOST_CPU_FEATURES(pIemCpu)->fSse2)
         IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_sfence);
     else
…
     IEMOP_HLP_NO_LOCK_PREFIX();
     if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
-        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
+        && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fLahfSahf)
         return IEMOP_RAISE_INVALID_OPCODE();
     IEM_MC_BEGIN(0, 2);
…
     IEMOP_HLP_NO_LOCK_PREFIX();
     if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
-        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
+        && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fLahfSahf)
         return IEMOP_RAISE_INVALID_OPCODE();
     IEM_MC_BEGIN(0, 1);
trunk/src/VBox/VMM/VMMR3/CPUM.cpp (r55152 → r55229)

     pFpuCtx->MXCSR_MASK = 0xffff; /** @todo REM always changed this for us. Should probably check if the HW really
                                       supports all bits, since a zero value here should be read as 0xffbf. */
+    pCtx->aXcr[0] = XSAVE_C_X87;
+    if (pVM->cpum.s.HostFeatures.cbMaxExtendedState >= RT_OFFSETOF(X86XSAVEAREA, Hdr))
+    {
+        /* The entire FXSAVE state needs loading when we switch to XSAVE/XRSTOR
+           as we don't know what happened before.  (Bother optimize later?) */
+        pCtx->pXStateR3->Hdr.bmXState = XSAVE_C_X87 | XSAVE_C_SSE;
+    }
 
     /*
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp (r55114 → r55229)

         pFeatures->fLahfSahf        = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
         pFeatures->fRdTscP          = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
+        pFeatures->fMovCr8In32Bit   = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_CMPL);
+        pFeatures->f3DNow           = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_3DNOW);
+        pFeatures->f3DNowPrefetch   = (pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF)
+                                   || (pExtLeaf->uEdx & (  X86_CPUID_EXT_FEATURE_EDX_LONG_MODE
+                                                         | X86_CPUID_AMD_FEATURE_EDX_3DNOW));
     }
…
         pFeatures->fMmx            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_MMX);
         pFeatures->fTsc            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_TSC);
+        pFeatures->fAmdMmxExts      = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_AXMMX);
     }
…
     CPUMISAEXTCFG   enmSse41;
     CPUMISAEXTCFG   enmSse42;
+    CPUMISAEXTCFG   enmAvx;
+    CPUMISAEXTCFG   enmAvx2;
+    CPUMISAEXTCFG   enmXSave;
     CPUMISAEXTCFG   enmAesNi;
     CPUMISAEXTCFG   enmPClMul;
…
     //| X86_CPUID_FEATURE_ECX_TSCDEADL - not implemented yet.
     | (pConfig->enmAesNi ? X86_CPUID_FEATURE_ECX_AES : 0)
-    //| X86_CPUID_FEATURE_ECX_XSAVE - not implemented yet.
-    //| X86_CPUID_FEATURE_ECX_OSXSAVE - mirrors CR4.OSXSAVE state
-    //| X86_CPUID_FEATURE_ECX_AVX - not implemented yet.
+    | (pConfig->enmXSave ? X86_CPUID_FEATURE_ECX_XSAVE : 0 )
+    //| X86_CPUID_FEATURE_ECX_OSXSAVE - mirrors CR4.OSXSAVE state, set dynamically.
+    | (pConfig->enmAvx ? X86_CPUID_FEATURE_ECX_AVX : 0)
     //| X86_CPUID_FEATURE_ECX_F16C - not implemented yet.
     | (pConfig->enmRdRand ? X86_CPUID_FEATURE_ECX_RDRAND : 0)
…
         PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, POPCNT, X86_CPUID_FEATURE_ECX_POPCNT, pConfig->enmPopCnt);
         PORTABLE_DISABLE_FEATURE_BIT(    1, pStdFeatureLeaf->uEcx, F16C,   X86_CPUID_FEATURE_ECX_F16C);
-        PORTABLE_DISABLE_FEATURE_BIT(    1, pStdFeatureLeaf->uEcx, XSAVE,  X86_CPUID_FEATURE_ECX_XSAVE);
-        PORTABLE_DISABLE_FEATURE_BIT(    1, pStdFeatureLeaf->uEcx, AVX,    X86_CPUID_FEATURE_ECX_AVX);
+        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, XSAVE,  X86_CPUID_FEATURE_ECX_XSAVE, pConfig->enmXSave);
+        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, AVX,    X86_CPUID_FEATURE_ECX_AVX,   pConfig->enmAvx);
         PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, RDRAND, X86_CPUID_FEATURE_ECX_RDRAND, pConfig->enmRdRand);
         PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, CX16,   X86_CPUID_FEATURE_ECX_CX16,   pConfig->enmCmpXchg16b);
…
                                   | X86_CPUID_FEATURE_ECX_PDCM
                                   | X86_CPUID_FEATURE_ECX_DCA
-                                  | X86_CPUID_FEATURE_ECX_XSAVE
                                   | X86_CPUID_FEATURE_ECX_OSXSAVE
-                                  | X86_CPUID_FEATURE_ECX_AVX
                                   )));
     }
…
     if (pConfig->enmAesNi == CPUMISAEXTCFG_ENABLED_ALWAYS)
         pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_AES;
+    if (pConfig->enmXSave == CPUMISAEXTCFG_ENABLED_ALWAYS)
+        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_XSAVE;
+    if (pConfig->enmAvx == CPUMISAEXTCFG_ENABLED_ALWAYS)
+        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_AVX;
     if (pConfig->enmRdRand == CPUMISAEXTCFG_ENABLED_ALWAYS)
         pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_RDRAND;
…
     //| X86_CPUID_STEXT_FEATURE_EBX_BMI1               RT_BIT(3)
     //| X86_CPUID_STEXT_FEATURE_EBX_HLE                RT_BIT(4)
-    //| X86_CPUID_STEXT_FEATURE_EBX_AVX2               RT_BIT(5)
+    | (pConfig->enmAvx2 ? X86_CPUID_STEXT_FEATURE_EBX_AVX2 : 0)
     //| RT_BIT(6) - reserved
     //| X86_CPUID_STEXT_FEATURE_EBX_SMEP               RT_BIT(7)
…
     {
         PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEbx, FSGSBASE, X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE);
-        PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEbx, AVX2,     X86_CPUID_STEXT_FEATURE_EBX_AVX2);
+        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, AVX2,     X86_CPUID_STEXT_FEATURE_EBX_AVX2, pConfig->enmAvx2);
         PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEbx, SMEP,     X86_CPUID_STEXT_FEATURE_EBX_SMEP);
         PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEbx, BMI2,     X86_CPUID_STEXT_FEATURE_EBX_BMI2);
…
     /* Force standard feature bits. */
+    if (pConfig->enmAvx2 == CPUMISAEXTCFG_ENABLED_ALWAYS)
+        pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_AVX2;
     if (pConfig->enmRdSeed == CPUMISAEXTCFG_ENABLED_ALWAYS)
         pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_RDSEED;
…
      * Clear them all as we don't currently implement extended CPU state.
      */
-    uSubLeaf = 0;
-    while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 13, uSubLeaf)) != NULL)
-    {
-        pCurLeaf->uEax = 0;
-        pCurLeaf->uEbx = 0;
-        pCurLeaf->uEcx = 0;
-        pCurLeaf->uEdx = 0;
-        uSubLeaf++;
+    /* Figure out the supported XCR0/XSS mask component. */
+    uint64_t fGuestXcr0Mask = 0;
+    pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
+    if (pStdFeatureLeaf && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_XSAVE))
+    {
+        fGuestXcr0Mask = XSAVE_C_X87 | XSAVE_C_SSE;
+        if (pStdFeatureLeaf && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_AVX))
+            fGuestXcr0Mask |= XSAVE_C_YMM;
+        pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 7, 0);
+        if (pCurLeaf && (pCurLeaf->uEbx & X86_CPUID_STEXT_FEATURE_EBX_AVX512F))
+            fGuestXcr0Mask |= XSAVE_C_ZMM_16HI | XSAVE_C_ZMM_HI256 | XSAVE_C_OPMASK;
+        fGuestXcr0Mask &= pCpum->fXStateHostMask;
+    }
+    pStdFeatureLeaf = NULL;
+    pCpum->fXStateGuestMask = fGuestXcr0Mask;
+
+    /* Work the sub-leaves. */
+    for (uSubLeaf = 0; uSubLeaf < 63; uSubLeaf++)
+    {
+        pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 13, uSubLeaf);
+        if (pCurLeaf)
+        {
+            if (fGuestXcr0Mask)
+            {
+                switch (uSubLeaf)
+                {
+                    case 0:
+                        pCurLeaf->uEax &= RT_LO_U32(fGuestXcr0Mask);
+                        pCurLeaf->uEdx &= RT_HI_U32(fGuestXcr0Mask);
+                        continue;
+                    case 1:
+                        pCurLeaf->uEax &= 0;
+                        pCurLeaf->uEcx &= 0;
+                        pCurLeaf->uEdx &= 0;
+                        continue;
+                    default:
+                        if (fGuestXcr0Mask & RT_BIT_64(uSubLeaf))
+                        {
+                            AssertLogRel(!(pCurLeaf->uEcx & 1));
+                            pCurLeaf->uEcx = 0; /* Bit 0 should be zero (XCR0), the reset are reserved... */
+                            pCurLeaf->uEdx = 0; /* it's reserved... */
+                            continue;
+                        }
+                        break;
+                }
+            }
+
+            /* Clear the leaf. */
+            pCurLeaf->uEax = 0;
+            pCurLeaf->uEbx = 0;
+            pCurLeaf->uEcx = 0;
+            pCurLeaf->uEdx = 0;
+        }
     }
…
 /**
+ * Reads a value in /CPUM/IsaExts/ node, forcing it to DISABLED if wanted.
+ *
+ * @returns VBox status code (error message raised).
+ * @param   pVM             The VM handle (for errors).
+ * @param   pIsaExts        The /CPUM/IsaExts node (can be NULL).
+ * @param   pszValueName    The value / extension name.
+ * @param   penmValue       Where to return the choice.
+ * @param   enmDefault      The default choice.
+ * @param   fAllowed        Allowed choice.  Applied both to the result and to
+ *                          the default value.
+ */
+static int cpumR3CpuIdReadIsaExtCfgEx(PVM pVM, PCFGMNODE pIsaExts, const char *pszValueName,
+                                      CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault, bool fAllowed)
+{
+    Assert(fAllowed == true || fAllowed == false);
+    int rc;
+    if (fAllowed)
+        rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, enmDefault);
+    else
+    {
+        rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, false /*enmDefault*/);
+        if (RT_SUCCESS(rc) && *penmValue == CPUMISAEXTCFG_ENABLED_ALWAYS)
+            LogRel(("CPUM: Ignoring forced '%s'\n", pszValueName));
+        *penmValue = CPUMISAEXTCFG_DISABLED;
+    }
+    return rc;
+}
+
+
+/**
  * Reads a value in /CPUM/IsaExts/ node that used to be located in /CPUM/.
  *
…
     rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "SSE4.2", &pConfig->enmSse42, true);
+    AssertLogRelRCReturn(rc, rc);
+
+#if 0 /* Incomplete, so not yet enabled. */
+    bool const fMayHaveXSave = fNestedPagingAndFullGuestExec
+                            && pVM->cpum.s.HostFeatures.fXSaveRstor
+                            && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor
+                            && pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL /** @todo test and enable on AMD! */;
+#else
+    bool const fMayHaveXSave = false;
+#endif
+    /** @cfgm{/CPUM/IsaExts/XSAVE, boolean, depends}
+     * Expose XSAVE/XRSTOR to the guest if available.  For the time being the
+     * default is to only expose this to VMs with nested paging and AMD-V or
+     * unrestricted guest execution mode.  Not possible to force this one without
+     * host support at the moment.
+     */
+    rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "XSAVE", &pConfig->enmXSave, fNestedPagingAndFullGuestExec,
+                                    fMayHaveXSave /*fAllowed*/);
+    AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/CPUM/IsaExts/AVX, boolean, depends}
+     * Expose the AVX instruction set extensions to the guest if available and
+     * XSAVE is exposed too.  For the time being the default is to only expose this
+     * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
+     */
+    rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX", &pConfig->enmAvx, fNestedPagingAndFullGuestExec,
+                                    fMayHaveXSave && pConfig->enmXSave /*fAllowed*/);
+    AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/CPUM/IsaExts/AVX2, boolean, depends}
+     * Expose the AVX2 instruction set extensions to the guest if available and
+     * XSAVE is exposed too.  For the time being the default is to only expose this
+     * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
+     */
+    rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX2", &pConfig->enmAvx2, fNestedPagingAndFullGuestExec,
+                                    fMayHaveXSave && pConfig->enmXSave /*fAllowed*/);
     AssertLogRelRCReturn(rc, rc);
…
     CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AES);   // -> EMU
     CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_XSAVE); // -> EMU
-    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE); // -> EMU
+    CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE);
     CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AVX);   // -> EMU?
     CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_F16C);
…
     /** @todo check leaf 7   */
 
-    /** @todo XSAVE: Stricter XSAVE feature checks for all modes. */
+    /* CPUID(d) - XCR0 stuff - takes ECX as input.
+     * ECX=0:   EAX - Valid bits in XCR0[31:0].
+     *          EBX - Maximum state size as per current XCR0 value.
+     *          ECX - Maximum state size for all supported features.
+     *          EDX - Valid bits in XCR0[63:32].
+     * ECX=1:   EAX - Various X-features.
+     *          EBX - Maximum state size as per current XCR0|IA32_XSS value.
+     *          ECX - Valid bits in IA32_XSS[31:0].
+     *          EDX - Valid bits in IA32_XSS[63:32].
+     * ECX=N, where N in 2..63 and indicates a bit in XCR0 and/or IA32_XSS,
+     *        if the bit invalid all four registers are set to zero.
+     *          EAX - The state size for this feature.
+     *          EBX - The state byte offset of this feature.
+     *          ECX - Bit 0 indicates whether this sub-leaf maps to a valid IA32_XSS bit (=1) or a valid XCR0 bit (=0).
+     *          EDX - Reserved, but is set to zero if invalid sub-leaf index.
+     */
+    PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x0000000d), 0);
+    if (   pCurLeaf
+        && (aGuestCpuIdStd[1].uEcx & X86_CPUID_FEATURE_ECX_XSAVE)
+        && (   pCurLeaf->uEax
+            || pCurLeaf->uEbx
+            || pCurLeaf->uEcx
+            || pCurLeaf->uEdx) )
+    {
+        uint64_t fGuestXcr0Mask = RT_MAKE_U64(pCurLeaf->uEax, pCurLeaf->uEdx);
+        if (fGuestXcr0Mask & ~pVM->cpum.s.fXStateHostMask)
+            return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
+                                     N_("CPUID(0xd/0).EDX:EAX mismatch: %#llx saved, %#llx supported by the current host (XCR0 bits)"),
+                                     fGuestXcr0Mask, pVM->cpum.s.fXStateHostMask);
+
+        /* We don't support any additional features yet. */
+        pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x0000000d), 1);
+        if (pCurLeaf && pCurLeaf->uEax)
+            return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
+                                     N_("CPUID(0xd/1).EAX=%#x, expected zero"), pCurLeaf->uEax);
+        if (pCurLeaf && (pCurLeaf->uEcx || pCurLeaf->uEdx))
+            return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
+                                     N_("CPUID(0xd/1).EDX:ECX=%#llx, expected zero"),
+                                     RT_MAKE_U64(pCurLeaf->uEdx, pCurLeaf->uEcx));
+
+
+        if (pVM->cpum.s.fXStateGuestMask != fGuestXcr0Mask)
+        {
+            LogRel(("CPUM: fXStateGuestMask=%#lx -> %#llx\n", pVM->cpum.s.fXStateGuestMask, fGuestXcr0Mask));
+            pVM->cpum.s.fXStateGuestMask = fGuestXcr0Mask;
+        }
+
+        for (uint32_t uSubLeaf = 2; uSubLeaf < 64; uSubLeaf++)
+        {
+            pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x0000000d), uSubLeaf);
+            if (pCurLeaf)
+            {
+                /* If advertised, the state component offset and size must match the one used by host. */
+                if (pCurLeaf->uEax || pCurLeaf->uEbx || pCurLeaf->uEcx || pCurLeaf->uEdx)
+                {
+                    CPUMCPUID RawHost;
+                    ASMCpuIdExSlow(UINT32_C(0x0000000d), 0, uSubLeaf, 0,
+                                   &RawHost.uEax, &RawHost.uEbx, &RawHost.uEcx, &RawHost.uEdx);
+                    if (   RawHost.uEbx != pCurLeaf->uEbx
+                        || RawHost.uEax != pCurLeaf->uEax)
+                        return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
+                                                 N_("CPUID(0xd/%#x).EBX/EAX=%#x/%#x, current host uses %#x/%#x (offset/size)"),
+                                                 uSubLeaf, pCurLeaf->uEbx, pCurLeaf->uEax, RawHost.uEbx, RawHost.uEax);
+                }
+            }
+        }
+    }
 
 #undef CPUID_CHECK_RET
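The new leaf-13 code above derives the guest XCR0 mask from the advertised XSAVE/AVX/AVX-512 feature bits and then intersects it with the host's mask, so the guest can never enable a state component the host cannot save. A hedged, standalone sketch of that derivation (component bit positions per the Intel SDM; the helper name and the example host mask are illustrative, not VirtualBox code):

    /* Sketch of the guest XCR0 mask derivation done in the leaf-13 code. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    #define XC_X87       UINT64_C(0x01)   /* x87 state, always set in XCR0 */
    #define XC_SSE       UINT64_C(0x02)   /* XMM registers                 */
    #define XC_YMM       UINT64_C(0x04)   /* AVX high halves               */
    #define XC_OPMASK    UINT64_C(0x20)   /* AVX-512 opmask (k) registers  */
    #define XC_ZMM_HI256 UINT64_C(0x40)   /* AVX-512 ZMM0-15 high parts    */
    #define XC_ZMM_16HI  UINT64_C(0x80)   /* AVX-512 ZMM16-31              */

    static uint64_t CalcGuestXcr0Mask(bool fXSave, bool fAvx, bool fAvx512F,
                                      uint64_t fHostMask)
    {
        uint64_t fMask = 0;
        if (fXSave)
        {
            fMask = XC_X87 | XC_SSE;                   /* baseline components */
            if (fAvx)
                fMask |= XC_YMM;
            if (fAvx512F)
                fMask |= XC_OPMASK | XC_ZMM_HI256 | XC_ZMM_16HI;
            fMask &= fHostMask;      /* never advertise more than the host has */
        }
        return fMask;
    }

    int main(void)
    {
        /* Host supports x87+SSE+YMM only: the AVX-512 components get masked off. */
        printf("%#llx\n", (unsigned long long)CalcGuestXcr0Mask(true, true, true, 0x7));
        return 0;
    }

The same mask is what the saved-state checks above validate against at load time, rejecting snapshots whose XCR0 bits the current host cannot honor.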
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp (r54737 → r55229)

         if (idCpu == 0)
         {
-            uint32_t uIgnored;
-            CPUMGetGuestCpuId(pVCpu, 1, 0, &uIgnored, &uIgnored,
-                              &pVCpu->iem.s.fCpuIdStdFeaturesEcx, &pVCpu->iem.s.fCpuIdStdFeaturesEdx);
             pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
-
-            ASMCpuId_ECX_EDX(1, &pVCpu->iem.s.fHostCpuIdStdFeaturesEcx, &pVCpu->iem.s.fHostCpuIdStdFeaturesEdx);
             pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
         }
         else
         {
-            pVCpu->iem.s.fCpuIdStdFeaturesEcx = pVM->aCpus[0].iem.s.fCpuIdStdFeaturesEcx;
-            pVCpu->iem.s.fCpuIdStdFeaturesEdx = pVM->aCpus[0].iem.s.fCpuIdStdFeaturesEdx;
             pVCpu->iem.s.enmCpuVendor = pVM->aCpus[0].iem.s.enmCpuVendor;
-            pVCpu->iem.s.fHostCpuIdStdFeaturesEcx = pVM->aCpus[0].iem.s.fHostCpuIdStdFeaturesEcx;
-            pVCpu->iem.s.fHostCpuIdStdFeaturesEdx = pVM->aCpus[0].iem.s.fHostCpuIdStdFeaturesEdx;
             pVCpu->iem.s.enmHostCpuVendor = pVM->aCpus[0].iem.s.enmHostCpuVendor;
         }
trunk/src/VBox/VMM/include/CPUMInternal.h (r55114 → r55229)

 
 
-
-
-/**
- * CPU features and quirks.
- * This is mostly exploded CPUID info.
- */
-typedef struct CPUMFEATURES
-{
-    /** The CPU vendor (CPUMCPUVENDOR). */
-    uint8_t         enmCpuVendor;
-    /** The CPU family. */
-    uint8_t         uFamily;
-    /** The CPU model. */
-    uint8_t         uModel;
-    /** The CPU stepping. */
-    uint8_t         uStepping;
-    /** The microarchitecture. */
-#ifndef VBOX_FOR_DTRACE_LIB
-    CPUMMICROARCH   enmMicroarch;
-#else
-    uint32_t        enmMicroarch;
-#endif
-    /** The maximum physical address with of the CPU. */
-    uint8_t         cMaxPhysAddrWidth;
-    /** Alignment padding. */
-    uint8_t         abPadding[1];
-    /** Max size of the extended state (or FPU state if no XSAVE). */
-    uint16_t        cbMaxExtendedState;
-
-    /** Supports MSRs. */
-    uint32_t        fMsr : 1;
-    /** Supports the page size extension (4/2 MB pages). */
-    uint32_t        fPse : 1;
-    /** Supports 36-bit page size extension (4 MB pages can map memory above
-     * 4GB). */
-    uint32_t        fPse36 : 1;
-    /** Supports physical address extension (PAE). */
-    uint32_t        fPae : 1;
-    /** Page attribute table (PAT) support (page level cache control). */
-    uint32_t        fPat : 1;
-    /** Supports the FXSAVE and FXRSTOR instructions. */
-    uint32_t        fFxSaveRstor : 1;
-    /** Supports the XSAVE and XRSTOR instructions. */
-    uint32_t        fXSaveRstor : 1;
-    /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
-    uint32_t        fOpSysXSaveRstor : 1;
-    /** Supports MMX. */
-    uint32_t        fMmx : 1;
-    /** Supports SSE. */
-    uint32_t        fSse : 1;
-    /** Supports SSE2. */
-    uint32_t        fSse2 : 1;
-    /** Supports SSE3. */
-    uint32_t        fSse3 : 1;
-    /** Supports SSSE3. */
-    uint32_t        fSsse3 : 1;
-    /** Supports SSE4.1. */
-    uint32_t        fSse41 : 1;
-    /** Supports SSE4.2. */
-    uint32_t        fSse42 : 1;
-    /** Supports AVX. */
-    uint32_t        fAvx : 1;
-    /** Supports AVX2. */
-    uint32_t        fAvx2 : 1;
-    /** Supports AVX512 foundation. */
-    uint32_t        fAvx512Foundation : 1;
-    /** Supports RDTSC. */
-    uint32_t        fTsc : 1;
-    /** Intel SYSENTER/SYSEXIT support */
-    uint32_t        fSysEnter : 1;
-    /** First generation APIC. */
-    uint32_t        fApic : 1;
-    /** Second generation APIC. */
-    uint32_t        fX2Apic : 1;
-    /** Hypervisor present. */
-    uint32_t        fHypervisorPresent : 1;
-    /** MWAIT & MONITOR instructions supported. */
-    uint32_t        fMonitorMWait : 1;
-    /** MWAIT Extensions present. */
-    uint32_t        fMWaitExtensions : 1;
-
-    /** AMD64: Supports long mode. */
-    uint32_t        fLongMode : 1;
-    /** AMD64: SYSCALL/SYSRET support. */
-    uint32_t        fSysCall : 1;
-    /** AMD64: No-execute page table bit. */
-    uint32_t        fNoExecute : 1;
-    /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
-    uint32_t        fLahfSahf : 1;
-    /** AMD64: Supports RDTSCP. */
-    uint32_t        fRdTscP : 1;
-
-    /** Indicates that FPU instruction and data pointers may leak.
-     * This generally applies to recent AMD CPUs, where the FPU IP and DP pointer
-     * is only saved and restored if an exception is pending. */
-    uint32_t        fLeakyFxSR : 1;
-
-    /** Alignment padding / reserved for future use. */
-    uint32_t        fPadding : 1;
-    uint64_t        auPadding[2];
-} CPUMFEATURES;
-#ifndef VBOX_FOR_DTRACE_LIB
-AssertCompileSize(CPUMFEATURES, 32);
-#endif
-/** Pointer to a CPU feature structure. */
-typedef CPUMFEATURES *PCPUMFEATURES;
-/** Pointer to a const CPU feature structure. */
-typedef CPUMFEATURES const *PCCPUMFEATURES;
-
-
 /**
  * CPU info
…
      * This is used to verify load order dependencies (PGM). */
     bool                    fPendingRestore;
-    uint8_t                 abPadding[HC_ARCH_BITS == 64 ? 6 : 2];
+    uint8_t                 abPadding0[6];
+
+    /** XSAVE/XRTOR components we can expose to the guest mask. */
+    uint64_t                fXStateGuestMask;
+    /** XSAVE/XRSTOR host mask.  Only state components in this mask can be exposed
+     * to the guest.  This is 0 if no XSAVE/XRSTOR bits can be exposed. */
+    uint64_t                fXStateHostMask;
+    uint8_t                 abPadding1[24];
+
+    /** Host CPU feature information.
+     * Externaly visible via the VM structure, aligned on 64-byte boundrary. */
+    CPUMFEATURES            HostFeatures;
+    /** Guest CPU feature information.
+     * Externaly visible via that VM structure, aligned with HostFeatures. */
+    CPUMFEATURES            GuestFeatures;
+    /** Guest CPU info. */
+    CPUMINFO                GuestInfo;
+
 
     /** The standard set of CpuId leaves. */
…
     /** The centaur set of CpuId leaves. */
     CPUMCPUID               aGuestCpuIdPatmCentaur[4];
-
-#if HC_ARCH_BITS == 32
-    uint8_t                 abPadding2[4];
-#endif
-
-    /** Guest CPU info. */
-    CPUMINFO                GuestInfo;
-    /** Guest CPU feature information. */
-    CPUMFEATURES            GuestFeatures;
-    /** Host CPU feature information. */
-    CPUMFEATURES            HostFeatures;
-    /** XSAVE/XRSTOR host mask.  Only state components in this mask can be exposed
-     * to the guest.  This is 0 if no XSAVE/XRSTOR bits can be exposed. */
-    uint64_t                fXStateHostMask;
 
     /** @name MSR statistics.
…
     /** @} */
 } CPUM;
+AssertCompileMemberOffset(CPUM, HostFeatures, 64);
+AssertCompileMemberOffset(CPUM, GuestFeatures, 96);
 /** Pointer to the CPUM instance data residing in the shared VM structure. */
 typedef CPUM *PCPUM;
trunk/src/VBox/VMM/include/CPUMInternal.mac (r55114 → r55229)

 struc CPUM
     ;...
-    .offCPUMCPU0    resd    1
-    .fHostUseFlags  resd    1
+    .offCPUMCPU0            resd    1
+    .fHostUseFlags          resd    1
 
     ; CR4 masks
-    .CR4.AndMask    resd    1
-    .CR4.OrMask     resd    1
+    .CR4.AndMask            resd    1
+    .CR4.OrMask             resd    1
     ; entered rawmode?
-    .u8PortableCpuIdLevel resb 1
-    .fPendingRestore resb 1
-%if RTHCPTR_CB == 8
-    .abPadding resb 6
-%else
-    .abPadding resb 2
-%endif
+    .u8PortableCpuIdLevel   resb    1
+    .fPendingRestore        resb    1
+
+    alignb 8
+    .fXStateGuestMask       resq    1
+    .fXStateHostMask        resq    1
+
+    alignb 64
+    .HostFeatures           resb    32
+    .GuestFeatures          resb    32
+    .GuestInfo              resb    RTHCPTR_CB*4 + RTRCPTR_CB*2 + 4*12
 
     ; Patch manager saved state compatability CPUID leaf arrays
…
     .aGuestCpuIdPatmCentaur resb    16*4
 
-%if HC_ARCH_BITS == 32
-    .abPadding2 resb 4
-%endif
-
-    .GuestInfo resb RTHCPTR_CB*4 + RTRCPTR_CB*2 + 4*12
-    .GuestFeatures resb 32
-    .HostFeatures resb 32
-    .fXStateHostMask resq 1
-
-    .cMsrWrites resq 1
-    .cMsrWritesToIgnoredBits resq 1
-    .cMsrWritesRaiseGp resq 1
-    .cMsrWritesUnknown resq 1
-    .cMsrReads resq 1
-    .cMsrReadsRaiseGp resq 1
-    .cMsrReadsUnknown resq 1
+    alignb 8
+    .cMsrWrites              resq 1
+    .cMsrWritesToIgnoredBits resq 1
+    .cMsrWritesRaiseGp       resq 1
+    .cMsrWritesUnknown       resq 1
+    .cMsrReads               resq 1
+    .cMsrReadsRaiseGp        resq 1
+    .cMsrReadsUnknown        resq 1
 endstruc
…
     .Guest.msrKERNELGSBASE  resb    8
     .Guest.msrApicBase      resb    8
-    .Guest.xcr0             resq    1
+    .Guest.aXcr             resq    2
     .Guest.fXStateMask      resq    1
     .Guest.pXStateR0        RTR0PTR_RES 1
…
     .Hyper.msrKERNELGSBASE  resb    8
     .Hyper.msrApicBase      resb    8
-    .Hyper.xcr0             resq    1
+    .Hyper.aXcr             resq    2
     .Hyper.fXStateMask      resq    1
     .Hyper.pXStateR0        RTR0PTR_RES 1
trunk/src/VBox/VMM/include/IEMInternal.h (r51562 → r55229)

     /** @name Target CPU information.
      * @{ */
-    /** EDX value of CPUID(1).
-     * @remarks Some bits are subject to change and must be queried dynamically. */
-    uint32_t                fCpuIdStdFeaturesEdx;
-    /** ECX value of CPUID(1).
-     * @remarks Some bits are subject to change and must be queried dynamically. */
-    uint32_t                fCpuIdStdFeaturesEcx;
     /** The CPU vendor. */
     CPUMCPUVENDOR           enmCpuVendor;
…
     /** @name Host CPU information.
      * @{ */
-    /** EDX value of CPUID(1). */
-    uint32_t                fHostCpuIdStdFeaturesEdx;
-    /** ECX value of CPUID(1). */
-    uint32_t                fHostCpuIdStdFeaturesEcx;
     /** The CPU vendor. */
     CPUMCPUVENDOR           enmHostCpuVendor;
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp (r53183 → r55229)

 #define IEM_IS_LONG_MODE(a_pIemCpu)                             (g_fRandom)
 #define IEM_IS_REAL_MODE(a_pIemCpu)                             (g_fRandom)
-#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx)            (g_fRandom)
-#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx)            (g_fRandom)
-#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx)   (g_fRandom)
-#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx)          (g_fRandom)
-#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(a_fEcx)          (g_fRandom)
-#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx)  (g_fRandom)
 #define IEM_IS_GUEST_CPU_AMD(a_pIemCpu)                         (g_fRandom)
 #define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu)                       (g_fRandom)
+#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu)                   ((PCCPUMFEATURES)(uintptr_t)42)
+#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu)                    ((PCCPUMFEATURES)(uintptr_t)88)
 
 #define iemRecalEffOpSize(a_pIemCpu)                            do { } while (0)