VirtualBox

Changeset 55229 in vbox


Ignore:
Timestamp:
Apr 14, 2015 6:35:43 AM (9 years ago)
Author:
vboxsync
Message:

CPUM,IEM: Expose GuestFeatures and HostFeatures (exploded CPUID), making IEM use it. Early XSAVE/AVX guest support preps.

Location:
trunk
Files:
15 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/cpum.h

    r55062 r55229  
    6868    /** The MWait Extensions bits (Std) */
    6969    CPUMCPUIDFEATURE_MWAIT_EXTS,
     70    /** The CR4.OSXSAVE bit CPUID mirroring, only use from CPUMSetGuestCR4. */
     71    CPUMCPUIDFEATURE_OSXSAVE,
    7072    /** 32bit hackishness. */
    7173    CPUMCPUIDFEATURE_32BIT_HACK = 0x7fffffff
     
    310312/** The leaf contains an APIC ID that needs changing to that of the current CPU. */
    311313#define CPUMCPUIDLEAF_F_CONTAINS_APIC_ID            RT_BIT_32(1)
     314/** The leaf contains an OSXSAVE which needs individual handling on each CPU. */
     315#define CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE            RT_BIT_32(2)
    312316/** Mask of the valid flags. */
    313 #define CPUMCPUIDLEAF_F_VALID_MASK                  UINT32_C(0x3)
     317#define CPUMCPUIDLEAF_F_VALID_MASK                  UINT32_C(0x7)
    314318/** @} */
    315319
     
    889893
    890894
     895/**
     896 * CPU features and quirks.
     897 * This is mostly exploded CPUID info.
     898 */
     899typedef struct CPUMFEATURES
     900{
     901    /** The CPU vendor (CPUMCPUVENDOR). */
     902    uint8_t         enmCpuVendor;
     903    /** The CPU family. */
     904    uint8_t         uFamily;
     905    /** The CPU model. */
     906    uint8_t         uModel;
     907    /** The CPU stepping. */
     908    uint8_t         uStepping;
     909    /** The microarchitecture. */
     910#ifndef VBOX_FOR_DTRACE_LIB
     911    CPUMMICROARCH   enmMicroarch;
     912#else
     913    uint32_t        enmMicroarch;
     914#endif
      916    /** The maximum physical address width of the CPU. */
     916    uint8_t         cMaxPhysAddrWidth;
     917    /** Alignment padding. */
     918    uint8_t         abPadding[1];
     919    /** Max size of the extended state (or FPU state if no XSAVE). */
     920    uint16_t        cbMaxExtendedState;
     921
     922    /** Supports MSRs. */
     923    uint32_t        fMsr : 1;
     924    /** Supports the page size extension (4/2 MB pages). */
     925    uint32_t        fPse : 1;
     926    /** Supports 36-bit page size extension (4 MB pages can map memory above
     927     *  4GB). */
     928    uint32_t        fPse36 : 1;
     929    /** Supports physical address extension (PAE). */
     930    uint32_t        fPae : 1;
     931    /** Page attribute table (PAT) support (page level cache control). */
     932    uint32_t        fPat : 1;
     933    /** Supports the FXSAVE and FXRSTOR instructions. */
     934    uint32_t        fFxSaveRstor : 1;
     935    /** Supports the XSAVE and XRSTOR instructions. */
     936    uint32_t        fXSaveRstor : 1;
     937    /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
     938    uint32_t        fOpSysXSaveRstor : 1;
     939    /** Supports MMX. */
     940    uint32_t        fMmx : 1;
     941    /** Supports AMD extensions to MMX instructions. */
     942    uint32_t        fAmdMmxExts : 1;
     943    /** Supports SSE. */
     944    uint32_t        fSse : 1;
     945    /** Supports SSE2. */
     946    uint32_t        fSse2 : 1;
     947    /** Supports SSE3. */
     948    uint32_t        fSse3 : 1;
     949    /** Supports SSSE3. */
     950    uint32_t        fSsse3 : 1;
     951    /** Supports SSE4.1. */
     952    uint32_t        fSse41 : 1;
     953    /** Supports SSE4.2. */
     954    uint32_t        fSse42 : 1;
     955    /** Supports AVX. */
     956    uint32_t        fAvx : 1;
     957    /** Supports AVX2. */
     958    uint32_t        fAvx2 : 1;
     959    /** Supports AVX512 foundation. */
     960    uint32_t        fAvx512Foundation : 1;
     961    /** Supports RDTSC. */
     962    uint32_t        fTsc : 1;
     963    /** Intel SYSENTER/SYSEXIT support */
     964    uint32_t        fSysEnter : 1;
     965    /** First generation APIC. */
     966    uint32_t        fApic : 1;
     967    /** Second generation APIC. */
     968    uint32_t        fX2Apic : 1;
     969    /** Hypervisor present. */
     970    uint32_t        fHypervisorPresent : 1;
     971    /** MWAIT & MONITOR instructions supported. */
     972    uint32_t        fMonitorMWait : 1;
     973    /** MWAIT Extensions present. */
     974    uint32_t        fMWaitExtensions : 1;
     975
     976    /** Supports AMD 3DNow instructions. */
     977    uint32_t        f3DNow : 1;
     978    /** Supports the 3DNow/AMD64 prefetch instructions (could be nops). */
     979    uint32_t        f3DNowPrefetch : 1;
     980
     981    /** AMD64: Supports long mode. */
     982    uint32_t        fLongMode : 1;
     983    /** AMD64: SYSCALL/SYSRET support. */
     984    uint32_t        fSysCall : 1;
     985    /** AMD64: No-execute page table bit. */
     986    uint32_t        fNoExecute : 1;
     987    /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
     988    uint32_t        fLahfSahf : 1;
     989    /** AMD64: Supports RDTSCP. */
     990    uint32_t        fRdTscP : 1;
     991    /** AMD64: Supports MOV CR8 in 32-bit code (lock prefix hack). */
     992    uint32_t        fMovCr8In32Bit : 1;
     993
     994    /** Indicates that FPU instruction and data pointers may leak.
      995     * This generally applies to recent AMD CPUs, where the FPU IP and DP pointers
      996     * are only saved and restored if an exception is pending. */
     997    uint32_t        fLeakyFxSR : 1;
     998
     999    /** Alignment padding / reserved for future use. */
     1000    uint32_t        fPadding : 29;
     1001    uint32_t        auPadding[3];
     1002} CPUMFEATURES;
     1003#ifndef VBOX_FOR_DTRACE_LIB
     1004AssertCompileSize(CPUMFEATURES, 32);
     1005#endif
     1006/** Pointer to a CPU feature structure. */
     1007typedef CPUMFEATURES *PCPUMFEATURES;
     1008/** Pointer to a const CPU feature structure. */
     1009typedef CPUMFEATURES const *PCCPUMFEATURES;
     1010
     1011
     1012
    8911013/** @name Guest Register Getters.
    8921014 * @{ */
  • trunk/include/VBox/vmm/cpum.mac

    r55106 r55229  
    235235    .msrApicBase        resb    8
    236236    alignb 8
    237     .xcr0               resq    1
     237    .aXcr               resq    2
    238238    .fXStateMask        resq    1
    239239    .pXStateR0      RTR0PTR_RES 1
  • trunk/include/VBox/vmm/cpumctx.h

    r55117 r55229  
    399399    /** @} */
    400400
    401     /** The XCR0 register. */
    402     uint64_t                    xcr0;
     401    /** The XCR0..XCR1 registers. */
     402    uint64_t                    aXcr[2];
    403403    /** The mask to pass to XSAVE/XRSTOR in EDX:EAX.  If zero we use
    404404     *  FXSAVE/FXRSTOR (since bit 0 will always be set, we only need to test it). */
     
    415415
    416416    /** Size padding. */
    417     uint32_t        au32SizePadding[HC_ARCH_BITS == 32 ? 15 : 13];
     417    uint32_t        au32SizePadding[HC_ARCH_BITS == 32 ? 13 : 11];
    418418} CPUMCTX;
    419419#pragma pack()
  • trunk/include/VBox/vmm/vm.h

    r55048 r55229  
    978978        struct CPUM s;
    979979#endif
     980#ifdef ___VBox_vmm_cpum_h
     981        /** Read only info exposed about the host and guest CPUs.   */
     982        struct
     983        {
     984            /** Padding for hidden fields. */
     985            uint8_t                 abHidden0[64];
     986            /** Host CPU feature information. */
     987            CPUMFEATURES            HostFeatures;
     988            /** Guest CPU feature information. */
     989            CPUMFEATURES            GuestFeatures;
     990        } const ro;
     991#endif
    980992        uint8_t     padding[1536];      /* multiple of 64 */
    981993    } cpum;
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r55062 r55229  
    4949# pragma optimize("y", off)
    5050#endif
     51
     52AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures,  cpum.ro.HostFeatures);
     53AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
    5154
    5255
     
    742745VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
    743746{
    744     if (    (cr4                     & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
    745         !=  (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
     747    /*
     748     * The CR4.OSXSAVE bit is reflected in CPUID(1).ECX[27].
     749     */
     750    if (   (cr4                     & X86_CR4_OSXSAVE)
     751        != (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE) )
     752    {
     753        PVM pVM = pVCpu->CTX_SUFF(pVM);
     754        if (cr4 & X86_CR4_OSXSAVE)
     755            CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_OSXSAVE);
     756        else
     757            CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_OSXSAVE);
     758    }
     759
     760    if (   (cr4                     & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
     761        != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
    746762        pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
     763
    747764    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
    748765    pVCpu->cpum.s.Guest.cr4 = cr4;
     
    12841301             * Deal with CPU specific information (currently only APIC ID).
    12851302             */
    1286             if (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC_ID)
     1303            if (pLeaf->fFlags & (CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE))
    12871304            {
    12881305                if (uLeaf == 1)
    12891306                {
    1290                     /* Bits 31-24: Initial APIC ID */
     1307                    /* EBX: Bits 31-24: Initial APIC ID. */
    12911308                    Assert(pVCpu->idCpu <= 255);
    12921309                    AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
    12931310                    *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
     1311
     1312                    /* ECX: Bit 27: CR4.OSXSAVE mirror. */
     1313                    *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
     1314                          | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
    12941315                }
    12951316                else if (uLeaf == 0xb)
     
    15891610            break;
    15901611
     1612        /*
     1613         * OSXSAVE - only used from CPUMSetGuestCR4.
     1614         */
     1615        case CPUMCPUIDFEATURE_OSXSAVE:
     1616            AssertLogRelReturnVoid(pVM->cpum.s.HostFeatures.fXSaveRstor && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor);
     1617
     1618            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
     1619            AssertLogRelReturnVoid(pLeaf);
     1620
     1621            /* UNI: Special case for single CPU to make life simple for CPUMPatchHlpCpuId. */
     1622            if (pVM->cCpus == 1)
     1623                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_OSXSAVE;
     1624            /* SMP: Set flag indicating OSXSAVE updating (superfluous because of the APIC ID, but that's fine). */
     1625            else
     1626                ASMAtomicOrU32(&pLeaf->fFlags, CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE);
     1627            break;
     1628
    15911629        default:
    15921630            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
     
    16261664        case CPUMCPUIDFEATURE_MWAIT_EXTS:   return pVM->cpum.s.GuestFeatures.fMWaitExtensions;
    16271665
     1666        case CPUMCPUIDFEATURE_OSXSAVE:
    16281667        case CPUMCPUIDFEATURE_INVALID:
    16291668        case CPUMCPUIDFEATURE_32BIT_HACK:
     
    17321771            Log(("CPUM: ClearGuestCpuIdFeature: Disabled MWAIT Extensions!\n"));
    17331772            break;
     1773
     1774        /*
     1775         * OSXSAVE - only used from CPUMSetGuestCR4.
     1776         */
     1777        case CPUMCPUIDFEATURE_OSXSAVE:
     1778            AssertLogRelReturnVoid(pVM->cpum.s.HostFeatures.fXSaveRstor && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor);
     1779
     1780            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
     1781            AssertLogRelReturnVoid(pLeaf);
     1782
     1783            /* UNI: Special case for single CPU to make life easy for CPUMPatchHlpCpuId. */
     1784            if (pVM->cCpus == 1)
     1785                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_OSXSAVE;
     1786            /* else: SMP: We never set the OSXSAVE bit and leaving the CONTAINS_OSXSAVE flag is fine. */
     1787            break;
     1788
    17341789
    17351790        default:
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r55105 r55229  
    296296
    297297/**
    298  * Tests if an AMD CPUID feature (extended) is marked present - ECX.
    299  */
    300 #define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx)    iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
    301 
    302 /**
    303  * Tests if an AMD CPUID feature (extended) is marked present - EDX.
    304  */
    305 #define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx)    iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
    306 
    307 /**
    308  * Tests if at least one of the specified AMD CPUID features (extended) are
    309  * marked present.
    310  */
    311 #define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx)   iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
    312 
    313 /**
    314  * Checks if an Intel CPUID feature is present.
    315  */
    316 #define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx)  \
    317     (   ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
    318      || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
    319 
    320 /**
    321  * Checks if an Intel CPUID feature is present.
    322  */
    323 #define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(a_fEcx)  \
    324     ( iemRegIsIntelCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx)) )
    325 
    326 /**
    327  * Checks if an Intel CPUID feature is present in the host CPU.
    328  */
    329 #define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx)  \
    330     ( (a_fEdx) & pIemCpu->fHostCpuIdStdFeaturesEdx )
     298 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
     299 * @returns PCCPUMFEATURES
     300 * @param   a_pIemCpu       The IEM state of the current CPU.
     301 */
     302#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
     303
     304/**
     305 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
     306 * @returns PCCPUMFEATURES
     307 * @param   a_pIemCpu       The IEM state of the current CPU.
     308 */
     309#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu)  (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
    331310
    332311/**
     
    51105089}
    51115090
    5112 
    5113 /**
    5114  * Checks if an Intel CPUID feature bit is set.
    5115  *
    5116  * @returns true / false.
    5117  *
    5118  * @param   pIemCpu             The IEM per CPU data.
    5119  * @param   fEdx                The EDX bit to test, or 0 if ECX.
    5120  * @param   fEcx                The ECX bit to test, or 0 if EDX.
    5121  * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
    5122  *          IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
    5123  */
    5124 static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
    5125 {
    5126     uint32_t uEax, uEbx, uEcx, uEdx;
    5127     CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, 0, &uEax, &uEbx, &uEcx, &uEdx);
    5128     return (fEcx && (uEcx & fEcx))
    5129         || (fEdx && (uEdx & fEdx));
    5130 }
    5131 
    5132 
    5133 /**
    5134  * Checks if an AMD CPUID feature bit is set.
    5135  *
    5136  * @returns true / false.
    5137  *
    5138  * @param   pIemCpu             The IEM per CPU data.
    5139  * @param   fEdx                The EDX bit to test, or 0 if ECX.
    5140  * @param   fEcx                The ECX bit to test, or 0 if EDX.
    5141  * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
    5142  *          IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
    5143  */
    5144 static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
    5145 {
    5146     uint32_t uEax, uEbx, uEcx, uEdx;
    5147     CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, 0, &uEax, &uEbx, &uEcx, &uEdx);
    5148     return (fEcx && (uEcx & fEcx))
    5149         || (fEdx && (uEdx & fEdx));
    5150 }
    5151 
    51525091/** @}  */
    51535092
     
    83188257        if (   (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
    83198258            || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
    8320             || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2) ) \
     8259            || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
    83218260            return iemRaiseUndefinedOpcode(pIemCpu); \
    83228261        if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
     
    83268265    do { \
    83278266        if (   ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
    8328             || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MMX) ) \
     8267            || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
    83298268            return iemRaiseUndefinedOpcode(pIemCpu); \
    83308269        if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
     
    83348273    do { \
    83358274        if (   ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
    8336             || (   !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE) \
    8337                 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_AXMMX) ) ) \
     8275            || (   !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
     8276                && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
    83388277            return iemRaiseUndefinedOpcode(pIemCpu); \
    83398278        if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r55105 r55229  
    49814981            //if (xxx)
    49824982            //    fValid |= X86_CR4_VMXE;
    4983             //if (xxx)
    4984             //    fValid |= X86_CR4_OSXSAVE;
     4983            if (IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fXSaveRstor)
     4984                fValid |= X86_CR4_OSXSAVE;
    49854985            if (uNewCrX & ~(uint64_t)fValid)
    49864986            {
     
    53355335     * Check preconditions.
    53365336     */
    5337     if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
     5337    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fTsc)
    53385338        return iemRaiseUndefinedOpcode(pIemCpu);
    53395339
     
    53705370     * Check preconditions.
    53715371     */
    5372     if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
     5372    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
    53735373        return iemRaiseUndefinedOpcode(pIemCpu);
    53745374    if (pIemCpu->uCpl != 0)
     
    54195419     * Check preconditions.
    54205420     */
    5421     if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
     5421    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
    54225422        return iemRaiseUndefinedOpcode(pIemCpu);
    54235423    if (pIemCpu->uCpl != 0)
     
    57255725        return iemRaiseUndefinedOpcode(pIemCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
    57265726    }
    5727     if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_FEATURE_ECX_MONITOR))
     5727    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
    57285728    {
    57295729        Log2(("monitor: Not in CPUID\n"));
     
    57815781        return iemRaiseUndefinedOpcode(pIemCpu);
    57825782    }
    5783     if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_FEATURE_ECX_MONITOR))
     5783    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
    57845784    {
    57855785        Log2(("mwait: Not in CPUID\n"));
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h

    r53423 r55229  
    13081308{
    13091309    /* AMD prefetch group, Intel implements this as NOP Ev (and so do we). */
    1310     if (!IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(X86_CPUID_EXT_FEATURE_EDX_LONG_MODE | X86_CPUID_AMD_FEATURE_EDX_3DNOW,
    1311                                                X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF))
     1310    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->f3DNowPrefetch)
    13121311    {
    13131312        IEMOP_MNEMONIC("GrpP");
     
    14261425FNIEMOP_DEF(iemOp_3Dnow)
    14271426{
    1428     if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_3DNOW))
     1427    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->f3DNow)
    14291428    {
    14301429        IEMOP_MNEMONIC("3Dnow");
     
    15561555    {
    15571556        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
    1558         if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
     1557        if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMovCr8In32Bit)
    15591558            return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
    15601559        iCrReg |= 8;
     
    16021601    {
    16031602        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
    1604         if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
     1603        if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMovCr8In32Bit)
    16051604            return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
    16061605        iCrReg |= 8;
     
    49244923{
    49254924    IEMOP_MNEMONIC("fxsave m512");
    4926     if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
     4925    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fFxSaveRstor)
    49274926        return IEMOP_RAISE_INVALID_OPCODE();
    49284927
     
    49444943{
    49454944    IEMOP_MNEMONIC("fxrstor m512");
    4946     if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
     4945    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fFxSaveRstor)
    49474946        return IEMOP_RAISE_INVALID_OPCODE();
    49484947
     
    49844983    IEMOP_MNEMONIC("lfence");
    49854984    IEMOP_HLP_NO_LOCK_PREFIX();
    4986     if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
     4985    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2)
    49874986        return IEMOP_RAISE_INVALID_OPCODE();
    49884987
    49894988    IEM_MC_BEGIN(0, 0);
    4990     if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
     4989    if (IEM_GET_HOST_CPU_FEATURES(pIemCpu)->fSse2)
    49914990        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_lfence);
    49924991    else
     
    50035002    IEMOP_MNEMONIC("mfence");
    50045003    IEMOP_HLP_NO_LOCK_PREFIX();
    5005     if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
     5004    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2)
    50065005        return IEMOP_RAISE_INVALID_OPCODE();
    50075006
    50085007    IEM_MC_BEGIN(0, 0);
    5009     if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
     5008    if (IEM_GET_HOST_CPU_FEATURES(pIemCpu)->fSse2)
    50105009        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_mfence);
    50115010    else
     
    50225021    IEMOP_MNEMONIC("sfence");
    50235022    IEMOP_HLP_NO_LOCK_PREFIX();
    5024     if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
     5023    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2)
    50255024        return IEMOP_RAISE_INVALID_OPCODE();
    50265025
    50275026    IEM_MC_BEGIN(0, 0);
    5028     if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
     5027    if (IEM_GET_HOST_CPU_FEATURES(pIemCpu)->fSse2)
    50295028        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_sfence);
    50305029    else
     
    1055110550    IEMOP_HLP_NO_LOCK_PREFIX();
    1055210551    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
    10553         && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
     10552        && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fLahfSahf)
    1055410553        return IEMOP_RAISE_INVALID_OPCODE();
    1055510554    IEM_MC_BEGIN(0, 2);
     
    1057510574    IEMOP_HLP_NO_LOCK_PREFIX();
    1057610575    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
    10577         && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
     10576        && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fLahfSahf)
    1057810577        return IEMOP_RAISE_INVALID_OPCODE();
    1057910578    IEM_MC_BEGIN(0, 1);
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r55152 r55229  
    917917    pFpuCtx->MXCSR_MASK             = 0xffff; /** @todo REM always changed this for us. Should probably check if the HW really
    918918                                                        supports all bits, since a zero value here should be read as 0xffbf. */
     919    pCtx->aXcr[0]                   = XSAVE_C_X87;
     920    if (pVM->cpum.s.HostFeatures.cbMaxExtendedState >= RT_OFFSETOF(X86XSAVEAREA, Hdr))
     921    {
     922        /* The entire FXSAVE state needs loading when we switch to XSAVE/XRSTOR
     923           as we don't know what happened before.  (Bother optimize later?) */
     924        pCtx->pXStateR3->Hdr.bmXState = XSAVE_C_X87 | XSAVE_C_SSE;
     925    }
    919926
    920927    /*
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp

    r55114 r55229  
    16031603            pFeatures->fLahfSahf        = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
    16041604            pFeatures->fRdTscP          = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
     1605            pFeatures->fMovCr8In32Bit   = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_CMPL);
     1606            pFeatures->f3DNow           = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_3DNOW);
     1607            pFeatures->f3DNowPrefetch   = (pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF)
     1608                                       || (pExtLeaf->uEdx & (  X86_CPUID_EXT_FEATURE_EDX_LONG_MODE
     1609                                                             | X86_CPUID_AMD_FEATURE_EDX_3DNOW));
    16051610        }
    16061611
     
    16181623            pFeatures->fMmx            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_MMX);
    16191624            pFeatures->fTsc            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_TSC);
     1625            pFeatures->fAmdMmxExts      = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_AXMMX);
    16201626        }
    16211627
     
    21062112    CPUMISAEXTCFG   enmSse41;
    21072113    CPUMISAEXTCFG   enmSse42;
     2114    CPUMISAEXTCFG   enmAvx;
     2115    CPUMISAEXTCFG   enmAvx2;
     2116    CPUMISAEXTCFG   enmXSave;
    21082117    CPUMISAEXTCFG   enmAesNi;
    21092118    CPUMISAEXTCFG   enmPClMul;
     
    24412450                           //| X86_CPUID_FEATURE_ECX_TSCDEADL - not implemented yet.
    24422451                           | (pConfig->enmAesNi ? X86_CPUID_FEATURE_ECX_AES : 0)
    2443                            //| X86_CPUID_FEATURE_ECX_XSAVE - not implemented yet.
    2444                            //| X86_CPUID_FEATURE_ECX_OSXSAVE - mirrors CR4.OSXSAVE state
    2445                            //| X86_CPUID_FEATURE_ECX_AVX   - not implemented yet.
     2452                           | (pConfig->enmXSave ? X86_CPUID_FEATURE_ECX_XSAVE : 0 )
     2453                           //| X86_CPUID_FEATURE_ECX_OSXSAVE - mirrors CR4.OSXSAVE state, set dynamically.
     2454                           | (pConfig->enmAvx ? X86_CPUID_FEATURE_ECX_AVX : 0)
    24462455                           //| X86_CPUID_FEATURE_ECX_F16C  - not implemented yet.
    24472456                           | (pConfig->enmRdRand ? X86_CPUID_FEATURE_ECX_RDRAND : 0)
     
    24602469        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, POPCNT, X86_CPUID_FEATURE_ECX_POPCNT, pConfig->enmPopCnt);
    24612470        PORTABLE_DISABLE_FEATURE_BIT(    1, pStdFeatureLeaf->uEcx, F16C,   X86_CPUID_FEATURE_ECX_F16C);
    2462         PORTABLE_DISABLE_FEATURE_BIT(    1, pStdFeatureLeaf->uEcx, XSAVE,  X86_CPUID_FEATURE_ECX_XSAVE);
    2463         PORTABLE_DISABLE_FEATURE_BIT(    1, pStdFeatureLeaf->uEcx, AVX,    X86_CPUID_FEATURE_ECX_AVX);
     2471        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, XSAVE,  X86_CPUID_FEATURE_ECX_XSAVE,  pConfig->enmXSave);
     2472        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, AVX,    X86_CPUID_FEATURE_ECX_AVX,    pConfig->enmAvx);
    24642473        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, RDRAND, X86_CPUID_FEATURE_ECX_RDRAND, pConfig->enmRdRand);
    24652474        PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, CX16,   X86_CPUID_FEATURE_ECX_CX16,   pConfig->enmCmpXchg16b);
     
    24892498                                          | X86_CPUID_FEATURE_ECX_PDCM
    24902499                                          | X86_CPUID_FEATURE_ECX_DCA
    2491                                           | X86_CPUID_FEATURE_ECX_XSAVE
    24922500                                          | X86_CPUID_FEATURE_ECX_OSXSAVE
    2493                                           | X86_CPUID_FEATURE_ECX_AVX
    24942501                                          )));
    24952502    }
     
    25242531    if (pConfig->enmAesNi == CPUMISAEXTCFG_ENABLED_ALWAYS)
    25252532        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_AES;
     2533    if (pConfig->enmXSave == CPUMISAEXTCFG_ENABLED_ALWAYS)
     2534        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_XSAVE;
     2535    if (pConfig->enmAvx == CPUMISAEXTCFG_ENABLED_ALWAYS)
     2536        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_AVX;
    25262537    if (pConfig->enmRdRand == CPUMISAEXTCFG_ENABLED_ALWAYS)
    25272538        pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_RDRAND;
     
    28182829                               //| X86_CPUID_STEXT_FEATURE_EBX_BMI1              RT_BIT(3)
    28192830                               //| X86_CPUID_STEXT_FEATURE_EBX_HLE               RT_BIT(4)
    2820                                //| X86_CPUID_STEXT_FEATURE_EBX_AVX2              RT_BIT(5)
     2831                               | (pConfig->enmAvx2 ? X86_CPUID_STEXT_FEATURE_EBX_AVX2 : 0)
    28212832                               //| RT_BIT(6) - reserved
    28222833                               //| X86_CPUID_STEXT_FEATURE_EBX_SMEP              RT_BIT(7)
     
    28542865                {
    28552866                    PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEbx, FSGSBASE,   X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE);
    2856                     PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEbx, AVX2,       X86_CPUID_STEXT_FEATURE_EBX_AVX2);
     2867                    PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, AVX2,       X86_CPUID_STEXT_FEATURE_EBX_AVX2, pConfig->enmAvx2);
    28572868                    PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEbx, SMEP,       X86_CPUID_STEXT_FEATURE_EBX_SMEP);
    28582869                    PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEbx, BMI2,       X86_CPUID_STEXT_FEATURE_EBX_BMI2);
     
    28702881
    28712882                /* Force standard feature bits. */
     2883                if (pConfig->enmAvx2 == CPUMISAEXTCFG_ENABLED_ALWAYS)
     2884                    pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_AVX2;
    28722885                if (pConfig->enmRdSeed == CPUMISAEXTCFG_ENABLED_ALWAYS)
    28732886                    pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_RDSEED;
     
    29873000     * Clear them all as we don't currently implement extended CPU state.
    29883001     */
    2989     uSubLeaf = 0;
    2990     while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 13, uSubLeaf)) != NULL)
    2991     {
    2992         pCurLeaf->uEax = 0;
    2993         pCurLeaf->uEbx = 0;
    2994         pCurLeaf->uEcx = 0;
    2995         pCurLeaf->uEdx = 0;
    2996         uSubLeaf++;
     3002    /* Figure out the supported XCR0/XSS mask component. */
     3003    uint64_t fGuestXcr0Mask = 0;
     3004    pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
     3005    if (pStdFeatureLeaf && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_XSAVE))
     3006    {
     3007        fGuestXcr0Mask = XSAVE_C_X87 | XSAVE_C_SSE;
     3008        if (pStdFeatureLeaf && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_AVX))
     3009            fGuestXcr0Mask |= XSAVE_C_YMM;
     3010        pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 7, 0);
     3011        if (pCurLeaf && (pCurLeaf->uEbx & X86_CPUID_STEXT_FEATURE_EBX_AVX512F))
     3012            fGuestXcr0Mask |= XSAVE_C_ZMM_16HI | XSAVE_C_ZMM_HI256 | XSAVE_C_OPMASK;
     3013        fGuestXcr0Mask &= pCpum->fXStateHostMask;
     3014    }
     3015    pStdFeatureLeaf = NULL;
     3016    pCpum->fXStateGuestMask = fGuestXcr0Mask;
     3017
     3018    /* Work the sub-leaves. */
     3019    for (uSubLeaf = 0; uSubLeaf < 63; uSubLeaf++)
     3020    {
     3021        pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 13, uSubLeaf);
     3022        if (pCurLeaf)
     3023        {
     3024            if (fGuestXcr0Mask)
     3025            {
     3026                switch (uSubLeaf)
     3027                {
     3028                    case 0:
     3029                        pCurLeaf->uEax &= RT_LO_U32(fGuestXcr0Mask);
     3030                        pCurLeaf->uEdx &= RT_HI_U32(fGuestXcr0Mask);
     3031                        continue;
     3032                    case 1:
     3033                        pCurLeaf->uEax &= 0;
     3034                        pCurLeaf->uEcx &= 0;
     3035                        pCurLeaf->uEdx &= 0;
     3036                        continue;
     3037                    default:
     3038                        if (fGuestXcr0Mask & RT_BIT_64(uSubLeaf))
     3039                        {
     3040                            AssertLogRel(!(pCurLeaf->uEcx & 1));
      3041                            pCurLeaf->uEcx = 0; /* Bit 0 should be zero (XCR0), the rest are reserved... */
     3042                            pCurLeaf->uEdx = 0; /* it's reserved... */
     3043                            continue;
     3044                        }
     3045                        break;
     3046                }
     3047            }
     3048
     3049            /* Clear the leaf. */
     3050            pCurLeaf->uEax = 0;
     3051            pCurLeaf->uEbx = 0;
     3052            pCurLeaf->uEcx = 0;
     3053            pCurLeaf->uEdx = 0;
     3054        }
    29973055    }
    29983056
     
    33843442
    33853443/**
     3444 * Reads a value in /CPUM/IsaExts/ node, forcing it to DISABLED if wanted.
     3445 *
     3446 * @returns VBox status code (error message raised).
     3447 * @param   pVM             The VM handle (for errors).
     3448 * @param   pIsaExts        The /CPUM/IsaExts node (can be NULL).
     3449 * @param   pszValueName    The value / extension name.
     3450 * @param   penmValue       Where to return the choice.
     3451 * @param   enmDefault      The default choice.
     3452 * @param   fAllowed        Allowed choice.  Applied both to the result and to
     3453 *                          the default value.
     3454 */
     3455static int cpumR3CpuIdReadIsaExtCfgEx(PVM pVM, PCFGMNODE pIsaExts, const char *pszValueName,
     3456                                      CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault, bool fAllowed)
     3457{
     3458    Assert(fAllowed == true || fAllowed == false);
     3459    int rc;
     3460    if (fAllowed)
     3461        rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, enmDefault);
     3462    else
     3463    {
     3464        rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, false /*enmDefault*/);
     3465        if (RT_SUCCESS(rc) && *penmValue == CPUMISAEXTCFG_ENABLED_ALWAYS)
     3466            LogRel(("CPUM: Ignoring forced '%s'\n", pszValueName));
     3467        *penmValue = CPUMISAEXTCFG_DISABLED;
     3468    }
     3469    return rc;
     3470}
     3471
     3472
     3473/**
    33863474 * Reads a value in /CPUM/IsaExts/ node that used to be located in /CPUM/.
    33873475 *
     
    35543642     */
    35553643    rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "SSE4.2", &pConfig->enmSse42, true);
     3644    AssertLogRelRCReturn(rc, rc);
     3645
     3646#if 0 /* Incomplete, so not yet enabled.  */
     3647    bool const fMayHaveXSave = fNestedPagingAndFullGuestExec
     3648                            && pVM->cpum.s.HostFeatures.fXSaveRstor
     3649                            && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor
     3650                            && pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL /** @todo test and enable on AMD! */;
     3651#else
     3652    bool const fMayHaveXSave = false;
     3653#endif
     3654    /** @cfgm{/CPUM/IsaExts/XSAVE, boolean, depends}
     3655     * Expose XSAVE/XRSTOR to the guest if available.  For the time being the
     3656     * default is to only expose this to VMs with nested paging and AMD-V or
     3657     * unrestricted guest execution mode.  Not possible to force this one without
     3658     * host support at the moment.
     3659     */
     3660    rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "XSAVE", &pConfig->enmXSave, fNestedPagingAndFullGuestExec,
     3661                                    fMayHaveXSave /*fAllowed*/);
     3662    AssertLogRelRCReturn(rc, rc);
     3663
     3664    /** @cfgm{/CPUM/IsaExts/AVX, boolean, depends}
     3665     * Expose the AVX instruction set extensions to the guest if available and
     3666     * XSAVE is exposed too.  For the time being the default is to only expose this
     3667     * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
     3668     */
     3669    rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX", &pConfig->enmAvx, fNestedPagingAndFullGuestExec,
     3670                                    fMayHaveXSave && pConfig->enmXSave /*fAllowed*/);
     3671    AssertLogRelRCReturn(rc, rc);
     3672
     3673    /** @cfgm{/CPUM/IsaExts/AVX2, boolean, depends}
     3674     * Expose the AVX2 instruction set extensions to the guest if available and
     3675     * XSAVE is exposed too. For the time being the default is to only expose this
     3676     * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
     3677     */
     3678    rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX2", &pConfig->enmAvx2, fNestedPagingAndFullGuestExec,
     3679                                    fMayHaveXSave && pConfig->enmXSave /*fAllowed*/);
    35563680    AssertLogRelRCReturn(rc, rc);
    35573681
     
    45274651    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AES);     // -> EMU
    45284652    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_XSAVE);   // -> EMU
    4529     CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE); // -> EMU
     4653    CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE);
    45304654    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AVX);     // -> EMU?
    45314655    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_F16C);
     
    46474771    /** @todo check leaf 7   */
    46484772
    4649     /** @todo XSAVE: Stricter XSAVE feature checks for all modes. */
     4773    /* CPUID(d) - XCR0 stuff - takes ECX as input.
     4774     * ECX=0:   EAX - Valid bits in XCR0[31:0].
     4775     *          EBX - Maximum state size as per current XCR0 value.
     4776     *          ECX - Maximum state size for all supported features.
     4777     *          EDX - Valid bits in XCR0[63:32].
     4778     * ECX=1:   EAX - Various X-features.
     4779     *          EBX - Maximum state size as per current XCR0|IA32_XSS value.
     4780     *          ECX - Valid bits in IA32_XSS[31:0].
     4781     *          EDX - Valid bits in IA32_XSS[63:32].
     4782     * ECX=N, where N in 2..63 and indicates a bit in XCR0 and/or IA32_XSS,
     4783     *        if the bit invalid all four registers are set to zero.
     4784     *          EAX - The state size for this feature.
     4785     *          EBX - The state byte offset of this feature.
     4786     *          ECX - Bit 0 indicates whether this sub-leaf maps to a valid IA32_XSS bit (=1) or a valid XCR0 bit (=0).
     4787     *          EDX - Reserved, but is set to zero if invalid sub-leaf index.
     4788     */
     4789    PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x0000000d), 0);
     4790    if (   pCurLeaf
     4791        && (aGuestCpuIdStd[1].uEcx & X86_CPUID_FEATURE_ECX_XSAVE)
     4792        && (   pCurLeaf->uEax
     4793            || pCurLeaf->uEbx
     4794            || pCurLeaf->uEcx
     4795            || pCurLeaf->uEdx) )
     4796    {
     4797        uint64_t fGuestXcr0Mask = RT_MAKE_U64(pCurLeaf->uEax, pCurLeaf->uEdx);
     4798        if (fGuestXcr0Mask & ~pVM->cpum.s.fXStateHostMask)
     4799            return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
     4800                                     N_("CPUID(0xd/0).EDX:EAX mismatch: %#llx saved, %#llx supported by the current host (XCR0 bits)"),
     4801                                     fGuestXcr0Mask, pVM->cpum.s.fXStateHostMask);
     4802
     4803        /* We don't support any additional features yet. */
     4804        pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x0000000d), 1);
     4805        if (pCurLeaf && pCurLeaf->uEax)
     4806            return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
     4807                                     N_("CPUID(0xd/1).EAX=%#x, expected zero"), pCurLeaf->uEax);
     4808        if (pCurLeaf && (pCurLeaf->uEcx || pCurLeaf->uEdx))
     4809            return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
     4810                                     N_("CPUID(0xd/1).EDX:ECX=%#llx, expected zero"),
     4811                                     RT_MAKE_U64(pCurLeaf->uEdx, pCurLeaf->uEcx));
     4812
     4813
     4814        if (pVM->cpum.s.fXStateGuestMask != fGuestXcr0Mask)
     4815        {
     4816            LogRel(("CPUM: fXStateGuestMask=%#lx -> %#llx\n", pVM->cpum.s.fXStateGuestMask, fGuestXcr0Mask));
     4817            pVM->cpum.s.fXStateGuestMask = fGuestXcr0Mask;
     4818        }
     4819
     4820        for (uint32_t uSubLeaf = 2; uSubLeaf < 64; uSubLeaf++)
     4821        {
     4822            pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x0000000d), uSubLeaf);
     4823            if (pCurLeaf)
     4824            {
     4825                /* If advertised, the state component offset and size must match the one used by host. */
     4826                if (pCurLeaf->uEax || pCurLeaf->uEbx || pCurLeaf->uEcx || pCurLeaf->uEdx)
     4827                {
     4828                    CPUMCPUID RawHost;
     4829                    ASMCpuIdExSlow(UINT32_C(0x0000000d), 0, uSubLeaf, 0,
     4830                                   &RawHost.uEax, &RawHost.uEbx, &RawHost.uEcx, &RawHost.uEdx);
     4831                    if (   RawHost.uEbx != pCurLeaf->uEbx
     4832                        || RawHost.uEax != pCurLeaf->uEax)
     4833                        return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
     4834                                                 N_("CPUID(0xd/%#x).EBX/EAX=%#x/%#x, current host uses %#x/%#x (offset/size)"),
     4835                                                 uSubLeaf, pCurLeaf->uEbx, pCurLeaf->uEax, RawHost.uEbx, RawHost.uEax);
     4836                }
     4837            }
     4838        }
     4839    }
    46504840
    46514841#undef CPUID_CHECK_RET
  • trunk/src/VBox/VMM/VMMR3/IEMR3.cpp

    r54737 r55229  
    7171        if (idCpu == 0)
    7272        {
    73             uint32_t uIgnored;
    74             CPUMGetGuestCpuId(pVCpu, 1, 0, &uIgnored, &uIgnored,
    75                               &pVCpu->iem.s.fCpuIdStdFeaturesEcx, &pVCpu->iem.s.fCpuIdStdFeaturesEdx);
    7673            pVCpu->iem.s.enmCpuVendor             = CPUMGetGuestCpuVendor(pVM);
    77 
    78             ASMCpuId_ECX_EDX(1, &pVCpu->iem.s.fHostCpuIdStdFeaturesEcx, &pVCpu->iem.s.fHostCpuIdStdFeaturesEdx);
    7974            pVCpu->iem.s.enmHostCpuVendor         = CPUMGetHostCpuVendor(pVM);
    8075        }
    8176        else
    8277        {
    83             pVCpu->iem.s.fCpuIdStdFeaturesEcx     = pVM->aCpus[0].iem.s.fCpuIdStdFeaturesEcx;
    84             pVCpu->iem.s.fCpuIdStdFeaturesEdx     = pVM->aCpus[0].iem.s.fCpuIdStdFeaturesEdx;
    8578            pVCpu->iem.s.enmCpuVendor             = pVM->aCpus[0].iem.s.enmCpuVendor;
    86             pVCpu->iem.s.fHostCpuIdStdFeaturesEcx = pVM->aCpus[0].iem.s.fHostCpuIdStdFeaturesEcx;
    87             pVCpu->iem.s.fHostCpuIdStdFeaturesEdx = pVM->aCpus[0].iem.s.fHostCpuIdStdFeaturesEdx;
    8879            pVCpu->iem.s.enmHostCpuVendor         = pVM->aCpus[0].iem.s.enmHostCpuVendor;
    8980        }
  • trunk/src/VBox/VMM/include/CPUMInternal.h

    r55114 r55229  
    148148
    149149
    150 
    151 /**
    152  * CPU features and quirks.
    153  * This is mostly exploded CPUID info.
    154  */
    155 typedef struct CPUMFEATURES
    156 {
    157     /** The CPU vendor (CPUMCPUVENDOR). */
    158     uint8_t         enmCpuVendor;
    159     /** The CPU family. */
    160     uint8_t         uFamily;
    161     /** The CPU model. */
    162     uint8_t         uModel;
    163     /** The CPU stepping. */
    164     uint8_t         uStepping;
    165     /** The microarchitecture. */
    166 #ifndef VBOX_FOR_DTRACE_LIB
    167     CPUMMICROARCH   enmMicroarch;
    168 #else
    169     uint32_t        enmMicroarch;
    170 #endif
    171     /** The maximum physical address with of the CPU. */
    172     uint8_t         cMaxPhysAddrWidth;
    173     /** Alignment padding. */
    174     uint8_t         abPadding[1];
    175     /** Max size of the extended state (or FPU state if no XSAVE). */
    176     uint16_t        cbMaxExtendedState;
    177 
    178     /** Supports MSRs. */
    179     uint32_t        fMsr : 1;
    180     /** Supports the page size extension (4/2 MB pages). */
    181     uint32_t        fPse : 1;
    182     /** Supports 36-bit page size extension (4 MB pages can map memory above
    183      *  4GB). */
    184     uint32_t        fPse36 : 1;
    185     /** Supports physical address extension (PAE). */
    186     uint32_t        fPae : 1;
    187     /** Page attribute table (PAT) support (page level cache control). */
    188     uint32_t        fPat : 1;
    189     /** Supports the FXSAVE and FXRSTOR instructions. */
    190     uint32_t        fFxSaveRstor : 1;
    191     /** Supports the XSAVE and XRSTOR instructions. */
    192     uint32_t        fXSaveRstor : 1;
    193     /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
    194     uint32_t        fOpSysXSaveRstor : 1;
    195     /** Supports MMX. */
    196     uint32_t        fMmx : 1;
    197     /** Supports SSE. */
    198     uint32_t        fSse : 1;
    199     /** Supports SSE2. */
    200     uint32_t        fSse2 : 1;
    201     /** Supports SSE3. */
    202     uint32_t        fSse3 : 1;
    203     /** Supports SSSE3. */
    204     uint32_t        fSsse3 : 1;
    205     /** Supports SSE4.1. */
    206     uint32_t        fSse41 : 1;
    207     /** Supports SSE4.2. */
    208     uint32_t        fSse42 : 1;
    209     /** Supports AVX. */
    210     uint32_t        fAvx : 1;
    211     /** Supports AVX2. */
    212     uint32_t        fAvx2 : 1;
    213     /** Supports AVX512 foundation. */
    214     uint32_t        fAvx512Foundation : 1;
    215     /** Supports RDTSC. */
    216     uint32_t        fTsc : 1;
    217     /** Intel SYSENTER/SYSEXIT support */
    218     uint32_t        fSysEnter : 1;
    219     /** First generation APIC. */
    220     uint32_t        fApic : 1;
    221     /** Second generation APIC. */
    222     uint32_t        fX2Apic : 1;
    223     /** Hypervisor present. */
    224     uint32_t        fHypervisorPresent : 1;
    225     /** MWAIT & MONITOR instructions supported. */
    226     uint32_t        fMonitorMWait : 1;
    227     /** MWAIT Extensions present. */
    228     uint32_t        fMWaitExtensions : 1;
    229 
    230     /** AMD64: Supports long mode. */
    231     uint32_t        fLongMode : 1;
    232     /** AMD64: SYSCALL/SYSRET support. */
    233     uint32_t        fSysCall : 1;
    234     /** AMD64: No-execute page table bit. */
    235     uint32_t        fNoExecute : 1;
    236     /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
    237     uint32_t        fLahfSahf : 1;
    238     /** AMD64: Supports RDTSCP. */
    239     uint32_t        fRdTscP : 1;
    240 
    241     /** Indicates that FPU instruction and data pointers may leak.
    242      * This generally applies to recent AMD CPUs, where the FPU IP and DP pointer
    243      * is only saved and restored if an exception is pending. */
    244     uint32_t        fLeakyFxSR : 1;
    245 
    246     /** Alignment padding / reserved for future use. */
    247     uint32_t        fPadding : 1;
    248     uint64_t        auPadding[2];
    249 } CPUMFEATURES;
    250 #ifndef VBOX_FOR_DTRACE_LIB
    251 AssertCompileSize(CPUMFEATURES, 32);
    252 #endif
    253 /** Pointer to a CPU feature structure. */
    254 typedef CPUMFEATURES *PCPUMFEATURES;
    255 /** Pointer to a const CPU feature structure. */
    256 typedef CPUMFEATURES const *PCCPUMFEATURES;
    257 
    258 
    259150/**
    260151 * CPU info
     
    509400     * This is used to verify load order dependencies (PGM). */
    510401    bool                    fPendingRestore;
    511     uint8_t                 abPadding[HC_ARCH_BITS == 64 ? 6 : 2];
     402    uint8_t                 abPadding0[6];
     403
      404    /** XSAVE/XRSTOR components we can expose to the guest mask. */
     405    uint64_t                fXStateGuestMask;
     406    /** XSAVE/XRSTOR host mask.  Only state components in this mask can be exposed
     407     * to the guest.  This is 0 if no XSAVE/XRSTOR bits can be exposed. */
     408    uint64_t                fXStateHostMask;
     409    uint8_t                 abPadding1[24];
     410
     411    /** Host CPU feature information.
      412     * Externally visible via the VM structure, aligned on 64-byte boundary. */
     413    CPUMFEATURES            HostFeatures;
     414    /** Guest CPU feature information.
      415     * Externally visible via the VM structure, aligned with HostFeatures. */
     416    CPUMFEATURES            GuestFeatures;
     417    /** Guest CPU info. */
     418    CPUMINFO                GuestInfo;
     419
    512420
    513421    /** The standard set of CpuId leaves. */
     
    517425    /** The centaur set of CpuId leaves. */
    518426    CPUMCPUID               aGuestCpuIdPatmCentaur[4];
    519 
    520 #if HC_ARCH_BITS == 32
    521     uint8_t                 abPadding2[4];
    522 #endif
    523 
    524     /** Guest CPU info. */
    525     CPUMINFO                GuestInfo;
    526     /** Guest CPU feature information. */
    527     CPUMFEATURES            GuestFeatures;
    528     /** Host CPU feature information. */
    529     CPUMFEATURES            HostFeatures;
    530     /** XSAVE/XRSTOR host mask.  Only state components in this mask can be exposed
    531      * to the guest.  This is 0 if no XSAVE/XRSTOR bits can be exposed. */
    532     uint64_t                fXStateHostMask;
    533427
    534428    /** @name MSR statistics.
     
    543437    /** @} */
    544438} CPUM;
     439AssertCompileMemberOffset(CPUM, HostFeatures, 64);
     440AssertCompileMemberOffset(CPUM, GuestFeatures, 96);
    545441/** Pointer to the CPUM instance data residing in the shared VM structure. */
    546442typedef CPUM *PCPUM;
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r55114 r55229  
    7676struc CPUM
    7777    ;...
    78     .offCPUMCPU0          resd    1
    79     .fHostUseFlags        resd    1
     78    .offCPUMCPU0                resd    1
     79    .fHostUseFlags              resd    1
    8080
    8181    ; CR4 masks
    82     .CR4.AndMask          resd    1
    83     .CR4.OrMask           resd    1
     82    .CR4.AndMask                resd    1
     83    .CR4.OrMask                 resd    1
    8484    ; entered rawmode?
    85     .u8PortableCpuIdLevel resb    1
    86     .fPendingRestore      resb    1
    87 %if RTHCPTR_CB == 8
    88     .abPadding            resb    6
    89 %else
    90     .abPadding            resb    2
    91 %endif
     85    .u8PortableCpuIdLevel       resb    1
     86    .fPendingRestore            resb    1
     87
     88    alignb 8
     89    .fXStateGuestMask           resq    1
     90    .fXStateHostMask            resq    1
     91
     92    alignb 64
     93    .HostFeatures               resb    32
     94    .GuestFeatures              resb    32
     95    .GuestInfo                  resb    RTHCPTR_CB*4 + RTRCPTR_CB*2 + 4*12
    9296
    9397    ; Patch manager saved state compatability CPUID leaf arrays
     
    96100    .aGuestCpuIdPatmCentaur     resb    16*4
    97101
    98 %if HC_ARCH_BITS == 32
    99     .abPadding2           resb    4
    100 %endif
    101 
    102     .GuestInfo            resb    RTHCPTR_CB*4 + RTRCPTR_CB*2 + 4*12
    103     .GuestFeatures        resb    32
    104     .HostFeatures         resb    32
    105     .fXStateHostMask      resq    1
    106 
    107     .cMsrWrites                 resq  1
    108     .cMsrWritesToIgnoredBits    resq  1
    109     .cMsrWritesRaiseGp          resq  1
    110     .cMsrWritesUnknown          resq  1
    111     .cMsrReads                  resq  1
    112     .cMsrReadsRaiseGp           resq  1
    113     .cMsrReadsUnknown           resq  1
     102    alignb 8
     103    .cMsrWrites                 resq    1
     104    .cMsrWritesToIgnoredBits    resq    1
     105    .cMsrWritesRaiseGp          resq    1
     106    .cMsrWritesUnknown          resq    1
     107    .cMsrReads                  resq    1
     108    .cMsrReadsRaiseGp           resq    1
     109    .cMsrReadsUnknown           resq    1
    114110endstruc
    115111
     
    218214    .Guest.msrKERNELGSBASE    resb    8
    219215    .Guest.msrApicBase        resb    8
    220     .Guest.xcr0               resq    1
     216    .Guest.aXcr               resq    2
    221217    .Guest.fXStateMask        resq    1
    222218    .Guest.pXStateR0      RTR0PTR_RES 1
     
    477473    .Hyper.msrKERNELGSBASE    resb    8
    478474    .Hyper.msrApicBase        resb    8
    479     .Hyper.xcr0               resq    1
     475    .Hyper.aXcr               resq    2
    480476    .Hyper.fXStateMask        resq    1
    481477    .Hyper.pXStateR0      RTR0PTR_RES 1
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r51562 r55229  
    373373    /** @name Target CPU information.
    374374     * @{ */
    375     /** EDX value of CPUID(1).
    376      * @remarks Some bits are subject to change and must be queried dynamically. */
    377     uint32_t                fCpuIdStdFeaturesEdx;
    378     /** ECX value of CPUID(1).
    379      * @remarks Some bits are subject to change and must be queried dynamically. */
    380     uint32_t                fCpuIdStdFeaturesEcx;
    381375    /** The CPU vendor. */
    382376    CPUMCPUVENDOR           enmCpuVendor;
     
    385379    /** @name Host CPU information.
    386380     * @{ */
    387     /** EDX value of CPUID(1). */
    388     uint32_t                fHostCpuIdStdFeaturesEdx;
    389     /** ECX value of CPUID(1). */
    390     uint32_t                fHostCpuIdStdFeaturesEcx;
    391381    /** The CPU vendor. */
    392382    CPUMCPUVENDOR           enmHostCpuVendor;
  • trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp

    r53183 r55229  
    138138#define IEM_IS_LONG_MODE(a_pIemCpu)                         (g_fRandom)
    139139#define IEM_IS_REAL_MODE(a_pIemCpu)                         (g_fRandom)
    140 #define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx)        (g_fRandom)
    141 #define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx)        (g_fRandom)
    142 #define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) (g_fRandom)
    143 #define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx)      (g_fRandom)
    144 #define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(a_fEcx)      (g_fRandom)
    145 #define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx) (g_fRandom)
    146140#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu)                     (g_fRandom)
    147141#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu)                   (g_fRandom)
     142#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu)               ((PCCPUMFEATURES)(uintptr_t)42)
     143#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu)                ((PCCPUMFEATURES)(uintptr_t)88)
    148144
    149145#define iemRecalEffOpSize(a_pIemCpu)                        do { } while (0)
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette