VirtualBox

Changeset 76227 in vbox


Timestamp: Dec 14, 2018 10:43:07 AM
Author: vboxsync
Message: VMM/HM, SUPDrv: Utilize common function to detect VMX/SVM support. No need to duplicate this code.

Location: trunk
Files: 3 edited
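
In outline, the change replaces HMR0Init()'s hand-rolled CPUID probing with a single call to the common SUPDrv helper. Condensed from the HMR0.cpp hunk below (a sketch, not a verbatim excerpt):

    uint32_t fCaps = 0;
    int rc = SUPR0GetVTSupport(&fCaps);
    if (RT_SUCCESS(rc))
    {
        if (fCaps & SUPVTCAPS_VT_X)
            rc = hmR0InitIntel();           /* VT-x path (Intel/VIA/Centaur) */
        else
        {
            Assert(fCaps & SUPVTCAPS_AMD_V);
            rc = hmR0InitAmd();             /* AMD-V path */
        }
    }
    else
        g_HmR0.rcInit = VERR_UNSUPPORTED_CPU; /* replaces VERR_HM_UNKNOWN_CPU and VERR_HM_NO_CPUID */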

  • trunk/include/VBox/err.h

--- trunk/include/VBox/err.h (r76222)
+++ trunk/include/VBox/err.h (r76227)
@@ -2164,10 +2164,6 @@
  * @{
  */
-/** Unable to start VM execution. */
-#define VERR_HM_UNKNOWN_CPU                         (-4100)
-/** No CPUID support. */
-#define VERR_HM_NO_CPUID                            (-4101)
 /** Host is about to go into suspend mode. */
-#define VERR_HM_SUSPEND_PENDING                     (-4102)
+#define VERR_HM_SUSPEND_PENDING                     (-4100)
 /** Conflicting CFGM values. */
 #define VERR_HM_CONFIG_MISMATCH                     (-4103)
  • trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp

--- trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp (r76223)
+++ trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp (r76227)
@@ -203,4 +203,5 @@
     { "SUPR0GetPagingMode",                     (void *)(uintptr_t)SUPR0GetPagingMode },
     { "SUPR0GetSvmUsability",                   (void *)(uintptr_t)SUPR0GetSvmUsability },
+    { "SUPR0GetVTSupport",                      (void *)(uintptr_t)SUPR0GetVTSupport },
     { "SUPR0GetVmxUsability",                   (void *)(uintptr_t)SUPR0GetVmxUsability },
     { "SUPR0GetRawModeUsability",               (void *)(uintptr_t)SUPR0GetRawModeUsability },
@@ -4110,5 +4111,4 @@
 SUPR0DECL(int) SUPR0GetVTSupport(uint32_t *pfCaps)
 {
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     Assert(pfCaps);
     *pfCaps = 0;
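
Note the second hunk: the Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)) at the top of SUPR0GetVTSupport() is dropped. Reading the removed assertion (an inference from the diff, not a documented contract), the helper no longer demands a preemption-disabled caller:

    /* Sketch: after r76227 a ring-0 caller apparently needs no
       RTThreadPreemptDisable()/RTThreadPreemptRestore() bracketing
       around the capability query itself. */
    uint32_t fCaps = 0;                  /* the callee also zeroes *pfCaps */
    int rc = SUPR0GetVTSupport(&fCaps);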
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r74789)
+++ trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r76227)
@@ -332,284 +332,251 @@
  *
  * @returns VBox status code (will only fail if out of memory).
- * @param   uFeatEcx        Standard cpuid:1 feature ECX leaf.
- * @param   uFeatEdx        Standard cpuid:1 feature EDX leaf.
- */
-static int hmR0InitIntel(uint32_t uFeatEcx, uint32_t uFeatEdx)
-{
+ */
+static int hmR0InitIntel(void)
+{
+    /* Read this MSR now as it may be useful for error reporting when initializing VT-x fails. */
+    g_HmR0.vmx.Msrs.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+
     /*
-     * Check that all the required VT-x features are present.
-     * We also assume all VT-x-enabled CPUs support fxsave/fxrstor.
+     * First try use native kernel API for controlling VT-x.
+     * (This is only supported by some Mac OS X kernels atm.)
      */
-    if (    (uFeatEcx & X86_CPUID_FEATURE_ECX_VMX)
-         && (uFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
-         && (uFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
-    {
-        /* Read this MSR now as it may be useful for error reporting when initializing VT-x fails. */
-        g_HmR0.vmx.Msrs.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+    int rc = g_HmR0.rcInit = SUPR0EnableVTx(true /* fEnable */);
+    g_HmR0.vmx.fUsingSUPR0EnableVTx = rc != VERR_NOT_SUPPORTED;
+    if (g_HmR0.vmx.fUsingSUPR0EnableVTx)
+    {
+        AssertLogRelMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
+        if (RT_SUCCESS(rc))
+        {
+            g_HmR0.vmx.fSupported = true;
+            rc = SUPR0EnableVTx(false /* fEnable */);
+            AssertLogRelRC(rc);
+        }
+    }
+    else
+    {
+        HMR0FIRSTRC FirstRc;
+        hmR0FirstRcInit(&FirstRc);
+        g_HmR0.rcInit = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
+        if (RT_SUCCESS(g_HmR0.rcInit))
+            g_HmR0.rcInit = hmR0FirstRcGetStatus(&FirstRc);
+    }
+
+    if (RT_SUCCESS(g_HmR0.rcInit))
+    {
+        /* Reread in case it was changed by SUPR0GetVmxUsability(). */
+        g_HmR0.vmx.Msrs.u64FeatCtrl     = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
 
         /*
-         * First try use native kernel API for controlling VT-x.
-         * (This is only supported by some Mac OS X kernels atm.)
+         * Read all relevant registers and MSRs.
          */
-        int rc = g_HmR0.rcInit = SUPR0EnableVTx(true /* fEnable */);
-        g_HmR0.vmx.fUsingSUPR0EnableVTx = rc != VERR_NOT_SUPPORTED;
-        if (g_HmR0.vmx.fUsingSUPR0EnableVTx)
+        g_HmR0.vmx.u64HostCr4           = ASMGetCR4();
+        g_HmR0.vmx.u64HostEfer          = ASMRdMsr(MSR_K6_EFER);
+        g_HmR0.vmx.Msrs.u64Basic        = ASMRdMsr(MSR_IA32_VMX_BASIC);
+        /* KVM workaround: Intel SDM section 34.15.5 describes that MSR_IA32_SMM_MONITOR_CTL
+         * depends on bit 49 of MSR_IA32_VMX_BASIC while table 35-2 says that this MSR is
+         * available if either VMX or SMX is supported. */
+        if (RT_BF_GET(g_HmR0.vmx.Msrs.u64Basic, VMX_BF_BASIC_DUAL_MON))
+            g_HmR0.vmx.u64HostSmmMonitorCtl = ASMRdMsr(MSR_IA32_SMM_MONITOR_CTL);
+        g_HmR0.vmx.Msrs.PinCtls.u       = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
+        g_HmR0.vmx.Msrs.ProcCtls.u      = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
+        g_HmR0.vmx.Msrs.ExitCtls.u      = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
+        g_HmR0.vmx.Msrs.EntryCtls.u     = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
+        g_HmR0.vmx.Msrs.u64Misc         = ASMRdMsr(MSR_IA32_VMX_MISC);
+        g_HmR0.vmx.Msrs.u64Cr0Fixed0    = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
+        g_HmR0.vmx.Msrs.u64Cr0Fixed1    = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
+        g_HmR0.vmx.Msrs.u64Cr4Fixed0    = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
+        g_HmR0.vmx.Msrs.u64Cr4Fixed1    = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
+        g_HmR0.vmx.Msrs.u64VmcsEnum     = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
+        if (RT_BF_GET(g_HmR0.vmx.Msrs.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
         {
-            AssertLogRelMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
+            g_HmR0.vmx.Msrs.TruePinCtls.u   = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
+            g_HmR0.vmx.Msrs.TrueProcCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
+            g_HmR0.vmx.Msrs.TrueEntryCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
+            g_HmR0.vmx.Msrs.TrueExitCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
+        }
+
+        /* VPID 16 bits ASID. */
+        g_HmR0.uMaxAsid = 0x10000; /* exclusive */
+
+        if (g_HmR0.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
+        {
+            g_HmR0.vmx.Msrs.ProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
+            if (g_HmR0.vmx.Msrs.ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
+                g_HmR0.vmx.Msrs.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
+
+            if (g_HmR0.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMFUNC)
+                g_HmR0.vmx.Msrs.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
+        }
+
+        if (!g_HmR0.vmx.fUsingSUPR0EnableVTx)
+        {
+            /*
+             * Enter root mode
+             */
+            RTR0MEMOBJ hScatchMemObj;
+            rc = RTR0MemObjAllocCont(&hScatchMemObj, PAGE_SIZE, false /* fExecutable */);
+            if (RT_FAILURE(rc))
+            {
+                LogRel(("hmR0InitIntel: RTR0MemObjAllocCont(,PAGE_SIZE,false) -> %Rrc\n", rc));
+                return rc;
+            }
+
+            void      *pvScatchPage      = RTR0MemObjAddress(hScatchMemObj);
+            RTHCPHYS   HCPhysScratchPage = RTR0MemObjGetPagePhysAddr(hScatchMemObj, 0);
+            ASMMemZeroPage(pvScatchPage);
+
+            /* Set revision dword at the beginning of the structure. */
+            *(uint32_t *)pvScatchPage = RT_BF_GET(g_HmR0.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
+
+            /* Make sure we don't get rescheduled to another cpu during this probe. */
+            RTCCUINTREG const fEFlags = ASMIntDisableFlags();
+
+            /*
+             * Check CR4.VMXE.
+             */
+            g_HmR0.vmx.u64HostCr4 = ASMGetCR4();
+            if (!(g_HmR0.vmx.u64HostCr4 & X86_CR4_VMXE))
+            {
+                /* In theory this bit could be cleared behind our back. Which would cause #UD
+                   faults when we try to execute the VMX instructions... */
+                ASMSetCR4(g_HmR0.vmx.u64HostCr4 | X86_CR4_VMXE);
+            }
+
+            /*
+             * The only way of checking if we're in VMX root mode or not is to try and enter it.
+             * There is no instruction or control bit that tells us if we're in VMX root mode.
+             * Therefore, try and enter VMX root mode here.
+             */
+            rc = VMXEnable(HCPhysScratchPage);
             if (RT_SUCCESS(rc))
             {
                 g_HmR0.vmx.fSupported = true;
-                rc = SUPR0EnableVTx(false /* fEnable */);
-                AssertLogRelRC(rc);
+                VMXDisable();
+            }
+            else
+            {
+                /*
+                 * KVM leaves the CPU in VMX root mode. Not only is  this not allowed,
+                 * it will crash the host when we enter raw mode, because:
+                 *
+                 *   (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify
+                 *       this bit), and
+                 *   (b) turning off paging causes a #GP  (unavoidable when switching
+                 *       from long to 32 bits mode or 32 bits to PAE).
+                 *
+                 * They should fix their code, but until they do we simply refuse to run.
+                 */
+                g_HmR0.rcInit = VERR_VMX_IN_VMX_ROOT_MODE;
+                Assert(g_HmR0.vmx.fSupported == false);
+            }
+
+            /*
+             * Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it was not
+             * set before (some software could incorrectly think it is in VMX mode).
+             */
+            ASMSetCR4(g_HmR0.vmx.u64HostCr4);
+            ASMSetFlags(fEFlags);
+
+            RTR0MemObjFree(hScatchMemObj, false);
+        }
+
+        if (g_HmR0.vmx.fSupported)
+        {
+            rc = VMXR0GlobalInit();
+            if (RT_FAILURE(rc))
+                g_HmR0.rcInit = rc;
+
+            /*
+             * Install the VT-x methods.
+             */
+            g_HmR0.pfnEnterSession      = VMXR0Enter;
+            g_HmR0.pfnThreadCtxCallback = VMXR0ThreadCtxCallback;
+            g_HmR0.pfnExportHostState   = VMXR0ExportHostState;
+            g_HmR0.pfnRunGuestCode      = VMXR0RunGuestCode;
+            g_HmR0.pfnEnableCpu         = VMXR0EnableCpu;
+            g_HmR0.pfnDisableCpu        = VMXR0DisableCpu;
+            g_HmR0.pfnInitVM            = VMXR0InitVM;
+            g_HmR0.pfnTermVM            = VMXR0TermVM;
+            g_HmR0.pfnSetupVM           = VMXR0SetupVM;
+
+            /*
+             * Check for the VMX-Preemption Timer and adjust for the "VMX-Preemption
+             * Timer Does Not Count Down at the Rate Specified" erratum.
+             */
+            if (g_HmR0.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER)
+            {
+                g_HmR0.vmx.fUsePreemptTimer   = true;
+                g_HmR0.vmx.cPreemptTimerShift = RT_BF_GET(g_HmR0.vmx.Msrs.u64Misc, VMX_BF_MISC_PREEMPT_TIMER_TSC);
+                if (hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum())
+                    g_HmR0.vmx.cPreemptTimerShift = 0; /* This is about right most of the time here. */
             }
         }
-        else
-        {
-            HMR0FIRSTRC FirstRc;
-            hmR0FirstRcInit(&FirstRc);
-            g_HmR0.rcInit = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
-            if (RT_SUCCESS(g_HmR0.rcInit))
-                g_HmR0.rcInit = hmR0FirstRcGetStatus(&FirstRc);
-        }
-        if (RT_SUCCESS(g_HmR0.rcInit))
-        {
-            /* Reread in case it was changed by SUPR0GetVmxUsability(). */
-            g_HmR0.vmx.Msrs.u64FeatCtrl     = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
-
-            /*
-             * Read all relevant registers and MSRs.
-             */
-            g_HmR0.vmx.u64HostCr4           = ASMGetCR4();
-            g_HmR0.vmx.u64HostEfer          = ASMRdMsr(MSR_K6_EFER);
-            g_HmR0.vmx.Msrs.u64Basic        = ASMRdMsr(MSR_IA32_VMX_BASIC);
-            /* KVM workaround: Intel SDM section 34.15.5 describes that MSR_IA32_SMM_MONITOR_CTL
-             * depends on bit 49 of MSR_IA32_VMX_BASIC while table 35-2 says that this MSR is
-             * available if either VMX or SMX is supported. */
-            if (RT_BF_GET(g_HmR0.vmx.Msrs.u64Basic, VMX_BF_BASIC_DUAL_MON))
-                g_HmR0.vmx.u64HostSmmMonitorCtl = ASMRdMsr(MSR_IA32_SMM_MONITOR_CTL);
-            g_HmR0.vmx.Msrs.PinCtls.u       = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
-            g_HmR0.vmx.Msrs.ProcCtls.u      = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
-            g_HmR0.vmx.Msrs.ExitCtls.u      = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
-            g_HmR0.vmx.Msrs.EntryCtls.u     = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
-            g_HmR0.vmx.Msrs.u64Misc         = ASMRdMsr(MSR_IA32_VMX_MISC);
-            g_HmR0.vmx.Msrs.u64Cr0Fixed0    = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
-            g_HmR0.vmx.Msrs.u64Cr0Fixed1    = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
-            g_HmR0.vmx.Msrs.u64Cr4Fixed0    = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
-            g_HmR0.vmx.Msrs.u64Cr4Fixed1    = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
-            g_HmR0.vmx.Msrs.u64VmcsEnum     = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
-            if (RT_BF_GET(g_HmR0.vmx.Msrs.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
-            {
-                g_HmR0.vmx.Msrs.TruePinCtls.u   = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
-                g_HmR0.vmx.Msrs.TrueProcCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
-                g_HmR0.vmx.Msrs.TrueEntryCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
-                g_HmR0.vmx.Msrs.TrueExitCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
-            }
-
-            /* VPID 16 bits ASID. */
-            g_HmR0.uMaxAsid = 0x10000; /* exclusive */
-
-            if (g_HmR0.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
-            {
-                g_HmR0.vmx.Msrs.ProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
-                if (g_HmR0.vmx.Msrs.ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
-                    g_HmR0.vmx.Msrs.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
-
-                if (g_HmR0.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMFUNC)
-                    g_HmR0.vmx.Msrs.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
-            }
-
-            if (!g_HmR0.vmx.fUsingSUPR0EnableVTx)
-            {
-                /*
-                 * Enter root mode
-                 */
-                RTR0MEMOBJ hScatchMemObj;
-                rc = RTR0MemObjAllocCont(&hScatchMemObj, PAGE_SIZE, false /* fExecutable */);
-                if (RT_FAILURE(rc))
-                {
-                    LogRel(("hmR0InitIntel: RTR0MemObjAllocCont(,PAGE_SIZE,false) -> %Rrc\n", rc));
-                    return rc;
-                }
-
-                void      *pvScatchPage      = RTR0MemObjAddress(hScatchMemObj);
-                RTHCPHYS   HCPhysScratchPage = RTR0MemObjGetPagePhysAddr(hScatchMemObj, 0);
-                ASMMemZeroPage(pvScatchPage);
-
-                /* Set revision dword at the beginning of the structure. */
-                *(uint32_t *)pvScatchPage = RT_BF_GET(g_HmR0.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
-
-                /* Make sure we don't get rescheduled to another cpu during this probe. */
-                RTCCUINTREG const fEFlags = ASMIntDisableFlags();
-
-                /*
-                 * Check CR4.VMXE.
-                 */
-                g_HmR0.vmx.u64HostCr4 = ASMGetCR4();
-                if (!(g_HmR0.vmx.u64HostCr4 & X86_CR4_VMXE))
-                {
-                    /* In theory this bit could be cleared behind our back. Which would cause #UD
-                       faults when we try to execute the VMX instructions... */
-                    ASMSetCR4(g_HmR0.vmx.u64HostCr4 | X86_CR4_VMXE);
-                }
-
-                /*
-                 * The only way of checking if we're in VMX root mode or not is to try and enter it.
-                 * There is no instruction or control bit that tells us if we're in VMX root mode.
-                 * Therefore, try and enter VMX root mode here.
-                 */
-                rc = VMXEnable(HCPhysScratchPage);
-                if (RT_SUCCESS(rc))
-                {
-                    g_HmR0.vmx.fSupported = true;
-                    VMXDisable();
-                }
-                else
-                {
-                    /*
-                     * KVM leaves the CPU in VMX root mode. Not only is  this not allowed,
-                     * it will crash the host when we enter raw mode, because:
-                     *
-                     *   (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify
-                     *       this bit), and
-                     *   (b) turning off paging causes a #GP  (unavoidable when switching
-                     *       from long to 32 bits mode or 32 bits to PAE).
-                     *
-                     * They should fix their code, but until they do we simply refuse to run.
-                     */
-                    g_HmR0.rcInit = VERR_VMX_IN_VMX_ROOT_MODE;
-                    Assert(g_HmR0.vmx.fSupported == false);
-                }
-
-                /*
-                 * Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it was not
-                 * set before (some software could incorrectly think it is in VMX mode).
-                 */
-                ASMSetCR4(g_HmR0.vmx.u64HostCr4);
-                ASMSetFlags(fEFlags);
-
-                RTR0MemObjFree(hScatchMemObj, false);
-            }
-
-            if (g_HmR0.vmx.fSupported)
-            {
-                rc = VMXR0GlobalInit();
-                if (RT_FAILURE(rc))
-                    g_HmR0.rcInit = rc;
-
-                /*
-                 * Install the VT-x methods.
-                 */
-                g_HmR0.pfnEnterSession      = VMXR0Enter;
-                g_HmR0.pfnThreadCtxCallback = VMXR0ThreadCtxCallback;
-                g_HmR0.pfnExportHostState   = VMXR0ExportHostState;
-                g_HmR0.pfnRunGuestCode      = VMXR0RunGuestCode;
-                g_HmR0.pfnEnableCpu         = VMXR0EnableCpu;
-                g_HmR0.pfnDisableCpu        = VMXR0DisableCpu;
-                g_HmR0.pfnInitVM            = VMXR0InitVM;
-                g_HmR0.pfnTermVM            = VMXR0TermVM;
-                g_HmR0.pfnSetupVM           = VMXR0SetupVM;
-
-                /*
-                 * Check for the VMX-Preemption Timer and adjust for the "VMX-Preemption
-                 * Timer Does Not Count Down at the Rate Specified" erratum.
-                 */
-                if (g_HmR0.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER)
-                {
-                    g_HmR0.vmx.fUsePreemptTimer   = true;
-                    g_HmR0.vmx.cPreemptTimerShift = RT_BF_GET(g_HmR0.vmx.Msrs.u64Misc, VMX_BF_MISC_PREEMPT_TIMER_TSC);
-                    if (hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum())
-                        g_HmR0.vmx.cPreemptTimerShift = 0; /* This is about right most of the time here. */
-                }
-            }
-        }
+    }
 #ifdef LOG_ENABLED
-        else
-            SUPR0Printf("hmR0InitIntelCpu failed with rc=%Rrc\n", g_HmR0.rcInit);
+    else
+        SUPR0Printf("hmR0InitIntelCpu failed with rc=%Rrc\n", g_HmR0.rcInit);
 #endif
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * AMD-specific initialization code.
+ *
+ * @returns VBox status code (will only fail if out of memory).
+ */
+static int hmR0InitAmd(void)
+{
+    /* Call the global AMD-V initialization routine (should only fail in out-of-memory situations). */
+    int rc = SVMR0GlobalInit();
+    if (RT_FAILURE(rc))
+    {
+        g_HmR0.rcInit = rc;
+        return rc;
+    }
+
+    /*
+     * Install the AMD-V methods.
+     */
+    g_HmR0.pfnEnterSession      = SVMR0Enter;
+    g_HmR0.pfnThreadCtxCallback = SVMR0ThreadCtxCallback;
+    g_HmR0.pfnExportHostState   = SVMR0ExportHostState;
+    g_HmR0.pfnRunGuestCode      = SVMR0RunGuestCode;
+    g_HmR0.pfnEnableCpu         = SVMR0EnableCpu;
+    g_HmR0.pfnDisableCpu        = SVMR0DisableCpu;
+    g_HmR0.pfnInitVM            = SVMR0InitVM;
+    g_HmR0.pfnTermVM            = SVMR0TermVM;
+    g_HmR0.pfnSetupVM           = SVMR0SetupVM;
+
+    /* Query AMD features. */
+    uint32_t u32Dummy;
+    ASMCpuId(0x8000000a, &g_HmR0.svm.u32Rev, &g_HmR0.uMaxAsid, &u32Dummy, &g_HmR0.svm.u32Features);
+
+    /*
+     * We need to check if AMD-V has been properly initialized on all CPUs.
+     * Some BIOSes might do a poor job.
+     */
+    HMR0FIRSTRC FirstRc;
+    hmR0FirstRcInit(&FirstRc);
+    rc = RTMpOnAll(hmR0InitAmdCpu, &FirstRc, NULL);
+    AssertRC(rc);
+    if (RT_SUCCESS(rc))
+        rc = hmR0FirstRcGetStatus(&FirstRc);
+#ifndef DEBUG_bird
+    AssertMsg(rc == VINF_SUCCESS || rc == VERR_SVM_IN_USE,
+              ("hmR0InitAmdCpu failed for cpu %d with rc=%Rrc\n", hmR0FirstRcGetCpuId(&FirstRc), rc));
+#endif
+    if (RT_SUCCESS(rc))
+    {
+        /* Read the HWCR MSR for diagnostics. */
+        g_HmR0.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
+        g_HmR0.svm.fSupported = true;
     }
     else
-        g_HmR0.rcInit = VERR_VMX_NO_VMX;
-    return VINF_SUCCESS;
-}
-
-
-/**
- * AMD-specific initialization code.
- *
- * @returns VBox status code.
- * @param   uFeatEdx        Standard cpuid:1 feature EDX leaf.
- * @param   uExtFeatEcx     Extended cpuid:0x80000001 feature ECX leaf.
- * @param   uMaxExtLeaf     Extended cpuid:0x80000000 feature maximum valid leaf.
- */
-static int hmR0InitAmd(uint32_t uFeatEdx, uint32_t uExtFeatEcx, uint32_t uMaxExtLeaf)
-{
-    /*
-     * Read all SVM MSRs if SVM is available.
-     * We also require all SVM-enabled CPUs to support rdmsr/wrmsr and fxsave/fxrstor.
-     */
-    int rc;
-    if (   (uExtFeatEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
-        && (uFeatEdx    & X86_CPUID_FEATURE_EDX_MSR)
-        && (uFeatEdx    & X86_CPUID_FEATURE_EDX_FXSR)
-        && ASMIsValidExtRange(uMaxExtLeaf)
-        && uMaxExtLeaf >= 0x8000000a)
-    {
-        /* Call the global AMD-V initialization routine. */
-        rc = SVMR0GlobalInit();
-        if (RT_FAILURE(rc))
-        {
-            g_HmR0.rcInit = rc;
-            return rc;
-        }
-
-        /*
-         * Install the AMD-V methods.
-         */
-        g_HmR0.pfnEnterSession      = SVMR0Enter;
-        g_HmR0.pfnThreadCtxCallback = SVMR0ThreadCtxCallback;
-        g_HmR0.pfnExportHostState   = SVMR0ExportHostState;
-        g_HmR0.pfnRunGuestCode      = SVMR0RunGuestCode;
-        g_HmR0.pfnEnableCpu         = SVMR0EnableCpu;
-        g_HmR0.pfnDisableCpu        = SVMR0DisableCpu;
-        g_HmR0.pfnInitVM            = SVMR0InitVM;
-        g_HmR0.pfnTermVM            = SVMR0TermVM;
-        g_HmR0.pfnSetupVM           = SVMR0SetupVM;
-
-        /* Query AMD features. */
-        uint32_t u32Dummy;
-        ASMCpuId(0x8000000a, &g_HmR0.svm.u32Rev, &g_HmR0.uMaxAsid, &u32Dummy, &g_HmR0.svm.u32Features);
-
-        /*
-         * We need to check if AMD-V has been properly initialized on all CPUs.
-         * Some BIOSes might do a poor job.
-         */
-        HMR0FIRSTRC FirstRc;
-        hmR0FirstRcInit(&FirstRc);
-        rc = RTMpOnAll(hmR0InitAmdCpu, &FirstRc, NULL);
-        AssertRC(rc);
-        if (RT_SUCCESS(rc))
-            rc = hmR0FirstRcGetStatus(&FirstRc);
-#ifndef DEBUG_bird
-        AssertMsg(rc == VINF_SUCCESS || rc == VERR_SVM_IN_USE,
-                  ("hmR0InitAmdCpu failed for cpu %d with rc=%Rrc\n", hmR0FirstRcGetCpuId(&FirstRc), rc));
-#endif
-        if (RT_SUCCESS(rc))
-        {
-            /* Read the HWCR MSR for diagnostics. */
-            g_HmR0.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
-            g_HmR0.svm.fSupported = true;
-        }
-        else
-        {
-            g_HmR0.rcInit = rc;
-            if (rc == VERR_SVM_DISABLED || rc == VERR_SVM_IN_USE)
-                rc = VINF_SUCCESS; /* Don't fail if AMD-V is disabled or in use. */
-        }
-    }
-    else
-    {
-        /* Don't fail if AMD-V is not supported. See @bugref{6785}. */
-        rc = VINF_SUCCESS;
-        g_HmR0.rcInit = VERR_SVM_NO_SVM;
+    {
+        g_HmR0.rcInit = rc;
+        if (rc == VERR_SVM_DISABLED || rc == VERR_SVM_IN_USE)
+            rc = VINF_SUCCESS; /* Don't fail if AMD-V is disabled or in use. */
     }
     return rc;
@@ -667,47 +634,27 @@
 
     /*
-     * Check for VT-x and AMD-V capabilities.
+     * Check for VT-x or AMD-V support.
+     * Return failure only in out-of-memory situations.
      */
-    int rc;
-    if (ASMHasCpuId())
-    {
-        /* Standard features. */
-        uint32_t uMaxLeaf, uVendorEbx, uVendorEcx, uVendorEdx;
-        ASMCpuId(0, &uMaxLeaf, &uVendorEbx, &uVendorEcx, &uVendorEdx);
-        if (ASMIsValidStdRange(uMaxLeaf))
+    uint32_t fCaps = 0;
+    int rc = SUPR0GetVTSupport(&fCaps);
+    if (RT_SUCCESS(rc))
+    {
+        if (fCaps & SUPVTCAPS_VT_X)
         {
-            uint32_t uFeatEcx, uFeatEdx, uDummy;
-            ASMCpuId(1, &uDummy, &uDummy, &uFeatEcx, &uFeatEdx);
-
-            /* Go to CPU specific initialization code. */
-            if (   ASMIsIntelCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
-                || ASMIsViaCentaurCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
-            {
-                rc = hmR0InitIntel(uFeatEcx, uFeatEdx);
-                if (RT_FAILURE(rc))
-                    return rc;
-            }
-            else if (ASMIsAmdCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
-            {
-                /* Query extended features for SVM capability. */
-                uint32_t       uExtFeatEcx;
-                uint32_t const uMaxExtLeaf = ASMCpuId_EAX(0x80000000);
-                if (ASMIsValidExtRange(uMaxExtLeaf))
-                    ASMCpuId(0x80000001, &uDummy, &uDummy, &uExtFeatEcx, &uDummy);
-                else
-                    uExtFeatEcx = 0;
-
-                rc = hmR0InitAmd(uFeatEdx, uExtFeatEcx, uMaxExtLeaf);
-                if (RT_FAILURE(rc))
-                    return rc;
-            }
-            else
-                g_HmR0.rcInit = VERR_HM_UNKNOWN_CPU;
+            rc = hmR0InitIntel();
+            if (RT_FAILURE(rc))
+                return rc;
         }
         else
-            g_HmR0.rcInit = VERR_HM_UNKNOWN_CPU;
+        {
+            Assert(fCaps & SUPVTCAPS_AMD_V);
+            rc = hmR0InitAmd();
+            if (RT_FAILURE(rc))
+                return rc;
+        }
     }
     else
-        g_HmR0.rcInit = VERR_HM_NO_CPUID;
+        g_HmR0.rcInit = VERR_UNSUPPORTED_CPU;
 
     /*
@@ -724,6 +671,5 @@
     }
 
-    /* We return success here because module init shall not fail if HM
-       fails to initialize. */
+    /* We return success here because module init shall not fail if HM fails to initialize. */
     return VINF_SUCCESS;
 }
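
For reference, the CPUID probing deleted from HMR0Init() above is roughly what the common helper now centralizes. A reconstruction from the removed lines (a sketch with a hypothetical name, not the actual SUPDrv implementation):

    static int hmDetectVTSupportSketch(uint32_t *pfCaps)
    {
        *pfCaps = 0;
        if (!ASMHasCpuId())
            return VERR_UNSUPPORTED_CPU;

        /* Standard leaf 0: vendor and valid leaf range. */
        uint32_t uMaxLeaf, uVendorEbx, uVendorEcx, uVendorEdx;
        ASMCpuId(0, &uMaxLeaf, &uVendorEbx, &uVendorEcx, &uVendorEdx);
        if (!ASMIsValidStdRange(uMaxLeaf))
            return VERR_UNSUPPORTED_CPU;

        /* Standard leaf 1: feature bits. */
        uint32_t uDummy, uFeatEcx, uFeatEdx;
        ASMCpuId(1, &uDummy, &uDummy, &uFeatEcx, &uFeatEdx);

        /* Intel/VIA: VMX plus MSR and FXSR support (as in the old hmR0InitIntel()). */
        if (   (   ASMIsIntelCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
                || ASMIsViaCentaurCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
            && (uFeatEcx & X86_CPUID_FEATURE_ECX_VMX)
            && (uFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
            && (uFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
        {
            *pfCaps = SUPVTCAPS_VT_X;
            return VINF_SUCCESS;
        }

        /* AMD: SVM in extended leaf 0x80000001, with leaf 0x8000000a present
           for the feature query (as in the old hmR0InitAmd()). */
        if (ASMIsAmdCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
        {
            uint32_t const uMaxExtLeaf = ASMCpuId_EAX(0x80000000);
            uint32_t uExtFeatEcx = 0;
            if (ASMIsValidExtRange(uMaxExtLeaf) && uMaxExtLeaf >= 0x8000000a)
                ASMCpuId(0x80000001, &uDummy, &uDummy, &uExtFeatEcx, &uDummy);
            if (   (uExtFeatEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
                && (uFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
                && (uFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
            {
                *pfCaps = SUPVTCAPS_AMD_V;
                return VINF_SUCCESS;
            }
        }
        return VERR_UNSUPPORTED_CPU;
    }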