VirtualBox

Changeset 31352 in vbox


Ignore:
Timestamp:
Aug 4, 2010 9:45:01 AM (14 years ago)
Author:
vboxsync
Message:

Don't assume a thread can't migrate to another CPU. The code that requires this must (and already does) disable preemption.

Location:
trunk/src/VBox/VMM
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/VMMAll.cpp

    r30329 r31352  
    6060        return 0;
    6161
    62     /* RTMpCpuId had better be cheap. */
    63     RTCPUID idHostCpu = RTMpCpuId();
     62    /* RTThreadNativeSelf had better be cheap. */
     63    RTNATIVETHREAD hThread = RTThreadNativeSelf();
    6464
    6565    /** @todo optimize for large number of VCPUs when that becomes more common. */
     
    6868        PVMCPU pVCpu = &pVM->aCpus[idCpu];
    6969
    70         if (pVCpu->idHostCpu == idHostCpu)
     70        if (pVCpu->hNativeThread == hThread)
    7171            return pVCpu->idCpu;
    7272    }
     
    9999        return &pVM->aCpus[0];
    100100
    101     /* RTMpCpuId had better be cheap. */
    102     RTCPUID idHostCpu = RTMpCpuId();
     101    /* RTThreadNativeSelf had better be cheap. */
     102    RTNATIVETHREAD hThread = RTThreadNativeSelf();
    103103
    104104    /** @todo optimize for large number of VCPUs when that becomes more common. */
     
    107107        PVMCPU pVCpu = &pVM->aCpus[idCpu];
    108108
    109         if (pVCpu->idHostCpu == idHostCpu)
     109        if (pVCpu->hNativeThread == hThread)
    110110            return pVCpu;
    111111    }
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r31351 r31352  
    965965            PVMCPU pVCpu = &pVM->aCpus[idCpu];
    966966
    967             /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account).
    968              * Also grab the fast mutex before disabling preemption.
    969              */
     967            /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
    970968            int rc = GMMR0CheckSharedModulesStart(pVM);
    971969            if (rc == VINF_SUCCESS)
    972970            {
    973 # ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    974                 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    975                 RTThreadPreemptDisable(&PreemptState);
    976 # elif !defined(RT_OS_WINDOWS)
    977                 RTCCUINTREG uFlags = ASMIntDisableFlags();
    978 # else
    979                 AssertCompileFailed();
    980 # endif
    981                 /* Select a valid VCPU context. */
    982                 ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
    983 
    984971                rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
    985 
    986                 /* Clear the VCPU context. */
    987                 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
    988 # ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    989                 RTThreadPreemptRestore(&PreemptState);
    990 # elif !defined(RT_OS_WINDOWS)
    991                 ASMSetFlags(uFlags);
    992 # else
    993                 AssertCompileFailed();
    994 # endif
    995 
     972                Assert(     rc == VINF_SUCCESS
     973                       ||   (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
    996974                GMMR0CheckSharedModulesEnd(pVM);
    997975            }
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle · Support · Privacy / Do Not Sell My Info · Terms of Use · Trademark Policy · Automated Access Etiquette