Changeset 20008 in vbox


Timestamp: May 25, 2009 6:34:43 PM
Author: vboxsync
Message: PDMCritSect: rewrite, ring-0 unlocking not yet enabled.
Location: trunk
Files: 13 edited

  • trunk/include/VBox/pdmcritsect.h

    r19595 r20008  
    6565VMMDECL(void)       PDMCritSectLeave(PPDMCRITSECT pCritSect);
    6666VMMDECL(bool)       PDMCritSectIsOwner(PCPDMCRITSECT pCritSect);
    67 VMMDECL(bool)       PDMCritSectIsLocked(PCPDMCRITSECT pCritSect);
    6867VMMDECL(bool)       PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu);
     68VMMDECL(bool)       PDMCritSectIsOwned(PCPDMCRITSECT pCritSect);
    6969VMMDECL(bool)       PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect);
    7070VMMDECL(uint32_t)   PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect);
     
    7272VMMR3DECL(int)      PDMR3CritSectDelete(PPDMCRITSECT pCritSect);
    7373VMMDECL(int)        PDMR3CritSectTerm(PVM pVM);
    74 VMMR3DECL(void)     PDMR3CritSectFF(PVMCPU pVCpu);
     74VMMDECL(void)       PDMCritSectFF(PVMCPU pVCpu);
    7575VMMR3DECL(uint32_t) PDMR3CritSectCountOwned(PVM pVM, char *pszNames, size_t cbNames);
    7676
  • trunk/include/iprt/critsect.h

    r19589 r20008  
    235235RTDECL(int) RTCritSectDelete(PRTCRITSECT pCritSect);
    236236
     237/**
     238 * Checks the caller is the owner of the critical section.
     239 *
     240 * @returns true if owner.
     241 * @returns false if not owner.
     242 * @param   pCritSect   The critical section.
     243 */
     244DECLINLINE(bool) RTCritSectIsOwner(PCRTCRITSECT pCritSect)
     245{
     246    return pCritSect->NativeThreadOwner == RTThreadNativeSelf();
     247}
     248
     249#endif /* IN_RING3 */
     250
     251/**
     252 * Checks the section is owned by anyone.
     253 *
     254 * @returns true if owned.
     255 * @returns false if not owned.
     256 * @param   pCritSect   The critical section.
     257 */
     258DECLINLINE(bool) RTCritSectIsOwned(PCRTCRITSECT pCritSect)
     259{
     260    return pCritSect->NativeThreadOwner != NIL_RTNATIVETHREAD;
     261}
     262
     263/**
     264 * Gets the thread id of the critical section owner.
     265 *
     266 * @returns Thread id of the owner thread if owned.
      267 * @returns NIL_RTNATIVETHREAD if not owned.
     268 * @param   pCritSect   The critical section.
     269 */
     270DECLINLINE(RTNATIVETHREAD) RTCritSectGetOwner(PCRTCRITSECT pCritSect)
     271{
     272    return pCritSect->NativeThreadOwner;
     273}
    237274
    238275/**
     
    248285}
    249286
    250 
    251 /**
    252  * Checks the caller is the owner of the critical section.
    253  *
    254  * @returns true if owner.
    255  * @returns false if not owner.
    256  * @param   pCritSect   The critical section.
    257  */
    258 DECLINLINE(bool) RTCritSectIsOwner(PCRTCRITSECT pCritSect)
    259 {
    260     return pCritSect->NativeThreadOwner == RTThreadNativeSelf();
    261 }
    262 
    263 
    264 /**
    265  * Checks the section is owned by anyone.
    266  *
    267  * @returns true if owned.
    268  * @returns false if not owned.
    269  * @param   pCritSect   The critical section.
    270  */
    271 DECLINLINE(bool) RTCritSectIsOwned(PCRTCRITSECT pCritSect)
    272 {
    273     return pCritSect->NativeThreadOwner != NIL_RTNATIVETHREAD;
    274 }
    275 
    276 
    277 /**
    278  * Gets the thread id of the critical section owner.
    279  *
    280  * @returns Thread id of the owner thread if owned.
    281  * @returns NIL_RTNATIVETHREAD is not owned.
    282  * @param   pCritSect   The critical section.
    283  */
    284 DECLINLINE(RTNATIVETHREAD) RTCritSectGetOwner(PCRTCRITSECT pCritSect)
    285 {
    286     return pCritSect->NativeThreadOwner;
    287 }
    288 
    289 #endif /* IN_RING3 */
    290 
    291287/**
    292288 * Gets the recursion depth.
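
The three inline helpers added above are arranged so that RTCritSectIsOwner stays ring-3 only while RTCritSectIsOwned and RTCritSectGetOwner sit outside the IN_RING3 guard, giving callers a cheap way to check lock invariants. A minimal ring-3 usage sketch; the demo functions and state variable are hypothetical, not part of this changeset:

    #include <iprt/critsect.h>
    #include <iprt/assert.h>
    #include <iprt/stream.h>

    /* Caller contract: must hold the lock. Cheap to enforce with the new helper. */
    static void demoUpdateState(PRTCRITSECT pCritSect, uint32_t *puState)
    {
        Assert(RTCritSectIsOwner(pCritSect));
        *puState += 1;
    }

    /* Report whether anyone holds the lock and, if so, which native thread. */
    static void demoDumpOwner(PRTCRITSECT pCritSect)
    {
        if (RTCritSectIsOwned(pCritSect))
            RTPrintf("owned by native thread %p\n", (void *)RTCritSectGetOwner(pCritSect));
        else
            RTPrintf("not owned\n");
    }
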
  • trunk/include/iprt/thread.h

    r19906 r20008  
    4747 * @{
    4848 */
     49
     50/**
     51 * The thread state.
     52 */
     53typedef enum RTTHREADSTATE
     54{
     55    /** The usual invalid 0 value. */
     56    RTTHREADSTATE_INVALID = 0,
     57    /** The thread is being initialized. */
     58    RTTHREADSTATE_INITIALIZING,
     59    /** The thread has terminated */
     60    RTTHREADSTATE_TERMINATED,
     61    /** Probably running. */
     62    RTTHREADSTATE_RUNNING,
     63    /** Waiting on a critical section. */
     64    RTTHREADSTATE_CRITSECT,
     65    /** Waiting on a mutex. */
     66    RTTHREADSTATE_MUTEX,
      67    /** Waiting on an event semaphore. */
     68    RTTHREADSTATE_EVENT,
      69    /** Waiting on an event multiple wakeup semaphore. */
     70    RTTHREADSTATE_EVENTMULTI,
     71    /** Waiting on a read write semaphore, read (shared) access. */
     72    RTTHREADSTATE_RW_READ,
     73    /** Waiting on a read write semaphore, write (exclusive) access. */
     74    RTTHREADSTATE_RW_WRITE,
     75    /** The thread is sleeping. */
     76    RTTHREADSTATE_SLEEP,
     77    /** The usual 32-bit size hack. */
     78    RTTHREADSTATE_32BIT_HACK = 0x7fffffff
     79} RTTHREADSTATE;
     80
     81/** Checks if a thread state indicates that the thread is sleeping. */
     82#define RTTHREAD_IS_SLEEPING(enmState) (    (enmState) == RTTHREADSTATE_CRITSECT \
     83                                        ||  (enmState) == RTTHREADSTATE_MUTEX \
     84                                        ||  (enmState) == RTTHREADSTATE_EVENT \
     85                                        ||  (enmState) == RTTHREADSTATE_EVENTMULTI \
     86                                        ||  (enmState) == RTTHREADSTATE_RW_READ \
     87                                        ||  (enmState) == RTTHREADSTATE_RW_WRITE \
     88                                        ||  (enmState) == RTTHREADSTATE_SLEEP \
     89                                       )
    4990
    5091/**
     
    534575RTDECL(void) RTThreadReadLockDec(RTTHREAD Thread);
    535576
     577/**
     578 * Unblocks a thread.
     579 *
     580 * This function is paired with rtThreadBlocking.
     581 *
     582 * @param   hThread     The current thread.
     583 * @param   enmCurState The current state, used to check for nested blocking.
     584 *                      The new state will be running.
     585 */
     586RTDECL(void) RTThreadUnblocked(RTTHREAD hThread, RTTHREADSTATE enmCurState);
     587
     588/**
     589 * Change the thread state to blocking and do deadlock detection.
     590 *
     591 * This is a RT_STRICT method for debugging locks and detecting deadlocks.
     592 *
     593 * @param   hThread     The current thread.
     594 * @param   enmState    The sleep state.
     595 * @param   u64Block    The block data. A pointer or handle.
     596 * @param   pszFile     Where we are blocking.
     597 * @param   uLine       Where we are blocking.
     598 * @param   uId         Where we are blocking.
     599 */
     600RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState, uint64_t u64Block,
     601                              const char *pszFile, unsigned uLine, RTUINTPTR uId);
     602
     603
    536604
    537605/** @name Thread Local Storage
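
With RTTHREADSTATE and the blocking/unblocking calls promoted from the internal header to the public one, code outside thread.cpp can feed the RT_STRICT deadlock detector. A sketch of the intended pairing around a wait, modelled on the critsect code later in this changeset (demoWait and its event handle are hypothetical):

    #include <iprt/thread.h>
    #include <iprt/semaphore.h>

    /* Wrap a semaphore wait so the deadlock detector sees the sleep state. */
    static int demoWait(RTSEMEVENT hEvent)
    {
        RTTHREAD hSelf = RTThreadSelf();
        RTThreadBlocking(hSelf, RTTHREADSTATE_EVENT, (uintptr_t)hEvent, __FILE__, __LINE__, 0);
        int rc = RTSemEventWait(hEvent, RT_INDEFINITE_WAIT);
        RTThreadUnblocked(hSelf, RTTHREADSTATE_EVENT);
        return rc;
    }
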
  • trunk/src/VBox/Runtime/common/misc/thread.cpp

    r15880 r20008  
    14061406 * This is a RT_STRICT method for debugging locks and detecting deadlocks.
    14071407 *
    1408  * @param   pThread     This thread.
     1408 * @param   hThread     The current thread.
    14091409 * @param   enmState    The sleep state.
    14101410 * @param   u64Block    The block data. A pointer or handle.
     
    14131413 * @param   uId         Where we are blocking.
    14141414 */
    1415 void rtThreadBlocking(PRTTHREADINT pThread, RTTHREADSTATE enmState, uint64_t u64Block,
    1416                      const char *pszFile, unsigned uLine, RTUINTPTR uId)
    1417 {
     1415RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState, uint64_t u64Block,
     1416                              const char *pszFile, unsigned uLine, RTUINTPTR uId)
     1417{
     1418    PRTTHREADINT pThread = hThread;
    14181419    Assert(RTTHREAD_IS_SLEEPING(enmState));
    14191420    if (pThread && pThread->enmState == RTTHREADSTATE_RUNNING)
     
    15021503 * This function is paired with rtThreadBlocking.
    15031504 *
    1504  * @param   pThread     The current thread.
     1505 * @param   hThread     The current thread.
    15051506 * @param   enmCurState The current state, used to check for nested blocking.
    15061507 *                      The new state will be running.
    15071508 */
    1508 void rtThreadUnblocked(PRTTHREADINT pThread, RTTHREADSTATE enmCurState)
    1509 {
    1510     if (pThread && pThread->enmState == enmCurState)
    1511         ASMAtomicWriteSize(&pThread->enmState, RTTHREADSTATE_RUNNING);
     1509RTDECL(void) RTThreadUnblocked(RTTHREAD hThread, RTTHREADSTATE enmCurState)
     1510{
     1511    if (hThread && hThread->enmState == enmCurState)
     1512        ASMAtomicWriteSize(&hThread->enmState, RTTHREADSTATE_RUNNING);
    15121513}
    15131514
  • trunk/src/VBox/Runtime/generic/critsect-generic.cpp

    r19989 r20008  
    251251     */
    252252    pCritSect->cNestings = 1;
    253     ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NativeThreadSelf);
     253    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
    254254#ifdef RTCRITSECT_STRICT
    255255    pCritSect->Strict.pszEnterFile = pszFile;
    256256    pCritSect->Strict.u32EnterLine = uLine;
    257257    pCritSect->Strict.uEnterId     = uId;
    258     ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, (RTUINTPTR)ThreadSelf); /* screw gcc and its pedantic warnings. */
     258    ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, ThreadSelf);
    259259#endif
    260260
     
    319319        {
    320320#ifdef RTCRITSECT_STRICT
    321             rtThreadBlocking(ThreadSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, pszFile, uLine, uId);
     321            RTThreadBlocking(ThreadSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, pszFile, uLine, uId);
    322322#endif
    323323            int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
    324324#ifdef RTCRITSECT_STRICT
    325             rtThreadUnblocked(ThreadSelf, RTTHREADSTATE_CRITSECT);
     325            RTThreadUnblocked(ThreadSelf, RTTHREADSTATE_CRITSECT);
    326326#endif
    327327            if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
     
    338338     */
    339339    pCritSect->cNestings = 1;
    340     ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NativeThreadSelf);
     340    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
    341341#ifdef RTCRITSECT_STRICT
    342342    pCritSect->Strict.pszEnterFile = pszFile;
    343343    pCritSect->Strict.u32EnterLine = uLine;
    344344    pCritSect->Strict.uEnterId     = uId;
    345     ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, (RTUINTPTR)ThreadSelf); /* screw gcc and its pedantic warnings. */
     345    ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, ThreadSelf);
    346346    RTThreadWriteLockInc(ThreadSelf);
    347347#endif
     
    383383        if (pCritSect->Strict.ThreadOwner != NIL_RTTHREAD) /* May happen for PDMCritSects when entering GC/R0. */
    384384            RTThreadWriteLockDec(pCritSect->Strict.ThreadOwner);
    385         ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, NIL_RTTHREAD);
    386 #endif
    387         ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
     385        ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, NIL_RTTHREAD);
     386#endif
     387        ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
    388388        if (ASMAtomicDecS32(&pCritSect->cLockers) >= 0)
    389389        {
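
Two mechanical substitutions run through this file: the ASMAtomicXchgSize calls become ASMAtomicWriteHandle, a type-safe atomic write for handle-sized values, and the internal rtThreadBlocking/rtThreadUnblocked calls become the now-public RTThreadBlocking/RTThreadUnblocked. The before/after shape, taken from the hunks above:

    /* old: size-dispatched exchange, cast needed to silence gcc */
    ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, (RTUINTPTR)ThreadSelf);

    /* new: atomic handle write, no cast */
    ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, ThreadSelf);
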
  • trunk/src/VBox/Runtime/include/internal/thread.h

    r8645 r20008  
    4545
    4646
    47 /**
    48  * The thread state.
    49  */
    50 typedef enum RTTHREADSTATE
    51 {
    52     /** The usual invalid 0 value. */
    53     RTTHREADSTATE_INVALID = 0,
    54     /** The thread is being initialized. */
    55     RTTHREADSTATE_INITIALIZING,
    56     /** The thread has terminated */
    57     RTTHREADSTATE_TERMINATED,
    58     /** Probably running. */
    59     RTTHREADSTATE_RUNNING,
    60     /** Waiting on a critical section. */
    61     RTTHREADSTATE_CRITSECT,
    62     /** Waiting on a mutex. */
    63     RTTHREADSTATE_MUTEX,
    64     /** Waiting on a event semaphore. */
    65     RTTHREADSTATE_EVENT,
    66     /** Waiting on a event multiple wakeup semaphore. */
    67     RTTHREADSTATE_EVENTMULTI,
    68     /** Waiting on a read write semaphore, read (shared) access. */
    69     RTTHREADSTATE_RW_READ,
    70     /** Waiting on a read write semaphore, write (exclusive) access. */
    71     RTTHREADSTATE_RW_WRITE,
    72     /** The thread is sleeping. */
    73     RTTHREADSTATE_SLEEP,
    74     /** The usual 32-bit size hack. */
    75     RTTHREADSTATE_32BIT_HACK = 0x7fffffff
    76 } RTTHREADSTATE;
    77 
    78 
    79 /** Checks if a thread state indicates that the thread is sleeping. */
    80 #define RTTHREAD_IS_SLEEPING(enmState) (    (enmState) == RTTHREADSTATE_CRITSECT \
    81                                         ||  (enmState) == RTTHREADSTATE_MUTEX \
    82                                         ||  (enmState) == RTTHREADSTATE_EVENT \
    83                                         ||  (enmState) == RTTHREADSTATE_EVENTMULTI \
    84                                         ||  (enmState) == RTTHREADSTATE_RW_READ \
    85                                         ||  (enmState) == RTTHREADSTATE_RW_WRITE \
    86                                         ||  (enmState) == RTTHREADSTATE_SLEEP \
    87                                        )
    8847
    8948/** Max thread name length. */
  • trunk/src/VBox/VMM/EM.cpp

    r19873 r20008  
    32983298{
    32993299    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
    3300         PDMR3CritSectFF(pVCpu);
     3300        PDMCritSectFF(pVCpu);
    33013301
    33023302    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
  • trunk/src/VBox/VMM/PDM.cpp

    r19793 r20008  
    335335     * Initialize sub components.
    336336     */
    337     int rc = pdmR3CritSectInit(pVM);
     337    int rc = RTCritSectInit(&pVM->pdm.s.MiscCritSect);
    338338    if (RT_SUCCESS(rc))
    339     {
     339        rc = pdmR3CritSectInit(pVM);
     340    if (RT_SUCCESS(rc))
    340341        rc = PDMR3CritSectInit(pVM, &pVM->pdm.s.CritSect, "PDM");
     342    if (RT_SUCCESS(rc))
     343        rc = pdmR3LdrInitU(pVM->pUVM);
     344    if (RT_SUCCESS(rc))
     345        rc = pdmR3DrvInit(pVM);
     346    if (RT_SUCCESS(rc))
     347        rc = pdmR3DevInit(pVM);
     348#ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
     349    if (RT_SUCCESS(rc))
     350        rc = pdmR3AsyncCompletionInit(pVM);
     351#endif
     352    if (RT_SUCCESS(rc))
     353    {
     354        /*
     355         * Register the saved state data unit.
     356         */
     357        rc = SSMR3RegisterInternal(pVM, "pdm", 1, PDM_SAVED_STATE_VERSION, 128,
     358                                   NULL, pdmR3Save, NULL,
     359                                   pdmR3LoadPrep, pdmR3Load, NULL);
    341360        if (RT_SUCCESS(rc))
    342             rc = pdmR3LdrInitU(pVM->pUVM);
    343         if (RT_SUCCESS(rc))
    344         {
    345             rc = pdmR3DrvInit(pVM);
    346             if (RT_SUCCESS(rc))
    347             {
    348                 rc = pdmR3DevInit(pVM);
    349                 if (RT_SUCCESS(rc))
    350                 {
    351 #ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
    352                     rc = pdmR3AsyncCompletionInit(pVM);
    353                     if (RT_SUCCESS(rc))
    354 #endif
    355                     {
    356                         /*
    357                          * Register the saved state data unit.
    358                          */
    359                         rc = SSMR3RegisterInternal(pVM, "pdm", 1, PDM_SAVED_STATE_VERSION, 128,
    360                                                    NULL, pdmR3Save, NULL,
    361                                                    pdmR3LoadPrep, pdmR3Load, NULL);
    362                         if (RT_SUCCESS(rc))
    363                         {
    364                             LogFlow(("PDM: Successfully initialized\n"));
    365                             return rc;
    366                         }
    367 
    368                     }
    369                 }
    370             }
     361        {
     362            LogFlow(("PDM: Successfully initialized\n"));
     363            return rc;
    371364        }
    372365    }
     
    600593     */
    601594    PDMR3CritSectDelete(&pVM->pdm.s.CritSect);
     595    /* The MiscCritSect is deleted by PDMR3CritSectTerm. */
    602596
    603597    LogFlow(("PDMR3Term: returns %Rrc\n", VINF_SUCCESS));
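
The nested success pyramid in PDMR3Init is flattened into a linear chain, with the new MiscCritSect created first so it is available to the critsect code initialized right after it. Reconstructed from the hunk above (the VBOX_WITH_PDM_ASYNC_COMPLETION step elided), the flow now reads roughly:

    int rc = RTCritSectInit(&pVM->pdm.s.MiscCritSect);
    if (RT_SUCCESS(rc))
        rc = pdmR3CritSectInit(pVM);
    if (RT_SUCCESS(rc))
        rc = PDMR3CritSectInit(pVM, &pVM->pdm.s.CritSect, "PDM");
    if (RT_SUCCESS(rc))
        rc = pdmR3LdrInitU(pVM->pUVM);
    if (RT_SUCCESS(rc))
        rc = pdmR3DrvInit(pVM);
    if (RT_SUCCESS(rc))
        rc = pdmR3DevInit(pVM);
    if (RT_SUCCESS(rc))
    {
        /* Register the saved state data unit and return on success. */
        rc = SSMR3RegisterInternal(pVM, "pdm", 1, PDM_SAVED_STATE_VERSION, 128,
                                   NULL, pdmR3Save, NULL,
                                   pdmR3LoadPrep, pdmR3Load, NULL);
        if (RT_SUCCESS(rc))
            return rc;
    }

Each step runs only if every previous step succeeded, so the error path needs no unwinding pyramid.
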
  • trunk/src/VBox/VMM/PDMCritSect.cpp

    r19991 r20008  
    2020 */
    2121
    22 //#define PDM_WITH_R3R0_CRIT_SECT
    2322
    2423/*******************************************************************************
     
    3332#include <VBox/err.h>
    3433#include <VBox/log.h>
    35 #ifdef PDM_WITH_R3R0_CRIT_SECT
    36 # include <VBox/sup.h>
    37 #endif
     34#include <VBox/sup.h>
    3835#include <iprt/asm.h>
    3936#include <iprt/assert.h>
     
    7269void pdmR3CritSectRelocate(PVM pVM)
    7370{
     71    RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
    7472    for (PPDMCRITSECTINT pCur = pVM->pdm.s.pCritSects;
    7573         pCur;
    7674         pCur = pCur->pNext)
    7775        pCur->pVMRC = pVM->pVMRC;
     76    RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
    7877}
    7978
     
    9291{
    9392    int rc = VINF_SUCCESS;
     93    RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
    9494    while (pVM->pdm.s.pCritSects)
    9595    {
     
    9999            rc = rc2;
    100100    }
     101    RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
     102    RTCritSectDelete(&pVM->pdm.s.MiscCritSect);
    101103    return rc;
    102104}
     
    117119    VM_ASSERT_EMT(pVM);
    118120
    119 #ifdef PDM_WITH_R3R0_CRIT_SECT
    120121    /*
    121122     * Allocate the semaphore.
     
    123124    AssertCompile(sizeof(SUPSEMEVENT) == sizeof(pCritSect->Core.EventSem));
    124125    int rc = SUPSemEventCreate(pVM->pSession, (PSUPSEMEVENT)&pCritSect->Core.EventSem);
    125 #else
    126     int rc = RTCritSectInit(&pCritSect->Core);
    127 #endif
    128126    if (RT_SUCCESS(rc))
    129127    {
    130 #ifdef PDM_WITH_R3R0_CRIT_SECT
    131128        /*
    132129         * Initialize the structure (first bit is c&p from RTCritSectInitEx).
     
    141138        pCritSect->Core.Strict.u32EnterLine  = 0;
    142139        pCritSect->Core.Strict.uEnterId      = 0;
    143 #endif
    144140        pCritSect->pVMR3                     = pVM;
    145141        pCritSect->pVMR0                     = pVM->pVMR0;
     
    172168 * @param   pCritSect       Pointer to the critical section.
    173169 * @param   pszName         The name of the critical section (for statistics).
     170 * @thread  EMT(0)
    174171 */
    175172VMMR3DECL(int) PDMR3CritSectInit(PVM pVM, PPDMCRITSECT pCritSect, const char *pszName)
     
    205202 *
    206203 * @returns Return code from RTCritSectDelete.
     204 *
    207205 * @param   pVM         The VM handle.
    208206 * @param   pCritSect   The critical section.
    209207 * @param   pPrev       The previous critical section in the list.
    210208 * @param   fFinal      Set if this is the final call and statistics shouldn't be deregistered.
     209 *
     210 * @remarks Caller must've entered the MiscCritSect.
    211211 */
    212212static int pdmR3CritSectDeleteOne(PVM pVM, PPDMCRITSECTINT pCritSect, PPDMCRITSECTINT pPrev, bool fFinal)
    213213{
    214 #ifdef PDM_WITH_R3R0_CRIT_SECT
    215214    /*
    216215     * Assert free waiters and so on (c&p from RTCritSectDelete).
     
    220219    Assert(pCritSect->Core.cLockers == -1);
    221220    Assert(pCritSect->Core.NativeThreadOwner == NIL_RTNATIVETHREAD);
    222 #endif
     221    Assert(RTCritSectIsOwner(&pVM->pdm.s.MiscCritSect));
    223222
    224223    /*
     
    234233     * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
    235234     */
    236 #ifdef PDM_WITH_R3R0_CRIT_SECT
    237235    ASMAtomicWriteU32(&pCritSect->Core.u32Magic, 0);
    238236    SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->Core.EventSem;
     
    243241    int rc = SUPSemEventClose(pVM->pSession, hEvent);
    244242    AssertRC(rc);
    245 #endif
    246243    pCritSect->pNext   = NULL;
    247244    pCritSect->pvKey   = NULL;
     
    260257#endif
    261258    }
    262 #ifndef PDM_WITH_R3R0_CRIT_SECT
    263     int rc = RTCritSectDelete(&pCritSect->Core);
    264 #endif
    265259    return rc;
    266260}
     
    282276     * Iterate the list and match key.
    283277     */
    284     int             rc = VINF_SUCCESS;
     278    int             rc    = VINF_SUCCESS;
    285279    PPDMCRITSECTINT pPrev = NULL;
    286     PPDMCRITSECTINT pCur = pVM->pdm.s.pCritSects;
     280    RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
     281    PPDMCRITSECTINT pCur  = pVM->pdm.s.pCritSects;
    287282    while (pCur)
    288283    {
     
    299294        pCur = pCur->pNext;
    300295    }
     296    RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
    301297    return rc;
    302298}
     
    330326     * Find and unlink it.
    331327     */
    332     PVM             pVM = pCritSect->s.pVMR3;
     328    PVM             pVM   = pCritSect->s.pVMR3;
    333329    AssertReleaseReturn(pVM, VERR_INTERNAL_ERROR);
    334330    PPDMCRITSECTINT pPrev = NULL;
    335     PPDMCRITSECTINT pCur = pVM->pdm.s.pCritSects;
     331    RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
     332    PPDMCRITSECTINT pCur  = pVM->pdm.s.pCritSects;
    336333    while (pCur)
    337334    {
    338335        if (pCur == &pCritSect->s)
    339             return pdmR3CritSectDeleteOne(pVM, pCur, pPrev, false /* not final */);
     336        {
     337            int rc = pdmR3CritSectDeleteOne(pVM, pCur, pPrev, false /* not final */);
     338            RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
     339            return rc;
     340        }
    340341
    341342        /* next */
     
    343344        pCur = pCur->pNext;
    344345    }
     346    RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
    345347    AssertReleaseMsgFailed(("pCritSect=%p wasn't found!\n", pCritSect));
    346348    return VERR_INTERNAL_ERROR;
    347 }
    348 
    349 
    350 /**
    351  * Process the critical sections queued for ring-3 'leave'.
    352  *
    353  * @param   pVCpu         The VMCPU handle.
    354  */
    355 VMMR3DECL(void) PDMR3CritSectFF(PVMCPU pVCpu)
    356 {
    357     Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);
    358 
    359     const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    360     for (RTUINT i = 0; i < c; i++)
    361     {
    362         PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
    363 #ifdef PDM_WITH_R3R0_CRIT_SECT
    364         int rc = pdmCritSectLeave(pCritSect);
    365 #else
    366         int rc = RTCritSectLeave(&pCritSect->s.Core);
    367 #endif
    368         LogFlow(("PDMR3CritSectFF: %p - %Rrc\n", pCritSect, rc));
    369         AssertRC(rc);
    370     }
    371 
    372     pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    373     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
    374349}
    375350
     
    382357 * @returns VERR_NOT_OWNER if we're not the critsect owner.
    383358 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
     359 *
    384360 * @param   pCritSect       The critical section.
    385361 * @param   EventToSignal   The semaphore that should be signalled.
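
Because critsect creation and deletion are no longer serialized by there being a single EMT, every walk of the pVM->pdm.s.pCritSects list in this file is now bracketed by the new MiscCritSect. The recurring pattern, as applied in pdmR3CritSectRelocate and the delete paths above:

    RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
    for (PPDMCRITSECTINT pCur = pVM->pdm.s.pCritSects; pCur; pCur = pCur->pNext)
    {
        /* ... relocate, inspect or unlink pCur ... */
    }
    RTCritSectLeave(&pVM->pdm.s.MiscCritSect);

PDMR3CritSectTerm takes the lock the same way and, once the list is empty, deletes MiscCritSect itself.
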
  • trunk/src/VBox/VMM/PDMInternal.h

    r20001 r20008  
    4343 * @{
    4444 */
     45
     46/** @def PDM_WITH_R3R0_CRIT_SECT
     47 * Enables or disables ring-3/ring-0 critical sections. */
     48#if defined(DOXYGEN_RUNNING) || 1
     49# define PDM_WITH_R3R0_CRIT_SECT
     50#endif
     51
    4552
    4653/*******************************************************************************
     
    227234} PDMCRITSECTINT;
    228235typedef PDMCRITSECTINT *PPDMCRITSECTINT;
     236
     237/** Indicates that the critical section is queued for unlock.
     238 * PDMCritSectIsOwner and PDMCritSectIsOwned optimizations. */
     239#define PDMCRITSECT_FLAGS_PENDING_UNLOCK    RT_BIT_32(17)
    229240
    230241
     
    805816{
    806817    /** The number of entries in the apQueuedCritSectsLeaves table that's currently in use. */
    807     RTUINT                          cQueuedCritSectLeaves;
    808     RTUINT                          uPadding0; /**< Alignment padding.*/
     818    uint32_t                        cQueuedCritSectLeaves;
     819    uint32_t                        uPadding0; /**< Alignment padding.*/
    809820    /** Critical sections queued in RC/R0 because of contention preventing leave to complete. (R3 Ptrs)
    810821     * We will return to Ring-3 ASAP, so this queue doesn't have to be very long. */
     
    907918     * the PIC, APIC, IOAPIC and PCI devices plus some PDM functions. */
    908919    PDMCRITSECT                     CritSect;
     920    /** The PDM miscellaneous lock.
     921     * This is used to protect things like critsect init/delete that were
     922     * formerly serialized by there being only one EMT.
     923     */
     924    RTCRITSECT                      MiscCritSect;
    909925
    910926    /** Number of times a critical section leave request needed to be queued for ring-3 execution. */
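
The PDMCRITSECT_FLAGS_PENDING_UNLOCK bit lets the ownership queries answer correctly for a section whose leave had to be queued for ring-3: the EMT still technically holds it, but it must be treated as on its way out. The test the queries build on (see PDMAllCritSect.cpp below):

    bool fOwned = pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
               && !(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK);
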
  • trunk/src/VBox/VMM/VMM.cpp

    r19732 r20008  
    15501550static int vmmR3ServiceCallHostRequest(PVM pVM, PVMCPU pVCpu)
    15511551{
    1552     /* We must also check for pending releases or else we can deadlock when acquiring a new lock here.
    1553      * On return we go straight back to R0/GC.
     1552    /*
     1553     * We must also check for pending critsect exits or else we can deadlock
     1554     * when entering other critsects here.
    15541555     */
    15551556    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
    1556         PDMR3CritSectFF(pVCpu);
     1557        PDMCritSectFF(pVCpu);
    15571558
    15581559    switch (pVCpu->vmm.s.enmCallHostOperation)
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp

    r19740 r20008  
    4040
    4141
     42/*******************************************************************************
     43*   Defined Constants And Macros                                               *
     44*******************************************************************************/
     45/** The number of loops to spin for in ring-3. */
     46#define PDMCRITSECT_SPIN_COUNT_R3       20
     47/** The number of loops to spin for in ring-0. */
     48#define PDMCRITSECT_SPIN_COUNT_R0       256
     49/** The number of loops to spin for in the raw-mode context. */
     50#define PDMCRITSECT_SPIN_COUNT_RC       256
     51
     52/** @def PDMCRITSECT_STRICT
     53 * Enables/disables PDM critsect strictness like deadlock detection. */
     54#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
     55# define PDMCRITSECT_STRICT
     56#endif
     57
     58
     59/**
     60 * Gets the ring-3 native thread handle of the calling thread.
     61 *
     62 * @returns native thread handle (ring-3).
     63 * @param   pCritSect           The critical section. This is used in R0 and RC.
     64 */
     65DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PPDMCRITSECT pCritSect)
     66{
     67#ifdef IN_RING3
     68    NOREF(pCritSect);
     69    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
     70#else
     71    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
     72                    VERR_SEM_DESTROYED);
     73    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
     74    PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
     75    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
     76#endif
     77    return hNativeSelf;
     78}
     79
     80
     81/**
     82 * Tail code called when we've won the battle for the lock.
     83 *
     84 * @returns VINF_SUCCESS.
     85 *
     86 * @param   pCritSect       The critical section.
     87 * @param   hNativeSelf     The native handle of this thread.
     88 */
     89DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
     90{
     91    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
     92    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
     93
     94    pCritSect->s.Core.cNestings = 1;
     95    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);
     96
     97# if defined(PDMCRITSECT_STRICT) && defined(IN_RING3)
     98    pCritSect->s.Core.Strict.pszEnterFile = NULL;
     99    pCritSect->s.Core.Strict.u32EnterLine = 0;
     100    pCritSect->s.Core.Strict.uEnterId     = 0;
     101    RTTHREAD hSelf = RTThreadSelf();
     102    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, hSelf);
     103    RTThreadWriteLockInc(hSelf);
     104# endif
     105
     106    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
     107    return VINF_SUCCESS;
     108}
     109
     110
     111#ifdef IN_RING3
     112/**
     113 * Deals with the contended case in ring-3.
     114 *
     115 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
     116 * @param   pCritSect           The critsect.
     117 * @param   hNativeSelf         The native thread handle.
     118 */
     119static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
     120{
     121    /*
     122     * Start waiting.
     123     */
     124    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
     125        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
     126    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
     127
     128    /*
     129     * The wait loop.
     130     */
     131    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
     132    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
     133# ifdef PDMCRITSECT_STRICT
     134    RTTHREAD        hSelf    = RTThreadSelf();
     135    if (hSelf == NIL_RTTHREAD)
     136        RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &hSelf);
     137# endif
     138    for (;;)
     139    {
     140# ifdef PDMCRITSECT_STRICT
     141        RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, NULL, 0, 0);
     142# endif
     143        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
     144# ifdef PDMCRITSECT_STRICT
     145        RTThreadUnblocked(hSelf, RTTHREADSTATE_CRITSECT);
     146# endif
     147        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
     148            return VERR_SEM_DESTROYED;
     149        if (rc == VINF_SUCCESS)
     150            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
     151        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
     152    }
     153    /* won't get here */
     154}
     155#endif /* IN_RING3 */
     156
     157
    42158/**
    43159 * Enters a PDM critical section.
     
    54170{
    55171    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    56 #ifdef IN_RING3
    57     NOREF(rcBusy);
    58 
    59     STAM_REL_STATS({if (pCritSect->s.Core.cLockers >= 0 && !RTCritSectIsOwner(&pCritSect->s.Core))
    60                         STAM_COUNTER_INC(&pCritSect->s.StatContentionR3); });
    61     int rc = RTCritSectEnter(&pCritSect->s.Core);
    62     STAM_STATS({ if (pCritSect->s.Core.cNestings == 1) STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l); });
    63     return rc;
    64 
    65 #else  /* !IN_RING3 */
    66     AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
    67                     VERR_SEM_DESTROYED);
    68     PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    69     Assert(pVM);
    70     PVMCPU pVCpu = VMMGetCpu(pVM);
    71     Assert(pVCpu);
    72 
    73     /*
    74      * Try to take the lock.
    75      */
     172
     173    /*
     174     * If the critical section has already been destroyed, then inform the caller.
     175     */
     176    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);
     177
     178    /*
     179     * See if we're lucky.
     180     */
     181    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
     182    /* Not owned ... */
    76183    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
    77     {
    78         pCritSect->s.Core.cNestings = 1;
    79         Assert(pVCpu->hNativeThread != NIL_RTNATIVETHREAD);
    80         ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread);
    81         STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
     184        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
     185
     186    /* ... or nested. */
     187    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
     188    {
     189        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
     190        pCritSect->s.Core.cNestings++;
     191        pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
    82192        return VINF_SUCCESS;
    83193    }
    84194
    85195    /*
    86      * Nested?
    87      */
    88     if (pCritSect->s.Core.NativeThreadOwner == pVCpu->hNativeThread)
    89     {
    90         pCritSect->s.Core.cNestings++;
    91         ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
    92         return VINF_SUCCESS;
    93     }
    94 
    95     /*
    96      * Failed.
    97      */
     196     * Spin for a bit without incrementing the counter.
     197     */
     198    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     199     *        cpu systems. */
     200    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
     201    while (cSpinsLeft-- > 0)
     202    {
     203        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
     204            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
     205        /** @todo need pause/nop instruction here! */
     206    }
     207
     208#ifdef IN_RING3
     209    /*
     210     * Take the slow path.
     211     */
     212    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf);
     213#else
     214    /*
     215     * Return busy.
     216     */
     217    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    98218    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    99     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    100219    return rcBusy;
    101 #endif /* !IN_RING3 */
     220#endif
    102221}
    103222
     
    115234VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
    116235{
    117 #ifdef IN_RING3
    118     return RTCritSectTryEnter(&pCritSect->s.Core);
    119 #else   /* !IN_RING3 (same code as PDMCritSectEnter except for the log statement) */
    120     AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
    121                     VERR_SEM_DESTROYED);
    122     PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    123     Assert(pVM);
    124     PVMCPU pVCpu = VMMGetCpu(pVM);
    125     Assert(pVCpu);
    126 
    127     /*
    128      * Try to take the lock.
    129      */
     236    /*
     237     * If the critical section has already been destroyed, then inform the caller.
     238     */
     239    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);
     240
     241    /*
     242     * See if we're lucky.
     243     */
     244    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
     245    /* Not owned ... */
    130246    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
    131     {
    132         pCritSect->s.Core.cNestings = 1;
    133         Assert(pVCpu->hNativeThread != NIL_RTNATIVETHREAD);
    134         ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread);
    135         STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
     247        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
     248
     249    /* ... or nested. */
     250    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
     251    {
     252        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
     253        pCritSect->s.Core.cNestings++;
     254        pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
    136255        return VINF_SUCCESS;
    137256    }
    138257
    139     /*
    140      * Nested?
    141      */
    142     if (pCritSect->s.Core.NativeThreadOwner == pVCpu->hNativeThread)
    143     {
    144         pCritSect->s.Core.cNestings++;
    145         ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
    146         return VINF_SUCCESS;
    147     }
    148 
    149     /*
    150      * Failed.
    151      */
     258    /* no spinning */
     259
     260    /*
     261     * Return busy.
     262     */
     263#ifdef IN_RING3
     264    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
     265#else
     266    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
     267#endif
    152268    LogFlow(("PDMCritSectTryEnter: locked\n"));
    153     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    154269    return VERR_SEM_BUSY;
    155 #endif /* !IN_RING3 */
    156270}
    157271
     
    190304VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
    191305{
    192 #ifdef IN_RING3
    193 # ifdef VBOX_WITH_STATISTICS
    194     if (pCritSect->s.Core.cNestings == 1)
    195         STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
    196 # endif
    197     RTSEMEVENT EventToSignal = pCritSect->s.EventToSignal;
    198     if (RT_LIKELY(EventToSignal == NIL_RTSEMEVENT))
    199     {
    200         int rc = RTCritSectLeave(&pCritSect->s.Core);
    201         AssertRC(rc);
    202     }
    203     else
    204     {
    205         pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
    206         int rc = RTCritSectLeave(&pCritSect->s.Core);
    207         AssertRC(rc);
    208         LogBird(("signalling %#x\n", EventToSignal));
    209         rc = RTSemEventSignal(EventToSignal);
    210         AssertRC(rc);
    211     }
    212 
    213 #else /* !IN_RING3 */
    214     Assert(VALID_PTR(pCritSect));
    215306    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    216     Assert(pCritSect->s.Core.cNestings > 0);
    217     Assert(pCritSect->s.Core.cLockers >= 0);
    218     PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    219     Assert(pVM);
    220 
    221 #ifdef VBOX_STRICT
    222     PVMCPU pVCpu = VMMGetCpu(pVM);
    223     Assert(pVCpu);
    224     AssertMsg(pCritSect->s.Core.NativeThreadOwner == pVCpu->hNativeThread, ("Owner %RX64 emt=%RX64\n", pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread));
    225 #endif
    226 
    227     /*
    228      * Deal with nested attempts first.
    229      * (We're exploiting nesting to avoid queuing multiple R3 leaves for the same section.)
    230      */
    231     pCritSect->s.Core.cNestings--;
    232     if (pCritSect->s.Core.cNestings > 0)
    233     {
     307    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
     308    Assert(pCritSect->s.Core.cNestings >= 1);
     309
     310    /*
     311     * Nested leave.
     312     */
     313    if (pCritSect->s.Core.cNestings > 1)
     314    {
     315        pCritSect->s.Core.cNestings--;
    234316        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
    235317        return;
    236318    }
    237 #ifndef VBOX_STRICT
    238     PVMCPU pVCpu = VMMGetCpu(pVM);
    239 #endif
     319
     320#if defined(IN_RING3) /// @todo enable this later - || defined(IN_RING0)
     321    /*
     322     * Leave for real.
     323     */
     324    /* update members. */
     325# ifdef IN_RING3
     326    RTSEMEVENT hEventToSignal    = pCritSect->s.EventToSignal;
     327    pCritSect->s.EventToSignal   = NIL_RTSEMEVENT;
     328#  if defined(PDMCRITSECT_STRICT)
     329    if (pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
     330        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
     331    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
     332#  endif
     333# endif
     334    pCritSect->s.Core.fFlags    &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
     335    Assert(pCritSect->s.Core.Strict.ThreadOwner == NIL_RTTHREAD);
     336    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
     337    pCritSect->s.Core.cNestings--;
     338
     339    /* stop and decrement lockers. */
     340    STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
     341    if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
     342    {
     343        /* Someone is waiting, wake up one of them. */
     344        SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
     345        PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
     346        int rc = SUPSemEventSignal(pSession, hEvent);
     347        AssertRC(rc);
     348    }
     349
     350# ifdef IN_RING3
     351    /* Signal exit event. */
     352    if (hEventToSignal != NIL_RTSEMEVENT)
     353    {
     354        LogBird(("Signalling %#x\n", hEventToSignal));
     355        int rc = RTSemEventSignal(hEventToSignal);
     356        AssertRC(rc);
     357    }
     358# endif
     359
     360#else  /* IN_RC */
    240361    /*
    241362     * Try leave it.
     
    243364    if (pCritSect->s.Core.cLockers == 0)
    244365    {
     366        pCritSect->s.Core.cNestings  = 0;
     367        RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
     368        pCritSect->s.Core.fFlags    &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
    245369        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
     370
    246371        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
    247372        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
     
    249374
    250375        /* darn, someone raced in on us. */
    251         Assert(pVCpu->hNativeThread);
    252         ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread);
     376        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
    253377        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    254     }
    255     pCritSect->s.Core.cNestings = 1;
     378        pCritSect->s.Core.cNestings = 1;
     379    }
     380    pCritSect->s.Core.fFlags |= PDMCRITSECT_FLAGS_PENDING_UNLOCK;
    256381
    257382    /*
    258383     * Queue the request.
    259384     */
    260     RTUINT i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
     385    PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
     386    PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
     387    uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
    261388    LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
    262389    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
     
    266393    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
    267394    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    268 #endif /* !IN_RING3 */
    269 }
     395#endif /* IN_RC */
     396}
     397
     398
     399#if defined(IN_RING3) || defined(IN_RING0)
     400/**
     401 * Process the critical sections queued for ring-3 'leave'.
     402 *
     403 * @param   pVCpu         The VMCPU handle.
     404 */
     405VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
     406{
     407    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);
     408
     409    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
     410    for (RTUINT i = 0; i < c; i++)
     411    {
     412# ifdef IN_RING3
     413        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
     414# else
     415        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
     416# endif
     417
     418        PDMCritSectLeave(pCritSect);
     419        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
     420    }
     421
     422    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
     423    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
     424}
     425#endif /* IN_RING3 || IN_RING0 */
    270426
    271427
     
    282438    return RTCritSectIsOwner(&pCritSect->s.Core);
    283439#else
    284     PVM     pVM = pCritSect->s.CTX_SUFF(pVM);
    285     PVMCPU  pVCpu = VMMGetCpu(pVM);
    286     Assert(pVM); Assert(pVCpu);
     440    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
     441    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    287442    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
    288443        return false;
    289 
    290     /* Make sure the critical section is not scheduled to be unlocked. */
    291     if (    !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PDM_CRITSECT)
    292         ||  RTCritSectGetRecursion(&pCritSect->s.Core) > 1)
    293         return true;
    294 
    295     for (unsigned i = 0; i < pVCpu->pdm.s.cQueuedCritSectLeaves; i++)
    296     {
    297         if (pVCpu->pdm.s.apQueuedCritSectsLeaves[i] == MMHyperCCToR3(pVM, (void *)pCritSect))
    298             return false;   /* scheduled for release; pretend it's not owned by us. */
    299     }
    300     return true;
    301 #endif
    302 }
     444    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
     445#endif
     446}
     447
    303448
    304449/**
     
    317462#else
    318463    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    319     Assert(pVM);
     464    AssertPtr(pVM);
    320465    Assert(idCpu < pVM->cCPUs);
    321     return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread;
    322 #endif
    323 }
     466    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
     467        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
     468#endif
     469}
     470
    324471
    325472/**
    326473 * Checks if somebody currently owns the critical section.
    327  * Note: This doesn't prove that no deadlocks will occur later on; it's just a debugging tool
    328474 *
    329475 * @returns true if locked.
    330476 * @returns false if not locked.
     477 *
    331478 * @param   pCritSect   The critical section.
    332  */
    333 VMMDECL(bool) PDMCritSectIsLocked(PCPDMCRITSECT pCritSect)
    334 {
    335     return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD;
    336 }
     479 *
     480 * @remarks This doesn't prove that no deadlocks will occur later on; it's
     481 *          just a debugging tool
     482 */
     483VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
     484{
     485    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
     486        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
     487}
     488
    337489
    338490/**
     
    345497VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
    346498{
    347     return pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC;
     499    return RTCritSectIsInitialized(&pCritSect->s.Core);
    348500}
    349501
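
The rewritten PDMCritSectEnter takes the uncontended path with a single compare-and-exchange, handles recursion by bumping cNestings, spins a context-dependent number of times, and only then either blocks on the SUPSemEvent (ring-3) or returns rcBusy (ring-0 and raw-mode). Typical caller-side usage; pThis and the choice of rcBusy are hypothetical device code, not part of this changeset:

    int rc = PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
    if (rc == VERR_SEM_BUSY)
        return rc;              /* R0/RC only: back off to ring-3 and retry there */
    AssertRC(rc);

    /* ... access the guarded device state ... */

    PDMCritSectLeave(&pThis->CritSect);

In ring-3 the rcBusy argument is never returned; the contended path waits on the semaphore instead.
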
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r19874 r20008  
    20332033VMMDECL(bool) PGMIsLocked(PVM pVM)
    20342034{
    2035     return PDMCritSectIsLocked(&pVM->pgm.s.CritSect);
    2036 }
     2035    return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
     2036}
     2037
    20372038
    20382039/**