VirtualBox

Changeset 90677 in vbox for trunk


Timestamp: Aug 13, 2021 10:30:37 AM (3 years ago)
Author:    vboxsync
Message:   VMM/PDMCritSectRwEnterShared: Implemented waiting in ring-0/HM context. bugref:6695
Location:  trunk/src/VBox/VMM
Files:     4 edited
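What the change does, in short: ring-0/HM callers of PDMCritSectRwEnterShared no longer wait indefinitely on the read event and punt to ring-3 via VMMRZCallRing3 when interrupted; they now wait in bounded, timed slices, re-validate the section between slices, check for thread termination, and bail out of the wait queue once a total time budget is exceeded. The stand-alone C++ sketch below illustrates only that waiting pattern; it is not VirtualBox code, and every name in it (SharedWaitDemo, kMagic, readAllowed, waitShared) is a hypothetical stand-in for the SUPSemEventMulti waits, the RTCRITSECTRW_MAGIC check and the VMM status codes used by the real implementation. Interrupt/APC handling and the non-interruptible fallback are intentionally omitted.

    #include <chrono>
    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    enum class WaitResult { Success, Timeout, Destroyed };

    struct SharedWaitDemo                 /* hypothetical stand-in for PDMCRITSECTRW */
    {
        static constexpr uint32_t kMagic = UINT32_C(0x1badc0de); /* ~ RTCRITSECTRW_MAGIC */
        uint32_t                magic = kMagic;      /* cleared when the section is deleted */
        bool                    readAllowed = false; /* ~ the hEvtRead multi-release event  */
        std::mutex              mtx;
        std::condition_variable cv;

        void allowRead()                  /* ~ signalling hEvtRead when the direction flips */
        {
            std::lock_guard<std::mutex> lock(mtx);
            readAllowed = true;
            cv.notify_all();
        }

        WaitResult waitShared(std::chrono::nanoseconds cNsMaxTotal = std::chrono::minutes(5))
        {
            using clock = std::chrono::steady_clock;
            auto const tsStart  = clock::now();
            auto const cMsSlice = std::chrono::milliseconds(5000);    /* ~ cMsMaxOne */
            std::unique_lock<std::mutex> lock(mtx);
            for (;;)
            {
                bool const fSignalled = cv.wait_for(lock, cMsSlice, [this] { return readAllowed; });
                if (magic != kMagic)                     /* destroyed while we slept        */
                    return WaitResult::Destroyed;        /* ~ VERR_SEM_DESTROYED            */
                if (fSignalled)
                    return WaitResult::Success;          /* direction changed, we can read  */
                if (clock::now() - tsStart > cNsMaxTotal)
                    return WaitResult::Timeout;          /* ~ bail out with rcBusy/timeout  */
                /* Only this slice timed out: loop and wait again within the budget. */
            }
        }
    };
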

Legend: each file is diffed against r90677 (the two revisions compared are shown above each file's hunks). Context lines carry both the old and the new line number, removed lines carry only the old line number, and added lines carry only the new line number.
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSectBoth.cpp

    r90532 r90677  
    5959
    6060        pdmCritSectRwLeaveSharedQueued(pVM, pCritSectRw);
    61         LogFlow(("PDMR3CritSectFF: %p (R/W)\n", pCritSectRw));
     61        LogIt(RTLOGGRPFLAGS_FLOW, LOG_GROUP_PDM_CRITSECTRW, ("PDMR3CritSectFF: %p (shared)\n", pCritSectRw));
    6262    }
    6363
     
    7575
    7676        pdmCritSectRwLeaveExclQueued(pVM, pCritSectRw);
    77         LogFlow(("PDMR3CritSectFF: %p (R/W)\n", pCritSectRw));
     77        LogIt(RTLOGGRPFLAGS_FLOW, LOG_GROUP_PDM_CRITSECTRW, ("PDMR3CritSectFF: %p (exclusive)\n", pCritSectRw));
    7878    }
    7979
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp

    r90673 r90677  
    2020*   Header Files                                                                                                                 *
    2121*********************************************************************************************************************************/
    22 #define LOG_GROUP LOG_GROUP_PDM_CRITSECT
     22#define LOG_GROUP LOG_GROUP_PDM_CRITSECTRW
    2323#include "PDMInternal.h"
    2424#include <VBox/vmm/pdmcritsectrw.h>
     
    9898
    9999
     100/*********************************************************************************************************************************
     101*   Internal Functions                                                                                                           *
     102*********************************************************************************************************************************/
     103static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
     104
     105
    100106#ifdef RTASM_HAVE_CMP_WRITE_U128
    101107
     
    193199
    194200
    195 #ifdef IN_RING0
    196 /**
    197  * Go back to ring-3 so the kernel can do signals, APCs and other fun things.
    198  *
    199  * @param   pVM         The cross context VM structure.
    200  */
    201 static void pdmR0CritSectRwYieldToRing3(PVMCC pVM)
    202 {
    203     PVMCPUCC pVCpu = VMMGetCpu(pVM);
    204     AssertPtrReturnVoid(pVCpu);
    205     int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
    206     AssertRC(rc);
    207 }
    208 #endif /* IN_RING0 */
    209 
    210 
    211 /**
    212  * Worker for pdmCritSectRwEnterSharedContended that decrements both read counts
    213  * and returns @a rc.
    214  */
    215 DECL_FORCE_INLINE(int) pdmCritSectRwEnterSharedBailOut(PPDMCRITSECTRW pThis, int rc)
    216 {
    217     for (;;)
    218     {
    219         uint64_t       u64State    = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
    220         uint64_t const u64OldState = u64State;
    221         uint64_t       c           = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
    222         AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));
    223         c--;
    224         uint64_t       cWait       = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
    225         AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
    226         cWait--;
    227         u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
    228         u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
    229         if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
    230             return rc;
    231 
    232         ASMNopPause();
    233         AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
    234     }
    235 }
    236 
    237 
    238201/**
    239202 * Worker for pdmCritSectRwEnterShared returning with read-ownership of the CS.
     
    255218}
    256219
     220/**
     221 * Worker for pdmCritSectRwEnterShared and pdmCritSectRwEnterSharedBailOut
     222 * that decrements the wait count and maybe resets the semaphore.
     223 */
     224DECLINLINE(int) pdmCritSectRwEnterSharedGotItAfterWaiting(PVMCC pVM, PPDMCRITSECTRW pThis, uint64_t u64State,
     225                                                          PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
     226{
     227    for (;;)
     228    {
     229        uint64_t const u64OldState = u64State;
     230        uint64_t       cWait       = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
     231        AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
     232        AssertReturn((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT > 0,
     233                     pdmCritSectRwCorrupted(pThis, "Invalid read count"));
     234        cWait--;
     235        u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
     236        u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
     237
     238        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
     239        {
     240            if (cWait == 0)
     241            {
     242                if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
     243                {
     244                    int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
     245                    AssertRCReturn(rc, rc);
     246                }
     247            }
     248            return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
     249        }
     250
     251        ASMNopPause();
     252        AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
     253        ASMNopPause();
     254
     255        u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
     256    }
     257    /* not reached */
     258}
     259
     260
     261#if defined(IN_RING0) || (defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT))
     262/**
     263 * Worker for pdmCritSectRwEnterSharedContended that decrements both read counts
     264 * and returns @a rc.
     265 *
     266 * @note May return VINF_SUCCESS if we race the exclusive leave function and
     267 *       come out on the bottom.
     268 *
     269 *       Ring-3 only calls in a case where it is _not_ acceptable to take the
     270 *       lock, so even if we get the lock we'll have to leave.  In the ring-0
     271 *       contexts, we can safely return VINF_SUCCESS in case of a race.
     272 */
     273DECL_NO_INLINE(static, int) pdmCritSectRwEnterSharedBailOut(PVMCC pVM, PPDMCRITSECTRW pThis, int rc,
     274                                                            PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
     275{
     276#ifdef IN_RING0
     277    uint64_t const tsStart    = RTTimeNanoTS();
     278    uint64_t       cNsElapsed = 0;
     279#endif
     280    for (;;)
     281    {
     282        uint64_t u64State    = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
     283        uint64_t u64OldState = u64State;
     284
     285        uint64_t cWait       = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
     286        AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
     287        cWait--;
     288
     289        uint64_t c           = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
     290        AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));
     291
     292        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
     293        {
     294            c--;
     295            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
     296            u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
     297            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
     298                return rc;
     299        }
     300        else
     301        {
     302            /*
     303             * The direction changed, so we can actually get the lock now.
     304             *
     305             * This means that we _have_ to wait on the semaphore to be signalled
     306             * so we can properly reset it.  Otherwise the stuff gets out of whack,
     307             * because signalling and resetting will race one another.  An
     308             * exception would be if we're not the last reader waiting and don't
     309             * need to worry about the resetting.
     310             *
     311             * An option would be to do the resetting in PDMCritSectRwEnterExcl,
     312             * but that would still leave a racing PDMCritSectRwEnterShared
     313             * spinning hard for a little bit, which isn't great...
     314             */
     315            if (cWait == 0)
     316            {
     317# ifdef IN_RING0
     318                /* Do timeout processing first to avoid redoing the above. */
     319                uint32_t cMsWait;
     320                if (cNsElapsed <= RT_NS_10SEC)
     321                    cMsWait = 32;
     322                else
     323                {
     324                    u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
     325                    u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
     326                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
     327                    {
     328                        LogFunc(("%p: giving up\n", pThis));
     329                        return rc;
     330                    }
     331                    cMsWait = 2;
     332                }
     333
     334                int rcWait = SUPSemEventMultiWait(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, cMsWait);
     335                Log11Func(("%p: rc=%Rrc %'RU64 ns (hNativeWriter=%p u64State=%#RX64)\n", pThis, rcWait,
     336                           RTTimeNanoTS() - tsStart, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
     337# else
     338                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
     339                int rcWait = SUPSemEventMultiWaitNoResume(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, RT_MS_5SEC);
     340                RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
     341# endif
     342                if (rcWait == VINF_SUCCESS)
     343                {
     344# ifdef IN_RING0
     345                    return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
     346# else
     347                    /* ring-3: Cannot return VINF_SUCCESS. */
     348                    Assert(RT_FAILURE_NP(rc));
     349                    int rc2 = pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
     350                    if (RT_SUCCESS(rc2))
     351                        rc2 = pdmCritSectRwLeaveSharedWorker(pVM, pThis, fNoVal);
     352                    return rc;
     353# endif
     354                }
     355                AssertMsgReturn(rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED,
     356                                ("%p: rcWait=%Rrc rc=%Rrc", pThis, rcWait, rc),
     357                                RT_FAILURE_NP(rcWait) ? rcWait : -rcWait);
     358            }
     359            else
     360            {
     361                u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
     362                u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
     363                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
     364                    return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
     365            }
     366
     367# ifdef IN_RING0
     368            /* Calculate the elapsed time here to avoid redoing state work. */
     369            cNsElapsed = RTTimeNanoTS() - tsStart;
     370# endif
     371        }
     372
     373        ASMNopPause();
     374        AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
     375        ASMNopPause();
     376    }
     377}
     378#endif /* IN_RING0  || (IN_RING3 && PDMCRITSECTRW_STRICT) */
     379
    257380
    258381/**
     
    263386                                             int rcBusy, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
    264387{
    265     RT_NOREF(pVCpu, rcBusy);
    266 
    267 # if !defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    268     hThreadSelf = RTThreadSelf();
     388    PSUPDRVSESSION const    pSession          = pVM->pSession;
     389    SUPSEMEVENTMULTI const  hEventMulti       = (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead;
     390# ifdef IN_RING0
     391    uint64_t const          tsStart           = RTTimeNanoTS();
     392    uint64_t                cNsMaxTotal       = RT_NS_5MIN;
     393    uint32_t                cMsMaxOne         = RT_MS_5SEC;
     394    bool                    fNonInterruptible = false;
    269395# endif
    270396
     
    280406                                                   RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
    281407        if (RT_FAILURE(rc))
    282             return pdmCritSectRwEnterSharedBailOut(pThis, rc);
     408            return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
    283409#  else
    284410        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
    285411#  endif
    286412# endif
     413
    287414        for (;;)
    288415        {
    289             rc = SUPSemEventMultiWaitNoResume(pVM->pSession,
    290                                               (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
    291                                               RT_INDEFINITE_WAIT);
    292             if (   rc != VERR_INTERRUPTED
    293                 || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
     416            /*
     417             * We always wait with a timeout so we can re-check the structure sanity
     418             * and not get stuck waiting on a corrupt or deleted section.
     419             */
     420# ifdef IN_RING3
     421            rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, RT_MS_5SEC);
     422# else
     423            rc = !fNonInterruptible
     424               ? SUPSemEventMultiWaitNoResume(pSession, hEventMulti, cMsMaxOne)
     425               : SUPSemEventMultiWait(pSession, hEventMulti, cMsMaxOne);
     426            Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p u64State=%#RX64)\n", pThis, rc,
     427                       RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
     428# endif
     429            if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
     430            { /* likely */ }
     431            else
     432            {
     433# ifdef IN_RING3
     434                RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
     435# endif
     436                return VERR_SEM_DESTROYED;
     437            }
     438            if (RT_LIKELY(rc == VINF_SUCCESS))
    294439                break;
     440
     441            /*
     442             * Timeout and interrupted waits need careful handling in ring-0
     443             * because we're cooperating with ring-3 on this critical section
     444             * and thus need to make absolutely sure we won't get stuck here.
     445             *
     446             * The r0 interrupted case means something is pending (termination,
     447             * signal, APC, debugger, whatever), so we must try our best to
     448             * return to the caller and to ring-3 so it can be dealt with.
     449             */
     450            if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
     451            {
    295452# ifdef IN_RING0
    296             pdmR0CritSectRwYieldToRing3(pVM);
    297 # endif
    298         }
     453                uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
     454                int const      rcTerm     = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
     455                AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
     456                          ("rcTerm=%Rrc\n", rcTerm));
     457                if (rcTerm == VERR_NOT_SUPPORTED)
     458                    cNsMaxTotal = RT_NS_1MIN;
     459
     460                if (rc == VERR_TIMEOUT)
     461                {
     462                    /* Try to get out of here with a non-VINF_SUCCESS status if
     463                       the thread is terminating or if the timeout has been exceeded. */
     464                    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrTimeout);
     465                    if (   rcTerm == VINF_THREAD_IS_TERMINATING
     466                        || cNsElapsed > cNsMaxTotal)
     467                        return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
     468                                                               pSrcPos, fNoVal, hThreadSelf);
     469                }
     470                else
     471                {
     472                    /* For interrupt cases, we must return if we can.  If rcBusy is VINF_SUCCESS,
     473                       we will try non-interruptible sleep for a while to help resolve the issue
     474                       w/o guru'ing. */
     475                    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrInterrupted);
     476                    if (   rcTerm != VINF_THREAD_IS_TERMINATING
     477                        && rcBusy == VINF_SUCCESS
     478                        && pVCpu != NULL
     479                        && cNsElapsed <= cNsMaxTotal)
     480                    {
     481                        if (!fNonInterruptible)
     482                        {
     483                            STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedNonInterruptibleWaits);
     484                            fNonInterruptible   = true;
     485                            cMsMaxOne           = 32;
     486                            uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
     487                            if (cNsLeft > RT_NS_10SEC)
     488                                cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
     489                        }
     490                    }
     491                    else
     492                        return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
     493                                                               pSrcPos, fNoVal, hThreadSelf);
     494                }
     495# else  /* IN_RING3 */
     496                RT_NOREF(pVM, pVCpu, rcBusy);
     497# endif /* IN_RING3 */
     498            }
     499            /*
     500             * Any other return code is fatal.
     501             */
     502            else
     503            {
     504# ifdef IN_RING3
     505                RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
     506# endif
     507                AssertMsgFailed(("rc=%Rrc\n", rc));
     508                return RT_FAILURE_NP(rc) ? rc : -rc;
     509            }
     510        }
     511
    299512# ifdef IN_RING3
    300513        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
    301514# endif
    302         if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
    303         { /* likely */ }
    304         else
    305             return VERR_SEM_DESTROYED;
    306         if (RT_SUCCESS(rc))
    307         { /* likely */ }
    308         else
    309             return pdmCritSectRwEnterSharedBailOut(pThis, rc);
    310515
    311516        /*
     
    319524             * Decrement the wait count and maybe reset the semaphore (if we're last).
    320525             */
    321             for (;;)
    322             {
    323                 uint64_t const u64OldState = u64State;
    324                 uint64_t       cWait       = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
    325                 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
    326                 AssertReturn((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT > 0,
    327                              pdmCritSectRwCorrupted(pThis, "Invalid read count"));
    328                 cWait--;
    329                 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
    330                 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
    331 
    332                 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
    333                 {
    334                     if (cWait == 0)
    335                     {
    336                         if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
    337                         {
    338                             rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
    339                             AssertRCReturn(rc, rc);
    340                         }
    341                     }
    342                     return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
    343                 }
    344 
    345                 ASMNopPause();
    346                 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
    347                 ASMNopPause();
    348 
    349                 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
    350             }
    351             /* not reached */
    352         }
    353 
    354         AssertMsg(iLoop < 1, ("%u\n", iLoop));
     526            return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
     527        }
     528
     529        AssertMsg(iLoop < 1,
     530                  ("%p: %u u64State=%#RX64 hNativeWriter=%p\n", pThis, iLoop, u64State, pThis->s.Core.u.s.hNativeWriter));
    355531        RTThreadYield();
    356532    }
     
    379555    AssertPtr(pThis);
    380556    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
    381 #ifdef IN_RING3
    382     RT_NOREF(rcBusy);
    383 #endif
    384557
    385558#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
     
    468641
    469642#if defined(IN_RING3) || defined(IN_RING0)
    470 # ifdef IN_RING0
    471             if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
    472                 && ASMIntAreEnabled())
    473 # endif
     643            /*
     644             * Add ourselves to the queue and wait for the direction to change.
     645             */
     646            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
     647            c++;
     648            Assert(c < RTCSRW_CNT_MASK / 2);
     649            AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
     650
     651            uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
     652            cWait++;
     653            Assert(cWait <= c);
     654            Assert(cWait < RTCSRW_CNT_MASK / 2);
     655            AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
     656
     657            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
     658            u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
     659
     660            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
    474661            {
    475662                /*
    476                  * Add ourselves to the queue and wait for the direction to change.
     663                 * In ring-3 it's straightforward; just optimize the RTThreadSelf() call.
    477664                 */
    478                 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
    479                 c++;
    480                 Assert(c < RTCSRW_CNT_MASK / 2);
    481                 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
    482 
    483                 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
    484                 cWait++;
    485                 Assert(cWait <= c);
    486                 Assert(cWait < RTCSRW_CNT_MASK / 2);
    487                 AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
    488 
    489                 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
    490                 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
    491 
    492                 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
     665# if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
     666                return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
     667# elif defined(IN_RING3)
     668                return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, RTThreadSelf());
     669# else /* IN_RING0 */
     670                /*
     671                 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
     672                 * account when waiting on contended locks.
     673                 */
     674                PVMCPUCC pVCpu = VMMGetCpu(pVM);
     675                if (pVCpu)
    493676                {
    494                     return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
     677                    VMMR0EMTBLOCKCTX Ctx;
     678                    int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
     679                    if (rc == VINF_SUCCESS)
     680                    {
     681                        Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     682
     683                        rc = pdmCritSectRwEnterSharedContended(pVM, pVCpu, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
     684
     685                        VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
     686                    }
     687                    else
     688                    {
     689                        //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
     690                        rc = pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
     691                    }
     692                    return rc;
    495693                }
     694
     695                /* Non-EMT. */
     696                Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     697                return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
     698# endif /* IN_RING0 */
    496699            }
    497 #endif /* IN_RING3 || IN_RING3 */
    498 #ifndef IN_RING3
    499 # ifdef IN_RING0
    500             else
    501 # endif
     700
     701#else  /* !IN_RING3 && !IN_RING0 */
     702            /*
     703             * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
     704             * back to ring-3 and do it there or return rcBusy.
     705             */
     706# error "Unused code."
     707            STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
     708            if (rcBusy == VINF_SUCCESS)
    502709            {
    503                 /*
    504                  * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
    505                  * back to ring-3 and do it there or return rcBusy.
    506                  */
    507                 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
    508                 if (rcBusy == VINF_SUCCESS)
    509                 {
    510                     PVMCPUCC  pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
    511                     /** @todo Should actually do this in via VMMR0.cpp instead of going all the way
    512                      *        back to ring-3. Goes for both kind of crit sects. */
    513                     return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
    514                 }
    515                 return rcBusy;
     710                PVMCPUCC  pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
     711                /** @todo Should actually do this in via VMMR0.cpp instead of going all the way
     712                 *        back to ring-3. Goes for both kind of crit sects. */
     713                return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
    516714            }
    517 #endif /* !IN_RING3 */
     715            return rcBusy;
     716#endif /* !IN_RING3 && !IN_RING0 */
    518717        }
    519718
     
    9721171#  endif
    9731172# endif
     1173
    9741174        for (;;)
    9751175        {
     
    9961196                return VERR_SEM_DESTROYED;
    9971197            }
    998             if (rc == VINF_SUCCESS)
    999             {
    1000 # ifdef IN_RING3
    1001                 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
    1002 # endif
     1198            if (RT_LIKELY(rc == VINF_SUCCESS))
    10031199                break;
    1004             }
    10051200
    10061201            /*
     
    10131208             * return to the caller and to ring-3 so it can be dealt with.
    10141209             */
    1015             if (RT_LIKELY(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED))
     1210            if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
    10161211            {
    10171212# ifdef IN_RING0
     
    10741269        }
    10751270
     1271# ifdef IN_RING3
     1272        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
     1273# endif
     1274
    10761275        /*
    10771276         * Try take exclusive write ownership.
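
For context on the ASMAtomicCmpXchgU64 loops above: the reader count and the waiting-reader count are packed into the single 64-bit u64State word and updated with compare-and-exchange retry loops. The sketch below is a simplified, stand-alone illustration of that bookkeeping, not the VirtualBox code; the field layout and names (kRdCountMask, retireWaitingReader) are hypothetical stand-ins for the RTCSRW_CNT_RD_* / RTCSRW_WAIT_CNT_RD_* masks and the pdmCritSectRwEnterSharedGotItAfterWaiting logic.

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    /* Hypothetical field layout; the real masks and shifts are the RTCSRW_* constants. */
    static constexpr uint64_t kRdCountMask = UINT64_C(0x000000000000ffff); /* readers holding the lock */
    static constexpr uint64_t kRdWaitMask  = UINT64_C(0x00000000ffff0000); /* readers queued waiting   */
    static constexpr unsigned kRdWaitShift = 16;

    /* A woken waiter that now owns the lock in shared mode retires itself from the
       wait count.  Returns the number of waiters still queued; when that reaches
       zero the caller resets the multi-release read event so future waiters block. */
    static uint64_t retireWaitingReader(std::atomic<uint64_t> &state)
    {
        uint64_t oldState = state.load(std::memory_order_relaxed);
        for (;;)
        {
            uint64_t const cWait = (oldState & kRdWaitMask) >> kRdWaitShift;
            assert(cWait > 0 && (oldState & kRdCountMask) > 0); /* corruption checks in the real code */
            uint64_t const newState = (oldState & ~kRdWaitMask) | ((cWait - 1) << kRdWaitShift);
            if (state.compare_exchange_weak(oldState, newState))
                return cWait - 1;
            /* CAS lost a race: oldState now holds the fresh value; the real code
               pauses (ASMNopPause) and re-validates u32Magic before retrying.    */
        }
    }

The bail-out path (pdmCritSectRwEnterSharedBailOut) plays the same game, except that while the section is still in write mode it drops both the reader count and the wait count before returning the failure status.
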
  • trunk/src/VBox/VMM/VMMR3/PDMCritSect.cpp

    r90672 r90677  
    2020*   Header Files                                                                                                                 *
    2121*********************************************************************************************************************************/
    22 #define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
     22#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
    2323#include "PDMInternal.h"
    2424#include <VBox/vmm/pdmcritsect.h>
     
    7878                 "Number of VERR_TIMEOUT returns in exclusive mode.");
    7979    STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectRwExclNonInterruptibleWaits, STAMTYPE_COUNTER, "/PDM/CritSectsRw/00-Excl-Non-interruptible-Waits-VINF_SUCCESS",
     80                 STAMUNIT_OCCURENCES, "Number of non-interruptible waits for rcBusy=VINF_SUCCESS in exclusive mode");
     81
     82    STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectRwEnterSharedWhileAborting, STAMTYPE_COUNTER, "/PDM/CritSectsRw/00-EnterSharedWhileAborting", STAMUNIT_OCCURENCES,
     83                 "Number of times we've got the critical section ownership in shared mode while trying to abort a wait due to VERR_INTERRUPTED or VERR_TIMEOUT.");
     84    STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectRwSharedVerrInterrupted, STAMTYPE_COUNTER, "/PDM/CritSectsRw/00-Shared-VERR_INTERRUPTED", STAMUNIT_OCCURENCES,
     85                 "Number of VERR_INTERRUPTED returns in shared mode.");
     86    STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectRwSharedVerrTimeout, STAMTYPE_COUNTER, "/PDM/CritSectsRw/00-Shared-VERR_TIMEOUT", STAMUNIT_OCCURENCES,
     87                 "Number of VERR_TIMEOUT returns in shared mode.");
     88    STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectRwSharedNonInterruptibleWaits, STAMTYPE_COUNTER, "/PDM/CritSectsRw/00-Shared-Non-interruptible-Waits-VINF_SUCCESS",
    8089                 STAMUNIT_OCCURENCES, "Number of non-interruptible waits for rcBusy=VINF_SUCCESS in exclusive mode");
    8190
     
    220229                pUVM->pdm.s.pCritSects = pCritSect;
    221230                RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
     231                Log(("pdmR3CritSectInitOne: %p %s\n", pCritSect, pszName));
    222232
    223233                return VINF_SUCCESS;
     
    328338                    pUVM->pdm.s.pRwCritSects = pCritSect;
    329339                    RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
     340                    LogIt(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP_PDM_CRITSECTRW, ("pdmR3CritSectRwInitOne: %p %s\n", pCritSect, pszName));
    330341
    331342                    return VINF_SUCCESS;
  • trunk/src/VBox/VMM/include/PDMInternal.h

    r90672 r90677  
    14981498    STAMCOUNTER                     StatCritSectRwExclVerrInterrupted;
    14991499    STAMCOUNTER                     StatCritSectRwExclNonInterruptibleWaits;
     1500
     1501    STAMCOUNTER                     StatCritSectRwEnterSharedWhileAborting;
     1502    STAMCOUNTER                     StatCritSectRwSharedVerrTimeout;
     1503    STAMCOUNTER                     StatCritSectRwSharedVerrInterrupted;
     1504    STAMCOUNTER                     StatCritSectRwSharedNonInterruptibleWaits;
    15001505} PDM;
    15011506AssertCompileMemberAlignment(PDM, CritSect, 8);