Changeset 20008 in vbox
Timestamp:
    May 25, 2009, 6:34:43 PM
Location:
    trunk
Files:
    13 edited
    include/VBox/pdmcritsect.h (modified, 2 diffs)
    include/iprt/critsect.h (modified, 2 diffs)
    include/iprt/thread.h (modified, 2 diffs)
    src/VBox/Runtime/common/misc/thread.cpp (modified, 3 diffs)
    src/VBox/Runtime/generic/critsect-generic.cpp (modified, 4 diffs)
    src/VBox/Runtime/include/internal/thread.h (modified, 1 diff)
    src/VBox/VMM/EM.cpp (modified, 1 diff)
    src/VBox/VMM/PDM.cpp (modified, 2 diffs)
    src/VBox/VMM/PDMCritSect.cpp (modified, 19 diffs)
    src/VBox/VMM/PDMInternal.h (modified, 4 diffs)
    src/VBox/VMM/VMM.cpp (modified, 1 diff)
    src/VBox/VMM/VMMAll/PDMAllCritSect.cpp (modified, 10 diffs)
    src/VBox/VMM/VMMAll/PGMAll.cpp (modified, 1 diff)
Legend:
    ' '  Unmodified
    '+'  Added
    '-'  Removed
    ...  Unchanged lines omitted
trunk/include/VBox/pdmcritsect.h (r19595 → r20008)

  VMMDECL(void)     PDMCritSectLeave(PPDMCRITSECT pCritSect);
  VMMDECL(bool)     PDMCritSectIsOwner(PCPDMCRITSECT pCritSect);
- VMMDECL(bool)     PDMCritSectIsLocked(PCPDMCRITSECT pCritSect);
  VMMDECL(bool)     PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu);
+ VMMDECL(bool)     PDMCritSectIsOwned(PCPDMCRITSECT pCritSect);
  VMMDECL(bool)     PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect);
  VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect);
...
  VMMR3DECL(int)    PDMR3CritSectDelete(PPDMCRITSECT pCritSect);
  VMMDECL(int)      PDMR3CritSectTerm(PVM pVM);
- VMMR3DECL(void)   PDMR3CritSectFF(PVMCPU pVCpu);
+ VMMDECL(void)     PDMCritSectFF(PVMCPU pVCpu);
  VMMR3DECL(uint32_t) PDMR3CritSectCountOwned(PVM pVM, char *pszNames, size_t cbNames);
trunk/include/iprt/critsect.h (r19589 → r20008)

  RTDECL(int) RTCritSectDelete(PRTCRITSECT pCritSect);

+ /**
+  * Checks the caller is the owner of the critical section.
+  *
+  * @returns true if owner.
+  * @returns false if not owner.
+  * @param   pCritSect   The critical section.
+  */
+ DECLINLINE(bool) RTCritSectIsOwner(PCRTCRITSECT pCritSect)
+ {
+     return pCritSect->NativeThreadOwner == RTThreadNativeSelf();
+ }
+
+ #endif /* IN_RING3 */
+
+ /**
+  * Checks the section is owned by anyone.
+  *
+  * @returns true if owned.
+  * @returns false if not owned.
+  * @param   pCritSect   The critical section.
+  */
+ DECLINLINE(bool) RTCritSectIsOwned(PCRTCRITSECT pCritSect)
+ {
+     return pCritSect->NativeThreadOwner != NIL_RTNATIVETHREAD;
+ }
+
+ /**
+  * Gets the thread id of the critical section owner.
+  *
+  * @returns Thread id of the owner thread if owned.
+  * @returns NIL_RTNATIVETHREAD is not owned.
+  * @param   pCritSect   The critical section.
+  */
+ DECLINLINE(RTNATIVETHREAD) RTCritSectGetOwner(PCRTCRITSECT pCritSect)
+ {
+     return pCritSect->NativeThreadOwner;
+ }

  /**
...
  }

-
- /**
-  * Checks the caller is the owner of the critical section.
-  *
-  * @returns true if owner.
-  * @returns false if not owner.
-  * @param   pCritSect   The critical section.
-  */
- DECLINLINE(bool) RTCritSectIsOwner(PCRTCRITSECT pCritSect)
- {
-     return pCritSect->NativeThreadOwner == RTThreadNativeSelf();
- }
-
-
- /**
-  * Checks the section is owned by anyone.
-  *
-  * @returns true if owned.
-  * @returns false if not owned.
-  * @param   pCritSect   The critical section.
-  */
- DECLINLINE(bool) RTCritSectIsOwned(PCRTCRITSECT pCritSect)
- {
-     return pCritSect->NativeThreadOwner != NIL_RTNATIVETHREAD;
- }
-
-
- /**
-  * Gets the thread id of the critical section owner.
-  *
-  * @returns Thread id of the owner thread if owned.
-  * @returns NIL_RTNATIVETHREAD is not owned.
-  * @param   pCritSect   The critical section.
-  */
- DECLINLINE(RTNATIVETHREAD) RTCritSectGetOwner(PCRTCRITSECT pCritSect)
- {
-     return pCritSect->NativeThreadOwner;
- }
-
- #endif /* IN_RING3 */
-
  /**
   * Gets the recursion depth.
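The net effect of these two hunks is that RTCritSectIsOwned and RTCritSectGetOwner move out of the IN_RING3 guard, while RTCritSectIsOwner (which needs RTThreadNativeSelf) stays ring-3 only. A minimal ring-3 usage sketch, assuming the IPRT headers from this revision; the function name and global are invented for illustration:

    #include <iprt/critsect.h>
    #include <iprt/assert.h>

    static RTCRITSECT g_CritSect;

    static void critsectOwnerQueries(void)
    {
        RTCritSectInit(&g_CritSect);

        RTCritSectEnter(&g_CritSect);
        Assert(RTCritSectIsOwner(&g_CritSect)); /* caller is the owner (ring-3 only check) */
        Assert(RTCritSectIsOwned(&g_CritSect)); /* somebody owns it (usable in any context) */
        RTCritSectLeave(&g_CritSect);

        /* Free again: no owner recorded, so the owned predicate is false here. */
        Assert(!RTCritSectIsOwned(&g_CritSect));

        RTCritSectDelete(&g_CritSect);
    }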
trunk/include/iprt/thread.h (r19906 → r20008)

   * @{
   */
+
+ /**
+  * The thread state.
+  */
+ typedef enum RTTHREADSTATE
+ {
+     /** The usual invalid 0 value. */
+     RTTHREADSTATE_INVALID = 0,
+     /** The thread is being initialized. */
+     RTTHREADSTATE_INITIALIZING,
+     /** The thread has terminated */
+     RTTHREADSTATE_TERMINATED,
+     /** Probably running. */
+     RTTHREADSTATE_RUNNING,
+     /** Waiting on a critical section. */
+     RTTHREADSTATE_CRITSECT,
+     /** Waiting on a mutex. */
+     RTTHREADSTATE_MUTEX,
+     /** Waiting on a event semaphore. */
+     RTTHREADSTATE_EVENT,
+     /** Waiting on a event multiple wakeup semaphore. */
+     RTTHREADSTATE_EVENTMULTI,
+     /** Waiting on a read write semaphore, read (shared) access. */
+     RTTHREADSTATE_RW_READ,
+     /** Waiting on a read write semaphore, write (exclusive) access. */
+     RTTHREADSTATE_RW_WRITE,
+     /** The thread is sleeping. */
+     RTTHREADSTATE_SLEEP,
+     /** The usual 32-bit size hack. */
+     RTTHREADSTATE_32BIT_HACK = 0x7fffffff
+ } RTTHREADSTATE;
+
+ /** Checks if a thread state indicates that the thread is sleeping. */
+ #define RTTHREAD_IS_SLEEPING(enmState) (   (enmState) == RTTHREADSTATE_CRITSECT \
+                                         || (enmState) == RTTHREADSTATE_MUTEX \
+                                         || (enmState) == RTTHREADSTATE_EVENT \
+                                         || (enmState) == RTTHREADSTATE_EVENTMULTI \
+                                         || (enmState) == RTTHREADSTATE_RW_READ \
+                                         || (enmState) == RTTHREADSTATE_RW_WRITE \
+                                         || (enmState) == RTTHREADSTATE_SLEEP \
+                                        )
...
  RTDECL(void) RTThreadReadLockDec(RTTHREAD Thread);

+ /**
+  * Unblocks a thread.
+  *
+  * This function is paired with rtThreadBlocking.
+  *
+  * @param   hThread     The current thread.
+  * @param   enmCurState The current state, used to check for nested blocking.
+  *                      The new state will be running.
+  */
+ RTDECL(void) RTThreadUnblocked(RTTHREAD hThread, RTTHREADSTATE enmCurState);
+
+ /**
+  * Change the thread state to blocking and do deadlock detection.
+  *
+  * This is a RT_STRICT method for debugging locks and detecting deadlocks.
+  *
+  * @param   hThread     The current thread.
+  * @param   enmState    The sleep state.
+  * @param   u64Block    The block data. A pointer or handle.
+  * @param   pszFile     Where we are blocking.
+  * @param   uLine       Where we are blocking.
+  * @param   uId         Where we are blocking.
+  */
+ RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState, uint64_t u64Block,
+                               const char *pszFile, unsigned uLine, RTUINTPTR uId);
+
+

  /** @name Thread Local Storage
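The newly public RTThreadBlocking/RTThreadUnblocked pair is used later in this changeset to bracket semaphore waits (see critsect-generic.cpp below). A hedged sketch of that pattern, assuming the IPRT headers from this revision; the wrapper name and the pvLock parameter are invented for illustration:

    #include <iprt/thread.h>
    #include <iprt/semaphore.h>

    /* Wait on an event semaphore while keeping the deadlock detector informed. */
    static int waitWithDeadlockDetection(RTSEMEVENT hEventSem, void *pvLock)
    {
        RTTHREAD hSelf = RTThreadSelf();

        /* Record what we are about to sleep on (strict/debug builds). */
        RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pvLock,
                         __FILE__, __LINE__, 0 /* uId */);

        int rc = RTSemEventWait(hEventSem, RT_INDEFINITE_WAIT);

        /* Flip the state back to RTTHREADSTATE_RUNNING on wakeup. */
        RTThreadUnblocked(hSelf, RTTHREADSTATE_CRITSECT);
        return rc;
    }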
trunk/src/VBox/Runtime/common/misc/thread.cpp (r15880 → r20008)

   * This is a RT_STRICT method for debugging locks and detecting deadlocks.
   *
-  * @param   pThread     This thread.
+  * @param   hThread     The current thread.
   * @param   enmState    The sleep state.
   * @param   u64Block    The block data. A pointer or handle.
...
   * @param   uId         Where we are blocking.
   */
- void rtThreadBlocking(PRTTHREADINT pThread, RTTHREADSTATE enmState, uint64_t u64Block,
-                       const char *pszFile, unsigned uLine, RTUINTPTR uId)
- {
+ RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState, uint64_t u64Block,
+                               const char *pszFile, unsigned uLine, RTUINTPTR uId)
+ {
+     PRTTHREADINT pThread = hThread;
      Assert(RTTHREAD_IS_SLEEPING(enmState));
      if (pThread && pThread->enmState == RTTHREADSTATE_RUNNING)
...
   * This function is paired with rtThreadBlocking.
   *
-  * @param   pThread     The current thread.
+  * @param   hThread     The current thread.
   * @param   enmCurState The current state, used to check for nested blocking.
   *                      The new state will be running.
   */
- void rtThreadUnblocked(PRTTHREADINT pThread, RTTHREADSTATE enmCurState)
- {
-     if (pThread && pThread->enmState == enmCurState)
-         ASMAtomicWriteSize(&pThread->enmState, RTTHREADSTATE_RUNNING);
+ RTDECL(void) RTThreadUnblocked(RTTHREAD hThread, RTTHREADSTATE enmCurState)
+ {
+     if (hThread && hThread->enmState == enmCurState)
+         ASMAtomicWriteSize(&hThread->enmState, RTTHREADSTATE_RUNNING);
  }
trunk/src/VBox/Runtime/generic/critsect-generic.cpp (r19989 → r20008)

       */
      pCritSect->cNestings = 1;
-     ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NativeThreadSelf);
+     ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
  #ifdef RTCRITSECT_STRICT
      pCritSect->Strict.pszEnterFile = pszFile;
      pCritSect->Strict.u32EnterLine = uLine;
      pCritSect->Strict.uEnterId     = uId;
-     ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, (RTUINTPTR)ThreadSelf); /* screw gcc and its pedantic warnings. */
+     ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, ThreadSelf);
  #endif
...
      {
  #ifdef RTCRITSECT_STRICT
-         rtThreadBlocking(ThreadSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, pszFile, uLine, uId);
+         RTThreadBlocking(ThreadSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, pszFile, uLine, uId);
  #endif
          int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
  #ifdef RTCRITSECT_STRICT
-         rtThreadUnblocked(ThreadSelf, RTTHREADSTATE_CRITSECT);
+         RTThreadUnblocked(ThreadSelf, RTTHREADSTATE_CRITSECT);
  #endif
          if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
...
       */
      pCritSect->cNestings = 1;
-     ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NativeThreadSelf);
+     ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
  #ifdef RTCRITSECT_STRICT
      pCritSect->Strict.pszEnterFile = pszFile;
      pCritSect->Strict.u32EnterLine = uLine;
      pCritSect->Strict.uEnterId     = uId;
-     ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, (RTUINTPTR)ThreadSelf); /* screw gcc and its pedantic warnings. */
+     ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, ThreadSelf);
      RTThreadWriteLockInc(ThreadSelf);
  #endif
...
      if (pCritSect->Strict.ThreadOwner != NIL_RTTHREAD) /* May happen for PDMCritSects when entering GC/R0. */
          RTThreadWriteLockDec(pCritSect->Strict.ThreadOwner);
-     ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, NIL_RTTHREAD);
- #endif
-     ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
+     ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, NIL_RTTHREAD);
+ #endif
+     ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
      if (ASMAtomicDecS32(&pCritSect->cLockers) >= 0)
      {
trunk/src/VBox/Runtime/include/internal/thread.h (r8645 → r20008)

- /**
-  * The thread state.
-  */
- typedef enum RTTHREADSTATE
- {
-     /** The usual invalid 0 value. */
-     RTTHREADSTATE_INVALID = 0,
-     /** The thread is being initialized. */
-     RTTHREADSTATE_INITIALIZING,
-     /** The thread has terminated */
-     RTTHREADSTATE_TERMINATED,
-     /** Probably running. */
-     RTTHREADSTATE_RUNNING,
-     /** Waiting on a critical section. */
-     RTTHREADSTATE_CRITSECT,
-     /** Waiting on a mutex. */
-     RTTHREADSTATE_MUTEX,
-     /** Waiting on a event semaphore. */
-     RTTHREADSTATE_EVENT,
-     /** Waiting on a event multiple wakeup semaphore. */
-     RTTHREADSTATE_EVENTMULTI,
-     /** Waiting on a read write semaphore, read (shared) access. */
-     RTTHREADSTATE_RW_READ,
-     /** Waiting on a read write semaphore, write (exclusive) access. */
-     RTTHREADSTATE_RW_WRITE,
-     /** The thread is sleeping. */
-     RTTHREADSTATE_SLEEP,
-     /** The usual 32-bit size hack. */
-     RTTHREADSTATE_32BIT_HACK = 0x7fffffff
- } RTTHREADSTATE;
-
-
- /** Checks if a thread state indicates that the thread is sleeping. */
- #define RTTHREAD_IS_SLEEPING(enmState) (   (enmState) == RTTHREADSTATE_CRITSECT \
-                                         || (enmState) == RTTHREADSTATE_MUTEX \
-                                         || (enmState) == RTTHREADSTATE_EVENT \
-                                         || (enmState) == RTTHREADSTATE_EVENTMULTI \
-                                         || (enmState) == RTTHREADSTATE_RW_READ \
-                                         || (enmState) == RTTHREADSTATE_RW_WRITE \
-                                         || (enmState) == RTTHREADSTATE_SLEEP \
-                                        )

  /** Max thread name length. */
trunk/src/VBox/VMM/EM.cpp (r19873 → r20008)

  {
      if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
-         PDMR3CritSectFF(pVCpu);
+         PDMCritSectFF(pVCpu);

      if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
trunk/src/VBox/VMM/PDM.cpp (r19793 → r20008)

       * Initialize sub compontents.
       */
-     int rc = pdmR3CritSectInit(pVM);
+     int rc = RTCritSectInit(&pVM->pdm.s.MiscCritSect);
      if (RT_SUCCESS(rc))
-     {
+         rc = pdmR3CritSectInit(pVM);
+     if (RT_SUCCESS(rc))
          rc = PDMR3CritSectInit(pVM, &pVM->pdm.s.CritSect, "PDM");
+     if (RT_SUCCESS(rc))
+         rc = pdmR3LdrInitU(pVM->pUVM);
+     if (RT_SUCCESS(rc))
+         rc = pdmR3DrvInit(pVM);
+     if (RT_SUCCESS(rc))
+         rc = pdmR3DevInit(pVM);
+ #ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+     if (RT_SUCCESS(rc))
+         rc = pdmR3AsyncCompletionInit(pVM);
+ #endif
+     if (RT_SUCCESS(rc))
+     {
+         /*
+          * Register the saved state data unit.
+          */
+         rc = SSMR3RegisterInternal(pVM, "pdm", 1, PDM_SAVED_STATE_VERSION, 128,
+                                    NULL, pdmR3Save, NULL,
+                                    pdmR3LoadPrep, pdmR3Load, NULL);
          if (RT_SUCCESS(rc))
-             rc = pdmR3LdrInitU(pVM->pUVM);
-         if (RT_SUCCESS(rc))
-         {
-             rc = pdmR3DrvInit(pVM);
-             if (RT_SUCCESS(rc))
-             {
-                 rc = pdmR3DevInit(pVM);
-                 if (RT_SUCCESS(rc))
-                 {
- #ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
-                     rc = pdmR3AsyncCompletionInit(pVM);
-                     if (RT_SUCCESS(rc))
- #endif
-                     {
-                         /*
-                          * Register the saved state data unit.
-                          */
-                         rc = SSMR3RegisterInternal(pVM, "pdm", 1, PDM_SAVED_STATE_VERSION, 128,
-                                                    NULL, pdmR3Save, NULL,
-                                                    pdmR3LoadPrep, pdmR3Load, NULL);
-                         if (RT_SUCCESS(rc))
-                         {
-                             LogFlow(("PDM: Successfully initialized\n"));
-                             return rc;
-                         }
-
-                     }
-                 }
-             }
+         {
+             LogFlow(("PDM: Successfully initialized\n"));
+             return rc;
          }
      }
...
       */
      PDMR3CritSectDelete(&pVM->pdm.s.CritSect);
+     /* The MiscCritSect is deleted by PDMR3CritSectTerm. */

      LogFlow(("PDMR3Term: returns %Rrc\n", VINF_SUCCESS));
trunk/src/VBox/VMM/PDMCritSect.cpp (r19991 → r20008)

   */

- //#define PDM_WITH_R3R0_CRIT_SECT

  /*******************************************************************************
...
  #include <VBox/err.h>
  #include <VBox/log.h>
- #ifdef PDM_WITH_R3R0_CRIT_SECT
- # include <VBox/sup.h>
- #endif
+ #include <VBox/sup.h>
  #include <iprt/asm.h>
  #include <iprt/assert.h>
...
  void pdmR3CritSectRelocate(PVM pVM)
  {
+     RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
      for (PPDMCRITSECTINT pCur = pVM->pdm.s.pCritSects;
           pCur;
           pCur = pCur->pNext)
          pCur->pVMRC = pVM->pVMRC;
+     RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
  }
...
  {
      int rc = VINF_SUCCESS;
+     RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
      while (pVM->pdm.s.pCritSects)
      {
...
              rc = rc2;
      }
+     RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
+     RTCritSectDelete(&pVM->pdm.s.MiscCritSect);
      return rc;
  }
...
      VM_ASSERT_EMT(pVM);

- #ifdef PDM_WITH_R3R0_CRIT_SECT
      /*
       * Allocate the semaphore.
       */
      AssertCompile(sizeof(SUPSEMEVENT) == sizeof(pCritSect->Core.EventSem));
      int rc = SUPSemEventCreate(pVM->pSession, (PSUPSEMEVENT)&pCritSect->Core.EventSem);
- #else
-     int rc = RTCritSectInit(&pCritSect->Core);
- #endif
      if (RT_SUCCESS(rc))
      {
- #ifdef PDM_WITH_R3R0_CRIT_SECT
          /*
           * Initialize the structure (first bit is c&p from RTCritSectInitEx).
           */
...
          pCritSect->Core.Strict.u32EnterLine = 0;
          pCritSect->Core.Strict.uEnterId     = 0;
- #endif
          pCritSect->pVMR3 = pVM;
          pCritSect->pVMR0 = pVM->pVMR0;
...
   * @param   pCritSect   Pointer to the critical section.
   * @param   pszName     The name of the critical section (for statistics).
+  * @thread  EMT(0)
   */
  VMMR3DECL(int) PDMR3CritSectInit(PVM pVM, PPDMCRITSECT pCritSect, const char *pszName)
...
   *
   * @returns Return code from RTCritSectDelete.
+  *
   * @param   pVM         The VM handle.
   * @param   pCritSect   The critical section.
   * @param   pPrev       The previous critical section in the list.
   * @param   fFinal      Set if this is the final call and statistics shouldn't be deregistered.
+  *
+  * @remarks Caller must've entered the MiscCritSect.
   */
  static int pdmR3CritSectDeleteOne(PVM pVM, PPDMCRITSECTINT pCritSect, PPDMCRITSECTINT pPrev, bool fFinal)
  {
- #ifdef PDM_WITH_R3R0_CRIT_SECT
      /*
       * Assert free waiters and so on (c&p from RTCritSectDelete).
...
      Assert(pCritSect->Core.cLockers == -1);
      Assert(pCritSect->Core.NativeThreadOwner == NIL_RTNATIVETHREAD);
- #endif
+     Assert(RTCritSectIsOwner(&pVM->pdm.s.MiscCritSect));

      /*
...
       * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
       */
- #ifdef PDM_WITH_R3R0_CRIT_SECT
      ASMAtomicWriteU32(&pCritSect->Core.u32Magic, 0);
      SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->Core.EventSem;
...
      int rc = SUPSemEventClose(pVM->pSession, hEvent);
      AssertRC(rc);
- #endif
      pCritSect->pNext = NULL;
      pCritSect->pvKey = NULL;
...
  #endif
      }
- #ifndef PDM_WITH_R3R0_CRIT_SECT
-     int rc = RTCritSectDelete(&pCritSect->Core);
- #endif
      return rc;
  }
...
       * Iterate the list and match key.
       */
-     int             rc = VINF_SUCCESS;
+     int             rc    = VINF_SUCCESS;
      PPDMCRITSECTINT pPrev = NULL;
-     PPDMCRITSECTINT pCur = pVM->pdm.s.pCritSects;
+     RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
+     PPDMCRITSECTINT pCur  = pVM->pdm.s.pCritSects;
      while (pCur)
      {
...
          pCur = pCur->pNext;
      }
+     RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
      return rc;
  }
...
       * Find and unlink it.
       */
-     PVM             pVM = pCritSect->s.pVMR3;
+     PVM             pVM   = pCritSect->s.pVMR3;
      AssertReleaseReturn(pVM, VERR_INTERNAL_ERROR);
      PPDMCRITSECTINT pPrev = NULL;
-     PPDMCRITSECTINT pCur = pVM->pdm.s.pCritSects;
+     RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
+     PPDMCRITSECTINT pCur  = pVM->pdm.s.pCritSects;
      while (pCur)
      {
          if (pCur == &pCritSect->s)
-             return pdmR3CritSectDeleteOne(pVM, pCur, pPrev, false /* not final */);
+         {
+             int rc = pdmR3CritSectDeleteOne(pVM, pCur, pPrev, false /* not final */);
+             RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
+             return rc;
+         }

          /* next */
...
          pCur = pCur->pNext;
      }
+     RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
      AssertReleaseMsgFailed(("pCritSect=%p wasn't found!\n", pCritSect));
      return VERR_INTERNAL_ERROR;
  }
-
- /**
-  * Process the critical sections queued for ring-3 'leave'.
-  *
-  * @param   pVCpu   The VMCPU handle.
-  */
- VMMR3DECL(void) PDMR3CritSectFF(PVMCPU pVCpu)
- {
-     Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);
-
-     const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
-     for (RTUINT i = 0; i < c; i++)
-     {
-         PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
- #ifdef PDM_WITH_R3R0_CRIT_SECT
-         int rc = pdmCritSectLeave(pCritSect);
- #else
-         int rc = RTCritSectLeave(&pCritSect->s.Core);
- #endif
-         LogFlow(("PDMR3CritSectFF: %p - %Rrc\n", pCritSect, rc));
-         AssertRC(rc);
-     }
-
-     pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
-     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
- }
...
   * @returns VERR_NOT_OWNER if we're not the critsect owner.
   * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
+  *
   * @param   pCritSect       The critical section.
   * @param   EventToSignal   The semapore that should be signalled.
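The recurring change across these hunks is the new MiscCritSect: every walk or mutation of the pVM->pdm.s.pCritSects list is now bracketed by RTCritSectEnter/RTCritSectLeave, because with SMP guests more than one EMT may init or delete critical sections concurrently. A minimal sketch of the rule, assuming this revision's PDM internals; the helper and callback names are invented for illustration:

    /* Walk the registered critical sections under the new list lock. */
    static void pdmExampleForEachCritSect(PVM pVM, void (*pfnVisit)(PPDMCRITSECTINT pCur))
    {
        RTCritSectEnter(&pVM->pdm.s.MiscCritSect);
        for (PPDMCRITSECTINT pCur = pVM->pdm.s.pCritSects; pCur; pCur = pCur->pNext)
            pfnVisit(pCur); /* the list is stable while MiscCritSect is held */
        RTCritSectLeave(&pVM->pdm.s.MiscCritSect);
    }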
trunk/src/VBox/VMM/PDMInternal.h (r20001 → r20008)

   * @{
   */
+
+ /** @def PDM_WITH_R3R0_CRIT_SECT
+  * Enables or disabled ring-3/ring-0 critical sections. */
+ #if defined(DOXYGEN_RUNNING) || 1
+ # define PDM_WITH_R3R0_CRIT_SECT
+ #endif
+

  /*******************************************************************************
...
  } PDMCRITSECTINT;
  typedef PDMCRITSECTINT *PPDMCRITSECTINT;
+
+ /** Indicates that the critical section is queued for unlock.
+  * PDMCritSectIsOwner and PDMCritSectIsOwned optimizations. */
+ #define PDMCRITSECT_FLAGS_PENDING_UNLOCK    RT_BIT_32(17)

...
  {
      /** The number of entries in the apQueuedCritSectsLeaves table that's currnetly in use. */
-     RTUINT                          cQueuedCritSectLeaves;
-     RTUINT                          uPadding0; /**< Alignment padding.*/
+     uint32_t                        cQueuedCritSectLeaves;
+     uint32_t                        uPadding0; /**< Alignment padding.*/
      /** Critical sections queued in RC/R0 because of contention preventing leave to complete. (R3 Ptrs)
       * We will return to Ring-3 ASAP, so this queue doesn't have to be very long. */
...
       * the PIC, APIC, IOAPIC and PCI devices pluss some PDM functions. */
      PDMCRITSECT                     CritSect;
+     /** The PDM miscellancous lock.
+      * This is used to protect things like critsect init/delete that formerly was
+      * serialized by there only being one EMT.
+      */
+     RTCRITSECT                      MiscCritSect;

      /** Number of times a critical section leave requesed needed to be queued for ring-3 execution. */
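The PDMCRITSECT_FLAGS_PENDING_UNLOCK bit exists because a contended leave in RC/R0 only queues the unlock for ring-3, leaving NativeThreadOwner set in the meantime; the owner predicates can then test a single flag instead of scanning the queue (the scan this replaces is visible in the PDMAllCritSect.cpp hunks below). A sketch of the intended test, mirroring those hunks; the helper name is invented:

    /* True only if the section is held and no unlock is queued for ring-3. */
    static bool pdmExampleIsReallyOwned(PCPDMCRITSECT pCritSect)
    {
        return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
            && !(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK);
    }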
trunk/src/VBox/VMM/VMM.cpp (r19732 → r20008)

  static int vmmR3ServiceCallHostRequest(PVM pVM, PVMCPU pVCpu)
  {
-     /* We must also check for pending releases or else we can deadlock when acquiring a new lock here.
-      * On return we go straight back to R0/GC.
+     /*
+      * We must also check for pending critsect exits or else we can deadlock
+      * when entering other critsects here.
       */
      if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
-         PDMR3CritSectFF(pVCpu);
+         PDMCritSectFF(pVCpu);

      switch (pVCpu->vmm.s.enmCallHostOperation)
trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp (r19740 → r20008)

+ /*******************************************************************************
+ *   Defined Constants And Macros                                              *
+ *******************************************************************************/
+ /** The number loops to spin for in ring-3. */
+ #define PDMCRITSECT_SPIN_COUNT_R3       20
+ /** The number loops to spin for in ring-0. */
+ #define PDMCRITSECT_SPIN_COUNT_R0       256
+ /** The number loops to spin for in the raw-mode context. */
+ #define PDMCRITSECT_SPIN_COUNT_RC       256
+
+ /** @def PDMCRITSECT_STRICT
+  * Enables/disables PDM critsect strictness like deadlock detection. */
+ #if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
+ # define PDMCRITSECT_STRICT
+ #endif
+
+
+ /**
+  * Gets the ring-3 native thread handle of the calling thread.
+  *
+  * @returns native thread handle (ring-3).
+  * @param   pCritSect   The critical section. This is used in R0 and RC.
+  */
+ DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PPDMCRITSECT pCritSect)
+ {
+ #ifdef IN_RING3
+     NOREF(pCritSect);
+     RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
+ #else
+     AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
+                     VERR_SEM_DESTROYED);
+     PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
+     PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
+     RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
+ #endif
+     return hNativeSelf;
+ }
+
+
+ /**
+  * Tail code called when we've wont the battle for the lock.
+  *
+  * @returns VINF_SUCCESS.
+  *
+  * @param   pCritSect       The critical section.
+  * @param   hNativeSelf     The native handle of this thread.
+  */
+ DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
+ {
+     AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
+     Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
+
+     pCritSect->s.Core.cNestings = 1;
+     ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);
+
+ # if defined(PDMCRITSECT_STRICT) && defined(IN_RING3)
+     pCritSect->s.Core.Strict.pszEnterFile = NULL;
+     pCritSect->s.Core.Strict.u32EnterLine = 0;
+     pCritSect->s.Core.Strict.uEnterId     = 0;
+     RTTHREAD hSelf = RTThreadSelf();
+     ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, hSelf);
+     RTThreadWriteLockInc(hSelf);
+ # endif
+
+     STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
+     return VINF_SUCCESS;
+ }
+
+
+ #ifdef IN_RING3
+ /**
+  * Deals with the contended case in ring-3.
+  *
+  * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
+  * @param   pCritSect   The critsect.
+  * @param   hNativeSelf The native thread handle.
+  */
+ static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
+ {
+     /*
+      * Start waiting.
+      */
+     if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
+         return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
+     STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
+
+     /*
+      * The wait loop.
+      */
+     PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
+     SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
+ # ifdef PDMCRITSECT_STRICT
+     RTTHREAD        hSelf    = RTThreadSelf();
+     if (hSelf == NIL_RTTHREAD)
+         RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &hSelf);
+ # endif
+     for (;;)
+     {
+ # ifdef PDMCRITSECT_STRICT
+         RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, NULL, 0, 0);
+ # endif
+         int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
+ # ifdef PDMCRITSECT_STRICT
+         RTThreadUnblocked(hSelf, RTTHREADSTATE_CRITSECT);
+ # endif
+         if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
+             return VERR_SEM_DESTROYED;
+         if (rc == VINF_SUCCESS)
+             return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
+         AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
+     }
+     /* won't get here */
+ }
+ #endif /* IN_RING3 */
+
+
  /**
   * Enters a PDM critical section.
...
  {
      Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */
- #ifdef IN_RING3
-     NOREF(rcBusy);
-
-     STAM_REL_STATS({if (pCritSect->s.Core.cLockers >= 0 && !RTCritSectIsOwner(&pCritSect->s.Core))
-                         STAM_COUNTER_INC(&pCritSect->s.StatContentionR3); });
-     int rc = RTCritSectEnter(&pCritSect->s.Core);
-     STAM_STATS({ if (pCritSect->s.Core.cNestings == 1) STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l); });
-     return rc;
-
- #else  /* !IN_RING3 */
-     AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
-                     VERR_SEM_DESTROYED);
-     PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
-     Assert(pVM);
-     PVMCPU  pVCpu = VMMGetCpu(pVM);
-     Assert(pVCpu);
-
-     /*
-      * Try to take the lock.
-      */
+
+     /*
+      * If the critical section has already been destroyed, then inform the caller.
+      */
+     AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);
+
+     /*
+      * See if we're lucky.
+      */
+     RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
+     /* Not owned ... */
      if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
-     {
-         pCritSect->s.Core.cNestings = 1;
-         Assert(pVCpu->hNativeThread != NIL_RTNATIVETHREAD);
-         ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread);
-         STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
+         return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
+
+     /* ... or nested. */
+     if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
+     {
+         ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
+         pCritSect->s.Core.cNestings++;
+         pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
          return VINF_SUCCESS;
      }

      /*
-      * Nested?
-      */
-     if (pCritSect->s.Core.NativeThreadOwner == pVCpu->hNativeThread)
-     {
-         pCritSect->s.Core.cNestings++;
-         ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
-         return VINF_SUCCESS;
-     }
-
-     /*
-      * Failed.
-      */
+      * Spin for a bit without incrementing the counter.
+      */
+     /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
+      *        cpu systems. */
+     int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
+     while (cSpinsLeft-- > 0)
+     {
+         if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
+             return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
+         /** @todo need pause/nop instruction here! */
+     }
+
+ #ifdef IN_RING3
+     /*
+      * Take the slow path.
+      */
+     return pdmR3CritSectEnterContended(pCritSect, hNativeSelf);
+ #else
+     /*
+      * Return busy.
+      */
+     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
      LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
-     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
      return rcBusy;
- #endif /* !IN_RING3 */
+ #endif
  }
...
  VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
  {
- #ifdef IN_RING3
-     return RTCritSectTryEnter(&pCritSect->s.Core);
- #else  /* !IN_RING3 (same code as PDMCritSectEnter except for the log statement) */
-     AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
-                     VERR_SEM_DESTROYED);
-     PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
-     Assert(pVM);
-     PVMCPU  pVCpu = VMMGetCpu(pVM);
-     Assert(pVCpu);
-
-     /*
-      * Try to take the lock.
-      */
+     /*
+      * If the critical section has already been destroyed, then inform the caller.
+      */
+     AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);
+
+     /*
+      * See if we're lucky.
+      */
+     RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
+     /* Not owned ... */
      if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
-     {
-         pCritSect->s.Core.cNestings = 1;
-         Assert(pVCpu->hNativeThread != NIL_RTNATIVETHREAD);
-         ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread);
-         STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
+         return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
+
+     /* ... or nested. */
+     if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
+     {
+         ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
+         pCritSect->s.Core.cNestings++;
+         pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
          return VINF_SUCCESS;
      }

-     /*
-      * Nested?
-      */
-     if (pCritSect->s.Core.NativeThreadOwner == pVCpu->hNativeThread)
-     {
-         pCritSect->s.Core.cNestings++;
-         ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
-         return VINF_SUCCESS;
-     }
-
-     /*
-      * Failed.
-      */
+     /* no spinning */
+
+     /*
+      * Return busy.
+      */
+ #ifdef IN_RING3
+     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
+ #else
+     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
+ #endif
      LogFlow(("PDMCritSectTryEnter: locked\n"));
-     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
      return VERR_SEM_BUSY;
- #endif /* !IN_RING3 */
  }
...
  VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
  {
- #ifdef IN_RING3
- # ifdef VBOX_WITH_STATISTICS
-     if (pCritSect->s.Core.cNestings == 1)
-         STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
- # endif
-     RTSEMEVENT EventToSignal = pCritSect->s.EventToSignal;
-     if (RT_LIKELY(EventToSignal == NIL_RTSEMEVENT))
-     {
-         int rc = RTCritSectLeave(&pCritSect->s.Core);
-         AssertRC(rc);
-     }
-     else
-     {
-         pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
-         int rc = RTCritSectLeave(&pCritSect->s.Core);
-         AssertRC(rc);
-         LogBird(("signalling %#x\n", EventToSignal));
-         rc = RTSemEventSignal(EventToSignal);
-         AssertRC(rc);
-     }
-
- #else  /* !IN_RING3 */
-     Assert(VALID_PTR(pCritSect));
      Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
-     Assert(pCritSect->s.Core.cNestings > 0);
-     Assert(pCritSect->s.Core.cLockers >= 0);
-     PVM pVM = pCritSect->s.CTX_SUFF(pVM);
-     Assert(pVM);
-
- #ifdef VBOX_STRICT
-     PVMCPU pVCpu = VMMGetCpu(pVM);
-     Assert(pVCpu);
-     AssertMsg(pCritSect->s.Core.NativeThreadOwner == pVCpu->hNativeThread, ("Owner %RX64 emt=%RX64\n", pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread));
- #endif
-
-     /*
-      * Deal with nested attempts first.
-      * (We're exploiting nesting to avoid queuing multiple R3 leaves for the same section.)
-      */
-     pCritSect->s.Core.cNestings--;
-     if (pCritSect->s.Core.cNestings > 0)
-     {
+     Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
+     Assert(pCritSect->s.Core.cNestings >= 1);
+
+     /*
+      * Nested leave.
+      */
+     if (pCritSect->s.Core.cNestings > 1)
+     {
+         pCritSect->s.Core.cNestings--;
          ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
          return;
      }
- #ifndef VBOX_STRICT
-     PVMCPU pVCpu = VMMGetCpu(pVM);
- #endif
+
+ #if defined(IN_RING3) /// @todo enable this later - || defined(IN_RING0)
+     /*
+      * Leave for real.
+      */
+     /* update members. */
+ # ifdef IN_RING3
+     RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
+     pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
+ #  if defined(PDMCRITSECT_STRICT)
+     if (pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
+         RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
+     ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
+ #  endif
+ # endif
+     pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
+     Assert(pCritSect->s.Core.Strict.ThreadOwner == NIL_RTTHREAD);
+     ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
+     pCritSect->s.Core.cNestings--;
+
+     /* stop and decrement lockers. */
+     STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
+     if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
+     {
+         /* Someone is waiting, wake up one of them. */
+         SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
+         PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
+         int rc = SUPSemEventSignal(pSession, hEvent);
+         AssertRC(rc);
+     }
+
+ # ifdef IN_RING3
+     /* Signal exit event. */
+     if (hEventToSignal != NIL_RTSEMEVENT)
+     {
+         LogBird(("Signalling %#x\n", hEventToSignal));
+         int rc = RTSemEventSignal(hEventToSignal);
+         AssertRC(rc);
+     }
+ # endif
+
+ #else  /* IN_RC */
      /*
       * Try leave it.
...
      if (pCritSect->s.Core.cLockers == 0)
      {
+         pCritSect->s.Core.cNestings = 0;
+         RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
+         pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
          STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
+
          ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
          if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
...
          /* darn, someone raced in on us. */
-         Assert(pVCpu->hNativeThread);
-         ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread);
+         ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
          STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
-     }
-     pCritSect->s.Core.cNestings = 1;
+         pCritSect->s.Core.cNestings = 1;
+     }
+     pCritSect->s.Core.fFlags |= PDMCRITSECT_FLAGS_PENDING_UNLOCK;

      /*
       * Queue the request.
       */
-     RTUINT i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
+     PVM         pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
+     PVMCPU      pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
+     uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
      LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
      AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
...
      STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
      STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
- #endif /* !IN_RING3 */
- }
+ #endif /* IN_RC */
+ }
+
+
+ #if defined(IN_RING3) || defined(IN_RING0)
+ /**
+  * Process the critical sections queued for ring-3 'leave'.
+  *
+  * @param   pVCpu   The VMCPU handle.
+  */
+ VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
+ {
+     Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);
+
+     const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
+     for (RTUINT i = 0; i < c; i++)
+     {
+ # ifdef IN_RING3
+         PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
+ # else
+         PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
+ # endif
+
+         PDMCritSectLeave(pCritSect);
+         LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
+     }
+
+     pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
+     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
+ }
+ #endif /* IN_RING3 || IN_RING0 */
...
      return RTCritSectIsOwner(&pCritSect->s.Core);
  #else
-     PVM pVM = pCritSect->s.CTX_SUFF(pVM);
-     PVMCPU pVCpu = VMMGetCpu(pVM);
-     Assert(pVM); Assert(pVCpu);
+     PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
+     PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
      if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
          return false;
-
-     /* Make sure the critical section is not scheduled to be unlocked. */
-     if (    !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PDM_CRITSECT)
-         ||  RTCritSectGetRecursion(&pCritSect->s.Core) > 1)
-         return true;
-
-     for (unsigned i = 0; i < pVCpu->pdm.s.cQueuedCritSectLeaves; i++)
-     {
-         if (pVCpu->pdm.s.apQueuedCritSectsLeaves[i] == MMHyperCCToR3(pVM, (void *)pCritSect))
-             return false; /* scheduled for release; pretend it's not owned by us. */
-     }
-     return true;
+     return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
  #endif
  }
+

  /**
...
  #else
      PVM pVM = pCritSect->s.CTX_SUFF(pVM);
-     Assert(pVM);
+     AssertPtr(pVM);
      Assert(idCpu < pVM->cCPUs);
-     return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread;
+     return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
+         && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
  #endif
  }
+

  /**
   * Checks if somebody currently owns the critical section.
-  * Note: This doesn't prove that no deadlocks will occur later on; it's just a debugging tool
   *
   * @returns true if locked.
   * @returns false if not locked.
+  *
   * @param   pCritSect   The critical section.
-  */
- VMMDECL(bool) PDMCritSectIsLocked(PCPDMCRITSECT pCritSect)
- {
-     return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD;
- }
+  *
+  * @remarks This doesn't prove that no deadlocks will occur later on; it's
+  *          just a debugging tool
+  */
+ VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
+ {
+     return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
+         && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
+ }
+
...
  VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
  {
-     return pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC;
+     return RTCritSectIsInitialized(&pCritSect->s.Core);
  }
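For readers who want to experiment with the cLockers protocol above outside the VirtualBox tree, here is a standalone C11/POSIX model of the enter/leave fast paths: cLockers is -1 when free, the fast path is a compare-and-swap of -1 to 0, a contended enter increments the count and sleeps on a semaphore, and leave decrements and posts once per waiter. All names are invented for illustration; recursion, statistics and the strict/deadlock hooks are deliberately left out.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <semaphore.h>

    #define SPIN_COUNT 256              /* mirrors PDMCRITSECT_SPIN_COUNT_R0/RC */

    typedef struct MODELSECT
    {
        atomic_int cLockers;            /* -1 = free, >= 0 = held (+ waiters) */
        sem_t      WaitSem;             /* each post wakes exactly one waiter */
    } MODELSECT;

    static void modelInit(MODELSECT *pSect)
    {
        atomic_init(&pSect->cLockers, -1);
        sem_init(&pSect->WaitSem, 0 /* not shared */, 0 /* no tokens */);
    }

    static bool modelTryAcquire(MODELSECT *pSect)
    {
        int iFree = -1;                 /* CAS(-1 -> 0), as in pdmCritSectEnterFirst's caller */
        return atomic_compare_exchange_strong(&pSect->cLockers, &iFree, 0);
    }

    static void modelEnter(MODELSECT *pSect)
    {
        /* Fast path plus a short spin without touching the counter. */
        for (int cSpinsLeft = SPIN_COUNT; cSpinsLeft-- > 0; )
            if (modelTryAcquire(pSect))
                return;

        /* Contended: announce ourselves, then sleep until the owner posts. */
        if (atomic_fetch_add(&pSect->cLockers, 1) + 1 == 0)
            return;                     /* raced the owner and won after all */
        sem_wait(&pSect->WaitSem);      /* woken waiter becomes the owner */
    }

    static void modelLeave(MODELSECT *pSect)
    {
        /* If the count stays >= 0 somebody is waiting; wake exactly one. */
        if (atomic_fetch_sub(&pSect->cLockers, 1) - 1 >= 0)
            sem_post(&pSect->WaitSem);
    }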
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r19874 → r20008)

  VMMDECL(bool) PGMIsLocked(PVM pVM)
  {
-     return PDMCritSectIsLocked(&pVM->pgm.s.CritSect);
+     return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
  }


  /**