Changeset 25663 in vbox
- Timestamp:
- Jan 6, 2010 2:36:42 AM (15 years ago)
- File:
-
- 1 copied
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/Runtime/generic/semrw-lockless-generic.cpp
r25660 r25663 40 40 #include <iprt/assert.h> 41 41 #include <iprt/err.h> 42 #include <iprt/lockvalidator.h> 42 43 #include <iprt/mem.h> 43 44 #include <iprt/thread.h> 44 45 45 46 #include "internal/magics.h" 46 47 #include <stdio.h>//debug 47 #include "internal/strict.h" 48 48 49 49 … … 51 51 * Structures and Typedefs * 52 52 *******************************************************************************/ 53 typedef struct RTSEM XROADSINTERNAL54 { 55 /** Magic value (RTSEM XROADS_MAGIC). */56 uint32_t volatile u32Magic;57 uint32_t u32Padding; /**< alignment padding.*/53 typedef struct RTSEMRWINTERNAL 54 { 55 /** Magic value (RTSEMRW_MAGIC). */ 56 uint32_t volatile u32Magic; 57 uint32_t u32Padding; /**< alignment padding.*/ 58 58 /* The state variable. 59 59 * All accesses are atomic and it bits are defined like this: 60 * Bits 0..14 - c NorthSouth.60 * Bits 0..14 - cReads. 61 61 * Bit 15 - Unused. 62 * Bits 16..31 - c EastWest.63 * Bit 31 - fDirection; 0= NS, 1=EW.64 * Bits 32..46 - cWaiting NS62 * Bits 16..31 - cWrites. - doesn't make sense here 63 * Bit 31 - fDirection; 0=Read, 1=Write. 64 * Bits 32..46 - cWaitingReads 65 65 * Bit 47 - Unused. 66 * Bits 48..62 - cWaiting EW66 * Bits 48..62 - cWaitingWrites 67 67 * Bit 63 - Unused. 68 68 */ 69 uint64_t volatile u64State; 70 /** Per-direction data. */ 71 struct 72 { 73 /** What the north/south bound threads are blocking on when waiting for 74 * east/west traffic to stop. */ 75 RTSEMEVENTMULTI hEvt; 76 /** Indicates whether the semaphore needs resetting. */ 77 bool volatile fNeedReset; 78 } aDirs[2]; 79 } RTSEMXROADSINTERNAL; 69 uint64_t volatile u64State; 70 /** The write owner. */ 71 RTNATIVETHREAD volatile hNativeWriter; 72 /** The number of reads made by the current writer. */ 73 uint32_t volatile cWriterReads; 74 /** The number of reads made by the current writer. */ 75 uint32_t volatile cWriteRecursions; 76 77 /** What the writer threads are blocking on. 
*/ 78 RTSEMEVENT hEvtWrite; 79 /** What the read threads are blocking on when waiting for the writer to 80 * finish. */ 81 RTSEMEVENTMULTI hEvtRead; 82 /** Indicates whether hEvtRead needs resetting. */ 83 bool volatile fNeedReset; 84 85 #ifdef RTSEMRW_STRICT 86 /** The validator record for the writer. */ 87 RTLOCKVALRECEXCL ValidatorWrite; 88 /** The validator record for the readers. */ 89 RTLOCKVALRECSHRD ValidatorRead; 90 #endif 91 } RTSEMRWINTERNAL; 80 92 81 93 … … 83 95 * Defined Constants And Macros * 84 96 *******************************************************************************/ 85 #define RTSEMXROADS_CNT_BITS 15 86 #define RTSEMXROADS_CNT_MASK UINT64_C(0x00007fff) 87 88 #define RTSEMXROADS_CNT_NS_SHIFT 0 89 #define RTSEMXROADS_CNT_NS_MASK (RTSEMXROADS_CNT_MASK << RTSEMXROADS_CNT_NS_SHIFT) 90 #define RTSEMXROADS_CNT_EW_SHIFT 16 91 #define RTSEMXROADS_CNT_EW_MASK (RTSEMXROADS_CNT_MASK << RTSEMXROADS_CNT_EW_SHIFT) 92 #define RTSEMXROADS_DIR_SHIFT 31 93 #define RTSEMXROADS_DIR_MASK RT_BIT_64(RTSEMXROADS_DIR_SHIFT) 94 95 #define RTSEMXROADS_WAIT_CNT_NS_SHIFT 32 96 #define RTSEMXROADS_WAIT_CNT_NS_MASK (RTSEMXROADS_CNT_MASK << RTSEMXROADS_WAIT_CNT_NS_SHIFT) 97 #define RTSEMXROADS_WAIT_CNT_EW_SHIFT 48 98 #define RTSEMXROADS_WAIT_CNT_EW_MASK (RTSEMXROADS_CNT_MASK << RTSEMXROADS_WAIT_CNT_EW_SHIFT) 99 100 101 #if 0 /* debugging aid */ 102 static uint32_t volatile g_iHist = 0; 103 static struct 104 { 105 void *tsc; 106 RTTHREAD hThread; 107 uint32_t line; 108 bool fDir; 109 void *u64State; 110 void *u64OldState; 111 bool fNeedResetNS; 112 bool fNeedResetEW; 113 const char *psz; 114 } g_aHist[256]; 115 116 # define add_hist(ns, os, dir, what) \ 117 do \ 118 { \ 119 uint32_t i = (ASMAtomicIncU32(&g_iHist) - 1) % RT_ELEMENTS(g_aHist);\ 120 g_aHist[i].line = __LINE__; \ 121 g_aHist[i].u64OldState = (void *)(os); \ 122 g_aHist[i].u64State = (void *)(ns); \ 123 g_aHist[i].fDir = (dir); \ 124 g_aHist[i].psz = (what); \ 125 g_aHist[i].fNeedResetNS = 
pThis->aDirs[0].fNeedReset; \ 126 g_aHist[i].fNeedResetEW = pThis->aDirs[1].fNeedReset; \ 127 g_aHist[i].hThread = RTThreadSelf(); \ 128 g_aHist[i].tsc = (void *)ASMReadTSC(); \ 129 } while (0) 130 131 # undef DECL_FORCE_INLINE 132 # define DECL_FORCE_INLINE(type) static type 133 #else 134 # define add_hist(ns, os, dir, what) do { } while (0) 135 #endif 136 137 138 RTDECL(int) RTSemXRoadsCreate(PRTSEMXROADS phXRoads) 139 { 140 RTSEMXROADSINTERNAL *pThis = (RTSEMXROADSINTERNAL *)RTMemAlloc(sizeof(*pThis)); 97 #define RTSEMRW_CNT_BITS 15 98 #define RTSEMRW_CNT_MASK UINT64_C(0x00007fff) 99 100 #define RTSEMRW_CNT_RD_SHIFT 0 101 #define RTSEMRW_CNT_RD_MASK (RTSEMRW_CNT_MASK << RTSEMRW_CNT_RD_SHIFT) 102 #define RTSEMRW_CNT_WR_SHIFT 16 103 #define RTSEMRW_CNT_WR_MASK (RTSEMRW_CNT_MASK << RTSEMRW_CNT_WR_SHIFT) 104 #define RTSEMRW_DIR_SHIFT 31 105 #define RTSEMRW_DIR_MASK RT_BIT_64(RTSEMRW_DIR_SHIFT) 106 #define RTSEMRW_DIR_READ UINT64_C(0) 107 #define RTSEMRW_DIR_WRITE UINT64_C(1) 108 109 #define RTSEMRW_WAIT_CNT_RD_SHIFT 32 110 #define RTSEMRW_WAIT_CNT_RD_MASK (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_RD_SHIFT) 111 //#define RTSEMRW_WAIT_CNT_WR_SHIFT 48 112 //#define RTSEMRW_WAIT_CNT_WR_MASK (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_WR_SHIFT) 113 114 115 116 RTDECL(int) RTSemRWCreate(PRTSEMRW phRWSem) 117 { 118 RTSEMRWINTERNAL *pThis = (RTSEMRWINTERNAL *)RTMemAlloc(sizeof(*pThis)); 141 119 if (!pThis) 142 120 return VERR_NO_MEMORY; 143 121 144 int rc = RTSemEventMultiCreate(&pThis-> aDirs[0].hEvt);122 int rc = RTSemEventMultiCreate(&pThis->hEvtRead); 145 123 if (RT_SUCCESS(rc)) 146 124 { 147 rc = RTSemEvent MultiCreate(&pThis->aDirs[1].hEvt);125 rc = RTSemEventCreate(&pThis->hEvtWrite); 148 126 if (RT_SUCCESS(rc)) 149 127 { 150 pThis->u32Magic = RTSEMXROADS_MAGIC; 151 pThis->u32Padding = 0; 152 pThis->u64State = 0; 153 pThis->aDirs[0].fNeedReset = false; 154 pThis->aDirs[1].fNeedReset = false; 155 *phXRoads = pThis; 128 pThis->u32Magic = RTSEMRW_MAGIC; 129 pThis->u32Padding = 0; 
130 pThis->u64State = 0; 131 pThis->hNativeWriter = NIL_RTNATIVETHREAD; 132 pThis->cWriterReads = 0; 133 pThis->cWriteRecursions = 0; 134 pThis->fNeedReset = false; 135 #ifdef RTSEMRW_STRICT 136 RTLockValidatorRecExclInit(&pThis->ValidatorWrite, NIL_RTLOCKVALIDATORCLASS, RTLOCKVALIDATOR_SUB_CLASS_NONE, "RTSemRW", pThis); 137 RTLockValidatorRecSharedInit(&pThis->ValidatorRead, NIL_RTLOCKVALIDATORCLASS, RTLOCKVALIDATOR_SUB_CLASS_NONE, "RTSemRW", pThis, false /*fSignaller*/); 138 RTLockValidatorRecMakeSiblings(&pThis->ValidatorWrite.Core, &pThis->ValidatorRead.Core); 139 #endif 140 141 *phRWSem = pThis; 156 142 return VINF_SUCCESS; 157 143 } 158 RTSemEventMultiDestroy(pThis-> aDirs[0].hEvt);144 RTSemEventMultiDestroy(pThis->hEvtRead); 159 145 } 160 146 return rc; … … 162 148 163 149 164 RTDECL(int) RTSem XRoadsDestroy(RTSEMXROADS hXRoads)150 RTDECL(int) RTSemRWDestroy(RTSEMRW hRWSem) 165 151 { 166 152 /* 167 153 * Validate input. 168 154 */ 169 RTSEM XROADSINTERNAL *pThis = hXRoads;170 if (pThis == NIL_RTSEM XROADS)155 RTSEMRWINTERNAL *pThis = hRWSem; 156 if (pThis == NIL_RTSEMRW) 171 157 return VINF_SUCCESS; 172 158 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 173 AssertReturn(pThis->u32Magic == RTSEM XROADS_MAGIC, VERR_INVALID_HANDLE);174 Assert(!(ASMAtomicReadU64(&pThis->u64State) & (RTSEM XROADS_CNT_NS_MASK | RTSEMXROADS_CNT_EW_MASK)));159 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE); 160 Assert(!(ASMAtomicReadU64(&pThis->u64State) & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK))); 175 161 176 162 /* 177 163 * Invalidate the object and free up the resources. 
178 164 */ 179 AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTSEMXROADS_MAGIC_DEAD, RTSEMXROADS_MAGIC), VERR_INVALID_HANDLE);180 181 RTSEMEVENTMULTI hEvt ;182 ASMAtomicXchgHandle(&pThis-> aDirs[0].hEvt, NIL_RTSEMEVENTMULTI, &hEvt);183 int rc = RTSemEventMultiDestroy(hEvt );165 AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, ~RTSEMRW_MAGIC, RTSEMRW_MAGIC), VERR_INVALID_HANDLE); 166 167 RTSEMEVENTMULTI hEvtRead; 168 ASMAtomicXchgHandle(&pThis->hEvtRead, NIL_RTSEMEVENTMULTI, &hEvtRead); 169 int rc = RTSemEventMultiDestroy(hEvtRead); 184 170 AssertRC(rc); 185 171 186 ASMAtomicXchgHandle(&pThis->aDirs[1].hEvt, NIL_RTSEMEVENTMULTI, &hEvt); 187 rc = RTSemEventMultiDestroy(hEvt); 172 RTSEMEVENT hEvtWrite; 173 ASMAtomicXchgHandle(&pThis->hEvtWrite, NIL_RTSEMEVENT, &hEvtWrite); 174 rc = RTSemEventDestroy(hEvtWrite); 188 175 AssertRC(rc); 189 176 177 #ifdef RTSEMRW_STRICT 178 RTLockValidatorRecSharedDelete(&pThis->ValidatorRead); 179 RTLockValidatorRecExclDelete(&pThis->ValidatorWrite); 180 #endif 190 181 RTMemFree(pThis); 191 182 return VINF_SUCCESS; … … 193 184 194 185 195 /** 196 * Internal worker for RTSemXRoadsNSEnter and RTSemXRoadsEWEnter. 197 * 198 * @returns IPRT status code. 199 * @param pThis The semaphore instace. 200 * @param fDir The direction. 201 * @param uCountShift The shift count for getting the count. 202 * @param fCountMask The mask for getting the count. 203 * @param uWaitCountShift The shift count for getting the wait count. 204 * @param fWaitCountMask The mask for getting the wait count. 
205 */ 206 DECL_FORCE_INLINE(int) rtSemXRoadsEnter(RTSEMXROADSINTERNAL *pThis, uint64_t fDir, 207 uint64_t uCountShift, uint64_t fCountMask, 208 uint64_t uWaitCountShift, uint64_t fWaitCountMask) 209 { 210 uint64_t u64OldState; 211 uint64_t u64State; 212 213 u64State = ASMAtomicReadU64(&pThis->u64State); 214 u64OldState = u64State; 215 add_hist(u64State, u64OldState, fDir, "enter"); 186 static int rtSemRWRequestRead(RTSEMRW hRWSem, unsigned cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos) 187 { 188 /* 189 * Validate input. 190 */ 191 RTSEMRWINTERNAL *pThis = hRWSem; 192 if (pThis == NIL_RTSEMRW) 193 return VINF_SUCCESS; 194 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 195 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE); 196 197 #ifdef RTSEMRW_STRICT 198 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt(); 199 if (cMillies > 0) 200 { 201 int rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos); 202 if (RT_FAILURE(rc9)) 203 return rc9; 204 } 205 #endif 206 207 /* 208 * Get cracking... 209 */ 210 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State); 211 uint64_t u64OldState = u64State; 216 212 217 213 for (;;) 218 214 { 219 if ((u64State & RTSEM XROADS_DIR_MASK) == (fDir << RTSEMXROADS_DIR_SHIFT))215 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT)) 220 216 { 221 217 /* It flows in the right direction, try follow it before it changes. 
*/ 222 uint64_t c = (u64State & fCountMask) >> uCountShift;218 uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT; 223 219 c++; 224 Assert(c < 8*_1K);225 u64State &= ~ fCountMask;226 u64State |= c << uCountShift;220 Assert(c < RTSEMRW_CNT_MASK / 2); 221 u64State &= ~RTSEMRW_CNT_RD_MASK; 222 u64State |= c << RTSEMRW_CNT_RD_SHIFT; 227 223 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 228 224 { 229 add_hist(u64State, u64OldState, fDir, "enter-simple"); 225 #ifdef RTSEMRW_STRICT 226 RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos); 227 #endif 230 228 break; 231 229 } 232 230 } 233 else if ((u64State & (RTSEM XROADS_CNT_NS_MASK | RTSEMXROADS_CNT_EW_MASK)) == 0)231 else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0) 234 232 { 235 233 /* Wrong direction, but we're alone here and can simply try switch the direction. */ 236 u64State &= ~(RTSEM XROADS_CNT_NS_MASK | RTSEMXROADS_CNT_EW_MASK | RTSEMXROADS_DIR_MASK);237 u64State |= (UINT64_C(1) << uCountShift) | (fDir << RTSEMXROADS_DIR_SHIFT);234 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK); 235 u64State |= (UINT64_C(1) << RTSEMRW_CNT_RD_SHIFT) | (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT); 238 236 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 239 237 { 240 Assert(!pThis->aDirs[fDir].fNeedReset); 241 add_hist(u64State, u64OldState, fDir, "enter-switch"); 238 Assert(!pThis->fNeedReset); 239 #ifdef RTSEMRW_STRICT 240 RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos); 241 #endif 242 242 break; 243 243 } … … 245 245 else 246 246 { 247 /* Is the writer perhaps doing a read recursion? 
*/ 248 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf(); 249 RTNATIVETHREAD hNativeWriter; 250 ASMAtomicReadHandle(&pThis->hNativeWriter, &hNativeWriter); 251 if (hNativeSelf == hNativeWriter) 252 { 253 #ifdef RTSEMRW_STRICT 254 int rc9 = RTLockValidatorRecExclRecursionMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core, pSrcPos); 255 if (RT_FAILURE(rc9)) 256 return rc9; 257 #endif 258 Assert(pThis->cWriterReads < UINT32_MAX / 2); 259 ASMAtomicIncU32(&pThis->cWriterReads); 260 return VINF_SUCCESS; /* don't break! */ 261 } 262 263 /* If the timeout is 0, return already. */ 264 if (!cMillies) 265 return VERR_TIMEOUT; 266 247 267 /* Add ourselves to the queue and wait for the direction to change. */ 248 uint64_t c = (u64State & fCountMask) >> uCountShift;268 uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT; 249 269 c++; 250 Assert(c < RTSEM XROADS_CNT_MASK / 2);251 252 uint64_t cWait = (u64State & fWaitCountMask) >> uWaitCountShift;270 Assert(c < RTSEMRW_CNT_MASK / 2); 271 272 uint64_t cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT; 253 273 cWait++; 254 274 Assert(cWait <= c); 255 Assert(cWait < RTSEM XROADS_CNT_MASK / 2);256 257 u64State &= ~( fCountMask | fWaitCountMask);258 u64State |= (c << uCountShift) | (cWait << uWaitCountShift);275 Assert(cWait < RTSEMRW_CNT_MASK / 2); 276 277 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK); 278 u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT); 259 279 260 280 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 261 281 { 262 add_hist(u64State, u64OldState, fDir, "enter-wait");263 282 for (uint32_t iLoop = 0; ; iLoop++) 264 283 { 265 int rc = RTSemEventMultiWait(pThis->aDirs[fDir].hEvt, RT_INDEFINITE_WAIT); 266 AssertRCReturn(rc, rc); 267 268 if (pThis->u32Magic != RTSEMXROADS_MAGIC) 269 return VERR_SEM_DESTROYED; 270 271 Assert(pThis->aDirs[fDir].fNeedReset); 284 int rc; 285 #ifdef RTSEMRW_STRICT 286 rc = 
RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true, 287 RTTHREADSTATE_RW_READ, false); 288 if (RT_SUCCESS(rc)) 289 #else 290 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false); 291 #endif 292 { 293 if (fInterruptible) 294 rc = RTSemEventMultiWaitNoResume(pThis->hEvtRead, cMillies); 295 else 296 rc = RTSemEventMultiWait(pThis->hEvtRead, cMillies); 297 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ); 298 if (pThis->u32Magic != RTSEMRW_MAGIC) 299 return VERR_SEM_DESTROYED; 300 } 301 if (RT_FAILURE(rc)) 302 { 303 /* Decrement the counts and return the error. */ 304 for (;;) 305 { 306 u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State); 307 c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT; Assert(c > 0); 308 c--; 309 cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0); 310 cWait--; 311 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK); 312 u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT); 313 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 314 break; 315 } 316 return rc; 317 } 318 319 Assert(pThis->fNeedReset); 272 320 u64State = ASMAtomicReadU64(&pThis->u64State); 273 add_hist(u64State, u64OldState, fDir, "enter-wakeup"); 274 if ((u64State & RTSEMXROADS_DIR_MASK) == (fDir << RTSEMXROADS_DIR_SHIFT)) 321 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT)) 275 322 break; 276 323 AssertMsg(iLoop < 1, ("%u\n", iLoop)); … … 282 329 u64OldState = u64State; 283 330 284 cWait = (u64State & fWaitCountMask) >> uWaitCountShift;331 cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT; 285 332 Assert(cWait > 0); 286 333 cWait--; 287 u64State &= ~ fWaitCountMask;288 u64State |= cWait << uWaitCountShift;334 u64State &= ~RTSEMRW_WAIT_CNT_RD_MASK; 335 u64State |= cWait << RTSEMRW_WAIT_CNT_RD_SHIFT; 289 336 290 337 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, 
u64OldState)) … … 292 339 if (cWait == 0) 293 340 { 294 if (ASMAtomicXchgBool(&pThis-> aDirs[fDir].fNeedReset, false))341 if (ASMAtomicXchgBool(&pThis->fNeedReset, false)) 295 342 { 296 add_hist(u64State, u64OldState, fDir, fDir ? "enter-reset-EW" : "enter-reset-NS"); 297 int rc = RTSemEventMultiReset(pThis->aDirs[fDir].hEvt); 343 int rc = RTSemEventMultiReset(pThis->hEvtRead); 298 344 AssertRCReturn(rc, rc); 299 345 } 300 else301 add_hist(u64State, u64OldState, fDir, "enter-dec-no-need");302 346 } 303 347 break; … … 305 349 u64State = ASMAtomicReadU64(&pThis->u64State); 306 350 } 351 352 #ifdef RTSEMRW_STRICT 353 RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos); 354 #endif 307 355 break; 308 356 } 309 310 add_hist(u64State, u64OldState, fDir, "enter-wait-failed"); 311 } 312 313 if (pThis->u32Magic != RTSEMXROADS_MAGIC) 357 } 358 359 if (pThis->u32Magic != RTSEMRW_MAGIC) 314 360 return VERR_SEM_DESTROYED; 315 361 … … 320 366 321 367 /* got it! */ 322 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEM XROADS_DIR_MASK) == (fDir << RTSEMXROADS_DIR_SHIFT));368 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT)); 323 369 return VINF_SUCCESS; 324 } 325 326 327 /** 328 * Internal worker for RTSemXRoadsNSLeave and RTSemXRoadsEWLeave. 329 * 330 * @returns IPRT status code. 331 * @param pThis The semaphore instace. 332 * @param fDir The direction. 333 * @param uCountShift The shift count for getting the count. 334 * @param fCountMask The mask for getting the count. 
335 */ 336 DECL_FORCE_INLINE(int) rtSemXRoadsLeave(RTSEMXROADSINTERNAL *pThis, uint64_t fDir, uint64_t uCountShift, uint64_t fCountMask) 337 { 370 371 } 372 373 374 #undef RTSemRWRequestRead 375 RTDECL(int) RTSemRWRequestRead(RTSEMRW RWSem, unsigned cMillies) 376 { 377 #ifndef RTSEMRW_STRICT 378 return rtSemRWRequestRead(RWSem, cMillies, false, NULL); 379 #else 380 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API(); 381 return rtSemRWRequestRead(RWSem, cMillies, false, &SrcPos); 382 #endif 383 } 384 RT_EXPORT_SYMBOL(RTSemRWRequestRead); 385 386 387 RTDECL(int) RTSemRWRequestReadDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL) 388 { 389 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API(); 390 return rtSemRWRequestRead(RWSem, cMillies, false, &SrcPos); 391 } 392 RT_EXPORT_SYMBOL(RTSemRWRequestReadDebug); 393 394 395 #undef RTSemRWRequestReadNoResume 396 RTDECL(int) RTSemRWRequestReadNoResume(RTSEMRW RWSem, unsigned cMillies) 397 { 398 #ifndef RTSEMRW_STRICT 399 return rtSemRWRequestRead(RWSem, cMillies, true, NULL); 400 #else 401 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API(); 402 return rtSemRWRequestRead(RWSem, cMillies, true, &SrcPos); 403 #endif 404 } 405 RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResume); 406 407 408 RTDECL(int) RTSemRWRequestReadNoResumeDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL) 409 { 410 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API(); 411 return rtSemRWRequestRead(RWSem, cMillies, true, &SrcPos); 412 } 413 RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResumeDebug); 414 415 416 417 RTDECL(int) RTSemRWReleaseRead(RTSEMRW RWSem) 418 { 419 /* 420 * Validate handle. 421 */ 422 RTSEMRWINTERNAL *pThis = RWSem; 423 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 424 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE); 425 426 /* 427 * Check the direction and take action accordingly. 
428 */ 429 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State); 430 uint64_t u64OldState = u64State; 431 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT)) 432 { 433 #ifdef RTSEMRW_STRICT 434 int rc9 = RTLockValidatorRecSharedCheckAndRelease(&pThis->ValidatorRead, NIL_RTTHREAD); 435 if (RT_FAILURE(rc9)) 436 return rc9; 437 #endif 438 for (;;) 439 { 440 uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT; 441 AssertReturn(c > 0, VERR_NOT_OWNER); 442 c--; 443 444 if ( c > 0 445 || (u64State & RTSEMRW_CNT_RD_MASK) == 0) 446 { 447 /* Don't change the direction. */ 448 u64State &= ~RTSEMRW_CNT_RD_MASK; 449 u64State |= c << RTSEMRW_CNT_RD_SHIFT; 450 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 451 break; 452 } 453 else 454 { 455 /* Reverse the direction and signal the reader threads. */ 456 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_DIR_MASK); 457 u64State |= RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT; 458 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 459 { 460 int rc = RTSemEventSignal(pThis->hEvtWrite); 461 AssertRC(rc); 462 break; 463 } 464 } 465 466 ASMNopPause(); 467 u64State = ASMAtomicReadU64(&pThis->u64State); 468 u64OldState = u64State; 469 } 470 } 471 else 472 { 473 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf(); 474 RTNATIVETHREAD hNativeWriter; 475 ASMAtomicReadHandle(&pThis->hNativeWriter, &hNativeWriter); 476 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER); 477 AssertReturn(pThis->cWriterReads > 0, VERR_NOT_OWNER); 478 #ifdef RTSEMRW_STRICT 479 int rc = RTLockValidatorRecExclUnwindMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core); 480 if (RT_FAILURE(rc)) 481 return rc; 482 #endif 483 ASMAtomicDecU32(&pThis->cWriterReads); 484 } 485 486 return VINF_SUCCESS; 487 } 488 RT_EXPORT_SYMBOL(RTSemRWReleaseRead); 489 490 491 DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, unsigned cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos) 492 { 493 /* 
494 * Validate input. 495 */ 496 RTSEMRWINTERNAL *pThis = hRWSem; 497 if (pThis == NIL_RTSEMRW) 498 return VINF_SUCCESS; 499 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 500 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE); 501 502 #ifdef RTSEMRW_STRICT 503 RTTHREAD hThreadSelf = NIL_RTTHREAD; 504 if (cMillies) 505 { 506 hThreadSelf = RTThreadSelfAutoAdopt(); 507 int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos); 508 if (RT_FAILURE(rc9)) 509 return rc9; 510 } 511 #endif 512 513 /* 514 * Check if we're already the owner and just recursing. 515 */ 516 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf(); 517 RTNATIVETHREAD hNativeWriter; 518 ASMAtomicReadHandle(&pThis->hNativeWriter, &hNativeWriter); 519 if (hNativeSelf == hNativeWriter) 520 { 521 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)); 522 #ifdef RTSEMRW_STRICT 523 int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorWrite, pSrcPos); 524 if (RT_FAILURE(rc9)) 525 return rc9; 526 #endif 527 Assert(pThis->cWriteRecursions < UINT32_MAX / 2); 528 ASMAtomicIncU32(&pThis->cWriteRecursions); 529 return VINF_SUCCESS; 530 } 531 532 /* 533 * Get cracking. 534 */ 535 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State); 536 uint64_t u64OldState = u64State; 537 338 538 for (;;) 339 539 { 340 uint64_t u64OldState; 341 uint64_t u64State; 342 uint64_t c; 343 540 if ( (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT) 541 || (u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) != 0) 542 { 543 /* It flows in the right direction, try follow it before it changes. 
*/ 544 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT; 545 c++; 546 Assert(c < RTSEMRW_CNT_MASK / 2); 547 u64State &= ~RTSEMRW_CNT_WR_MASK; 548 u64State |= c << RTSEMRW_CNT_WR_SHIFT; 549 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 550 break; 551 } 552 else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0) 553 { 554 /* Wrong direction, but we're alone here and can simply try switch the direction. */ 555 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK); 556 u64State |= (UINT64_C(1) << RTSEMRW_CNT_WR_SHIFT) | (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT); 557 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 558 break; 559 } 560 else if (!cMillies) 561 /* Wrong direction and we're not supposed to wait, just return. */ 562 return VERR_TIMEOUT; 563 else 564 { 565 /* Add ourselves to the write count and break out to do the wait. */ 566 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT; 567 c++; 568 Assert(c < RTSEMRW_CNT_MASK / 2); 569 u64State &= ~RTSEMRW_CNT_WR_MASK; 570 u64State |= c << RTSEMRW_CNT_WR_SHIFT; 571 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 572 break; 573 } 574 575 if (pThis->u32Magic != RTSEMRW_MAGIC) 576 return VERR_SEM_DESTROYED; 577 578 ASMNopPause(); 344 579 u64State = ASMAtomicReadU64(&pThis->u64State); 345 580 u64OldState = u64State; 346 347 /* The direction cannot change until we've left or we'll crash. */ 348 Assert((u64State & RTSEMXROADS_DIR_MASK) == (fDir << RTSEMXROADS_DIR_SHIFT)); 349 350 c = (u64State & fCountMask) >> uCountShift; 351 Assert(c > 0); 352 c--; 353 354 if ( c > 0 355 || (u64State & ((RTSEMXROADS_CNT_NS_MASK | RTSEMXROADS_CNT_EW_MASK) & ~fCountMask)) == 0) 356 { 357 /* We're not the last one across or there aren't any one waiting in the other direction. 
*/ 358 u64State &= ~fCountMask; 359 u64State |= c << uCountShift; 360 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 361 { 362 add_hist(u64State, u64OldState, fDir, "leave-simple"); 363 return VINF_SUCCESS; 364 } 365 } 366 else 367 { 368 /* Reverse the direction and signal the threads in the other direction. */ 369 u64State &= ~(fCountMask | RTSEMXROADS_DIR_MASK); 370 u64State |= (uint64_t)!fDir << RTSEMXROADS_DIR_SHIFT; 371 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 372 { 373 add_hist(u64State, u64OldState, fDir, fDir ? "leave-signal-NS" : "leave-signal-EW"); 374 Assert(!pThis->aDirs[!fDir].fNeedReset); 375 ASMAtomicWriteBool(&pThis->aDirs[!fDir].fNeedReset, true); 376 int rc = RTSemEventMultiSignal(pThis->aDirs[!fDir].hEvt); 377 AssertRC(rc); 378 return VINF_SUCCESS; 379 } 380 } 381 382 ASMNopPause(); 383 if (pThis->u32Magic != RTSEMXROADS_MAGIC) 384 return VERR_SEM_DESTROYED; 385 } 386 } 387 388 389 RTDECL(int) RTSemXRoadsNSEnter(RTSEMXROADS hXRoads) 581 } 582 583 /* 584 * If we're in write mode now try grab the ownership. Play fair if there 585 * are threads already waiting. 586 */ 587 bool fDone = (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT) 588 && ( ((u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT) == 1 589 || cMillies == 0); 590 if (fDone) 591 ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone); 592 if (!fDone) 593 { 594 /* 595 * Wait for our turn. 
596 */ 597 for (uint32_t iLoop = 0; ; iLoop++) 598 { 599 int rc; 600 #ifdef RTSEMRW_STRICT 601 if (cMillies) 602 { 603 if (hThreadSelf == NIL_RTTHREAD) 604 hThreadSelf = RTThreadSelfAutoAdopt(); 605 rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true, 606 RTTHREADSTATE_RW_WRITE, false); 607 } 608 else 609 rc = VINF_SUCCESS; 610 if (RT_SUCCESS(rc)) 611 #else 612 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false); 613 #endif 614 { 615 if (fInterruptible) 616 rc = RTSemEventWaitNoResume(pThis->hEvtWrite, cMillies); 617 else 618 rc = RTSemEventWait(pThis->hEvtWrite, cMillies); 619 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE); 620 if (pThis->u32Magic != RTSEMRW_MAGIC) 621 return VERR_SEM_DESTROYED; 622 } 623 if (RT_FAILURE(rc)) 624 { 625 /* Decrement the counts and return the error. */ 626 for (;;) 627 { 628 u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State); 629 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT; Assert(c > 0); 630 c--; 631 u64State &= ~RTSEMRW_CNT_WR_MASK; 632 u64State |= c << RTSEMRW_CNT_WR_SHIFT; 633 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 634 break; 635 } 636 return rc; 637 } 638 639 u64State = ASMAtomicReadU64(&pThis->u64State); 640 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)) 641 { 642 ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone); 643 if (fDone) 644 break; 645 } 646 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */ 647 } 648 } 649 650 /* 651 * Got it! 
652 */ 653 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)); 654 ASMAtomicWriteU32(&pThis->cWriteRecursions, 1); 655 Assert(pThis->cWriterReads == 0); 656 #ifdef RTSEMRW_STRICT 657 RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true); 658 #endif 659 660 return VINF_SUCCESS; 661 } 662 663 664 #undef RTSemRWRequestWrite 665 RTDECL(int) RTSemRWRequestWrite(RTSEMRW RWSem, unsigned cMillies) 666 { 667 #ifndef RTSEMRW_STRICT 668 return rtSemRWRequestWrite(RWSem, cMillies, false, NULL); 669 #else 670 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API(); 671 return rtSemRWRequestWrite(RWSem, cMillies, false, &SrcPos); 672 #endif 673 } 674 RT_EXPORT_SYMBOL(RTSemRWRequestWrite); 675 676 677 RTDECL(int) RTSemRWRequestWriteDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL) 678 { 679 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API(); 680 return rtSemRWRequestWrite(RWSem, cMillies, false, &SrcPos); 681 } 682 RT_EXPORT_SYMBOL(RTSemRWRequestWriteDebug); 683 684 685 #undef RTSemRWRequestWriteNoResume 686 RTDECL(int) RTSemRWRequestWriteNoResume(RTSEMRW RWSem, unsigned cMillies) 687 { 688 #ifndef RTSEMRW_STRICT 689 return rtSemRWRequestWrite(RWSem, cMillies, true, NULL); 690 #else 691 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API(); 692 return rtSemRWRequestWrite(RWSem, cMillies, true, &SrcPos); 693 #endif 694 } 695 RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResume); 696 697 698 RTDECL(int) RTSemRWRequestWriteNoResumeDebug(RTSEMRW RWSem, unsigned cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL) 699 { 700 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API(); 701 return rtSemRWRequestWrite(RWSem, cMillies, true, &SrcPos); 702 } 703 RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResumeDebug); 704 705 706 RTDECL(int) RTSemRWReleaseWrite(RTSEMRW RWSem) 707 { 708 709 /* 710 * Validate handle. 
711 */ 712 struct RTSEMRWINTERNAL *pThis = RWSem; 713 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 714 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE); 715 716 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf(); 717 RTNATIVETHREAD hNativeWriter; 718 ASMAtomicReadHandle(&pThis->hNativeWriter, &hNativeWriter); 719 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER); 720 721 /* 722 * Unwind a recursion. 723 */ 724 if (pThis->cWriteRecursions == 1) 725 { 726 AssertReturn(pThis->cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */ 727 #ifdef RTSEMRW_STRICT 728 int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorWrite, true); 729 if (RT_FAILURE(rc9)) 730 return rc9; 731 #endif 732 /* 733 * Update the state. 734 */ 735 ASMAtomicWriteU32(&pThis->cWriteRecursions, 0); 736 /** @todo validate order. */ 737 ASMAtomicWriteHandle(&pThis->hNativeWriter, NIL_RTNATIVETHREAD); 738 739 for (;;) 740 { 741 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State); 742 uint64_t u64OldState = u64State; 743 744 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT; 745 Assert(c > 0); 746 c--; 747 748 if ( c > 0 749 || (u64State & RTSEMRW_CNT_RD_MASK) == 0) 750 { 751 /* Don't change the direction, wait up the next writer if any. */ 752 u64State &= ~RTSEMRW_CNT_WR_MASK; 753 u64State |= c << RTSEMRW_CNT_WR_SHIFT; 754 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 755 { 756 if (c > 0) 757 { 758 int rc = RTSemEventSignal(pThis->hEvtWrite); 759 AssertRC(rc); 760 } 761 break; 762 } 763 } 764 else 765 { 766 /* Reverse the direction and signal the reader threads. 
*/ 767 u64State &= ~(RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK); 768 u64State |= RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT; 769 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState)) 770 { 771 Assert(!pThis->fNeedReset); 772 ASMAtomicWriteBool(&pThis->fNeedReset, true); 773 int rc = RTSemEventMultiSignal(pThis->hEvtRead); 774 AssertRC(rc); 775 break; 776 } 777 } 778 779 ASMNopPause(); 780 if (pThis->u32Magic != RTSEMRW_MAGIC) 781 return VERR_SEM_DESTROYED; 782 } 783 } 784 else 785 { 786 Assert(pThis->cWriteRecursions != 0); 787 #ifdef RTSEMRW_STRICT 788 int rc9 = RTLockValidatorRecExclUnwind(&pThis->ValidatorWrite); 789 if (RT_FAILURE(rc9)) 790 return rc9; 791 #endif 792 ASMAtomicDecU32(&pThis->cWriteRecursions); 793 } 794 795 return VINF_SUCCESS; 796 } 797 RT_EXPORT_SYMBOL(RTSemRWReleaseWrite); 798 799 800 RTDECL(bool) RTSemRWIsWriteOwner(RTSEMRW RWSem) 801 { 802 /* 803 * Validate handle. 804 */ 805 struct RTSEMRWINTERNAL *pThis = RWSem; 806 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 807 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE); 808 809 /* 810 * Check ownership. 811 */ 812 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf(); 813 RTNATIVETHREAD hNativeWriter; 814 ASMAtomicReadHandle(&pThis->hNativeWriter, &hNativeWriter); 815 return hNativeWriter == hNativeSelf; 816 } 817 RT_EXPORT_SYMBOL(RTSemRWIsWriteOwner); 818 819 820 RTDECL(uint32_t) RTSemRWGetWriteRecursion(RTSEMRW RWSem) 821 { 822 /* 823 * Validate handle. 824 */ 825 struct RTSEMRWINTERNAL *pThis = RWSem; 826 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 827 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE); 828 829 /* 830 * Return the requested data. 831 */ 832 return pThis->cWriteRecursions; 833 } 834 RT_EXPORT_SYMBOL(RTSemRWGetWriteRecursion); 835 836 837 RTDECL(uint32_t) RTSemRWGetWriterReadRecursion(RTSEMRW RWSem) 838 { 839 /* 840 * Validate handle. 
841 */ 842 struct RTSEMRWINTERNAL *pThis = RWSem; 843 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 844 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE); 845 846 /* 847 * Return the requested data. 848 */ 849 return pThis->cWriterReads; 850 } 851 RT_EXPORT_SYMBOL(RTSemRWGetWriterReadRecursion); 852 853 854 RTDECL(uint32_t) RTSemRWGetReadCount(RTSEMRW RWSem) 390 855 { 391 856 /* 392 857 * Validate input. 393 858 */ 394 RTSEMXROADSINTERNAL *pThis = hXRoads; 395 if (pThis == NIL_RTSEMXROADS) 396 return VINF_SUCCESS; 397 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 398 AssertReturn(pThis->u32Magic == RTSEMXROADS_MAGIC, VERR_INVALID_HANDLE); 399 400 return rtSemXRoadsEnter(pThis, 0, RTSEMXROADS_CNT_NS_SHIFT, RTSEMXROADS_CNT_NS_MASK, RTSEMXROADS_WAIT_CNT_NS_SHIFT, RTSEMXROADS_WAIT_CNT_NS_MASK); 401 } 402 403 404 RTDECL(int) RTSemXRoadsNSLeave(RTSEMXROADS hXRoads) 405 { 406 /* 407 * Validate input. 408 */ 409 RTSEMXROADSINTERNAL *pThis = hXRoads; 410 if (pThis == NIL_RTSEMXROADS) 411 return VINF_SUCCESS; 412 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 413 AssertReturn(pThis->u32Magic == RTSEMXROADS_MAGIC, VERR_INVALID_HANDLE); 414 415 return rtSemXRoadsLeave(pThis, 0, RTSEMXROADS_CNT_NS_SHIFT, RTSEMXROADS_CNT_NS_MASK); 416 } 417 418 419 RTDECL(int) RTSemXRoadsEWEnter(RTSEMXROADS hXRoads) 420 { 421 /* 422 * Validate input. 423 */ 424 RTSEMXROADSINTERNAL *pThis = hXRoads; 425 if (pThis == NIL_RTSEMXROADS) 426 return VINF_SUCCESS; 427 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 428 AssertReturn(pThis->u32Magic == RTSEMXROADS_MAGIC, VERR_INVALID_HANDLE); 429 430 return rtSemXRoadsEnter(pThis, 1, RTSEMXROADS_CNT_EW_SHIFT, RTSEMXROADS_CNT_EW_MASK, RTSEMXROADS_WAIT_CNT_EW_SHIFT, RTSEMXROADS_WAIT_CNT_EW_MASK); 431 } 432 433 434 RTDECL(int) RTSemXRoadsEWLeave(RTSEMXROADS hXRoads) 435 { 436 /* 437 * Validate input. 
438 */ 439 RTSEMXROADSINTERNAL *pThis = hXRoads; 440 if (pThis == NIL_RTSEMXROADS) 441 return VINF_SUCCESS; 442 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 443 AssertReturn(pThis->u32Magic == RTSEMXROADS_MAGIC, VERR_INVALID_HANDLE); 444 445 return rtSemXRoadsLeave(pThis, 1, RTSEMXROADS_CNT_EW_SHIFT, RTSEMXROADS_CNT_EW_MASK); 446 } 447 859 struct RTSEMRWINTERNAL *pThis = RWSem; 860 AssertPtrReturn(pThis, 0); 861 AssertMsgReturn(pThis->u32Magic == RTSEMRW_MAGIC, 862 ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), 863 0); 864 865 /* 866 * Return the requested data. 867 */ 868 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State); 869 if ((u64State & RTSEMRW_DIR_MASK) != (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT)) 870 return 0; 871 return (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT; 872 } 873 RT_EXPORT_SYMBOL(RTSemRWGetReadCount); 874
Note: See TracChangeset for help on using the changeset viewer.

