Changeset 92777 in vbox
- Timestamp:
- Dec 7, 2021 1:25:16 AM (3 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/Runtime/r3/linux/semevent-linux.cpp
r82968 r92777 1 1 /* $Id$ */ 2 2 /** @file 3 * IPRT - Event Semaphore, Linux (2.6. x+).3 * IPRT - Event Semaphore, Linux (2.6.0 and later). 4 4 */ 5 5 … … 37 37 * against glibc < 2.6. 38 38 */ 39 # include "../posix/semevent-posix.cpp"39 # include "../posix/semevent-posix.cpp" 40 40 __asm__ (".global epoll_pwait"); 41 41 … … 70 70 # define FUTEX_WAIT 0 71 71 # define FUTEX_WAKE 1 72 # define FUTEX_WAIT_BITSET 9 /**< @since 2.6.25 - uses absolute timeout. */ 72 73 #endif 73 74 … … 100 101 101 102 103 /********************************************************************************************************************************* 104 * Global Variables * 105 *********************************************************************************************************************************/ 106 static int volatile g_fCanUseWaitBitSet = -1; 107 108 102 109 /** 103 110 * Wrapper for the futex syscall. … … 116 123 117 124 125 DECLINLINE(void) rtSemLinuxCheckForFutexWaitBitSetSlow(int volatile *pfCanUseWaitBitSet) 126 { 127 uint32_t uTestVar = UINT32_MAX; 128 long rc = sys_futex(&uTestVar, FUTEX_WAIT_BITSET, UINT32_C(0xf0f0f0f0), NULL, NULL, UINT32_MAX); 129 *pfCanUseWaitBitSet = rc == -EAGAIN; 130 AssertMsg(rc == -ENOSYS || rc == -EAGAIN, ("%d\n", rc)); 131 } 132 133 134 DECLINLINE(void) rtSemLinuxCheckForFutexWaitBitSet(int volatile *pfCanUseWaitBitSet) 135 { 136 if (*pfCanUseWaitBitSet != -1) 137 { /* likely */ } 138 else 139 rtSemLinuxCheckForFutexWaitBitSetSlow(pfCanUseWaitBitSet); 140 } 141 142 118 143 119 144 RTDECL(int) RTSemEventCreate(PRTSEMEVENT phEventSem) … … 127 152 AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER); 128 153 Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL)); 154 155 /* 156 * Make sure we know whether FUTEX_WAIT_BITSET works. 
157 */ 158 rtSemLinuxCheckForFutexWaitBitSet(&g_fCanUseWaitBitSet); 159 #if defined(DEBUG_bird) && !defined(IN_GUEST) 160 Assert(g_fCanUseWaitBitSet == true); 161 #endif 129 162 130 163 /* … … 240 273 241 274 242 static int rtSemEventWait(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies, bool fAutoResume) 243 { 244 #ifdef RTSEMEVENT_STRICT 245 PCRTLOCKVALSRCPOS pSrcPos = NULL; 246 #endif 247 248 /* 249 * Validate input. 250 */ 251 struct RTSEMEVENTINTERNAL *pThis = hEventSem; 252 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 253 AssertReturn(pThis->iMagic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE); 254 255 /* 256 * Quickly check whether it's signaled. 257 */ 258 /** @todo this isn't fair if someone is already waiting on it. They should 259 * have the first go at it! 260 * (ASMAtomicReadS32(&pThis->cWaiters) == 0 || !cMillies) && ... */ 261 if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1)) 275 /** 276 * Performs an indefinite wait on the event. 277 */ 278 static int rtSemEventLinuxWaitIndefinite(struct RTSEMEVENTINTERNAL *pThis, uint32_t fFlags, PCRTLOCKVALSRCPOS pSrcPos) 279 { 280 RT_NOREF_PV(pSrcPos); 281 282 /* 283 * Quickly check whether it's signaled and there are no other waiters. 284 */ 285 uint32_t cWaiters = ASMAtomicIncS32(&pThis->cWaiters); 286 if ( cWaiters == 1 287 && ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1)) 288 { 289 ASMAtomicDecS32(&pThis->cWaiters); 262 290 return VINF_SUCCESS; 263 264 /* 265 * Convert the timeout value. 
266 */ 267 struct timespec ts; 268 struct timespec *pTimeout = NULL; 269 uint64_t u64End = 0; /* shut up gcc */ 270 if (cMillies != RT_INDEFINITE_WAIT) 271 { 272 if (!cMillies) 273 return VERR_TIMEOUT; 274 ts.tv_sec = cMillies / 1000; 275 ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000); 276 u64End = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000); 277 pTimeout = &ts; 278 } 279 280 ASMAtomicIncS32(&pThis->cWaiters); 291 } 281 292 282 293 /* … … 297 308 { 298 309 rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false, 299 cMillies, RTTHREADSTATE_EVENT, true);310 RT_INDEFINITE_WAIT, RTTHREADSTATE_EVENT, true); 300 311 if (RT_FAILURE(rc)) 301 312 break; … … 303 314 #endif 304 315 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true); 305 long lrc = sys_futex(&pThis->fSignalled, FUTEX_WAIT, 0, pTimeout, NULL, 0);316 long lrc = sys_futex(&pThis->fSignalled, FUTEX_WAIT, 0, NULL /*pTimeout*/, NULL, 0); 306 317 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT); 307 318 if (RT_UNLIKELY(pThis->iMagic != RTSEMEVENT_MAGIC)) … … 324 335 else if (lrc == -EINTR) 325 336 { 326 if ( !fAutoResume)337 if (fFlags & RTSEMWAIT_FLAGS_NORESUME) 327 338 { 328 339 rc = VERR_INTERRUPTED; … … 337 348 break; 338 349 } 350 } 351 352 ASMAtomicDecS32(&pThis->cWaiters); 353 return rc; 354 } 355 356 357 static int rtSemEventLinuxWaitPoll(struct RTSEMEVENTINTERNAL *pThis) 358 { 359 /* 360 * What we do here isn't quite fair to anyone else waiting on it, however 361 it might not be as bad as all that for callers making repeated poll calls 362 because they cannot block, as that would be a virtual wait but without the 363 chance of a permanent queue position. So, I hope we can live with this. 
364 */ 365 if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1)) 366 return VINF_SUCCESS; 367 return VERR_TIMEOUT; 368 } 369 370 371 static int rtSemEventLinuxWaitTimed(struct RTSEMEVENTINTERNAL *pThis, uint32_t fFlags, 372 uint64_t uTimeout, PCRTLOCKVALSRCPOS pSrcPos) 373 { 374 RT_NOREF_PV(pSrcPos); 375 376 /* 377 * Convert the timeout value. 378 */ 379 int iWaitOp; 380 uint32_t uWaitVal3; 381 timespec TsTimeout; 382 uint64_t uAbsTimeout = uTimeout; /* Note! only relevant for relative waits (FUTEX_WAIT). */ 383 if (fFlags & RTSEMWAIT_FLAGS_RELATIVE) 384 { 385 if (!uTimeout) 386 return rtSemEventLinuxWaitPoll(pThis); 387 388 if (fFlags & RTSEMWAIT_FLAGS_MILLISECS) 389 { 390 if ( sizeof(TsTimeout.tv_sec) >= sizeof(uint64_t) 391 || uTimeout < (uint64_t)UINT32_MAX * RT_MS_1SEC) 392 { 393 TsTimeout.tv_sec = uTimeout / RT_MS_1SEC; 394 TsTimeout.tv_nsec = (uTimeout % RT_MS_1SEC) & RT_NS_1MS; 395 uAbsTimeout *= RT_NS_1MS; 396 } 397 else 398 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos); 399 } 400 else 401 { 402 Assert(fFlags & RTSEMWAIT_FLAGS_NANOSECS); 403 if ( sizeof(TsTimeout.tv_sec) >= sizeof(uint64_t) 404 || uTimeout < (uint64_t)UINT32_MAX * RT_NS_1SEC) 405 { 406 TsTimeout.tv_sec = uTimeout / RT_NS_1SEC; 407 TsTimeout.tv_nsec = uTimeout % RT_NS_1SEC; 408 } 409 else 410 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos); 411 } 412 413 if (fFlags & RTSEMWAIT_FLAGS_RESUME) 414 uAbsTimeout += RTTimeNanoTS(); 415 416 iWaitOp = FUTEX_WAIT; 417 uWaitVal3 = 0; 418 } 419 else 420 { 421 /* Absolute deadline: */ 422 Assert(fFlags & RTSEMWAIT_FLAGS_ABSOLUTE); 423 if (g_fCanUseWaitBitSet == true) 424 { 425 if (fFlags & RTSEMWAIT_FLAGS_MILLISECS) 426 { 427 if ( sizeof(TsTimeout.tv_sec) >= sizeof(uint64_t) 428 || uTimeout < (uint64_t)UINT32_MAX * RT_MS_1SEC) 429 { 430 TsTimeout.tv_sec = uTimeout / RT_MS_1SEC; 431 TsTimeout.tv_nsec = (uTimeout % RT_MS_1SEC) & RT_NS_1MS; 432 } 433 else 434 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos); 435 } 436 
else 437 { 438 Assert(fFlags & RTSEMWAIT_FLAGS_NANOSECS); 439 if ( sizeof(TsTimeout.tv_sec) >= sizeof(uint64_t) 440 || uTimeout < (uint64_t)UINT32_MAX * RT_NS_1SEC) 441 { 442 TsTimeout.tv_sec = uTimeout / RT_NS_1SEC; 443 TsTimeout.tv_nsec = uTimeout % RT_NS_1SEC; 444 } 445 else 446 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos); 447 } 448 iWaitOp = FUTEX_WAIT_BITSET; 449 uWaitVal3 = UINT32_MAX; 450 } 451 else 452 { 453 /* Recalculate it as a relative timeout: */ 454 if (fFlags & RTSEMWAIT_FLAGS_MILLISECS) 455 { 456 if (uTimeout < UINT64_MAX / RT_NS_1MS) 457 uAbsTimeout = uTimeout *= RT_NS_1MS; 458 else 459 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos); 460 } 461 462 uint64_t const u64Now = RTTimeNanoTS(); 463 if (u64Now < uTimeout) 464 uTimeout -= u64Now; 465 else 466 return rtSemEventLinuxWaitPoll(pThis); 467 468 if ( sizeof(TsTimeout.tv_sec) >= sizeof(uint64_t) 469 || uTimeout < (uint64_t)UINT32_MAX * RT_NS_1SEC) 470 { 471 TsTimeout.tv_sec = uTimeout / RT_NS_1SEC; 472 TsTimeout.tv_nsec = uTimeout % RT_NS_1SEC; 473 } 474 else 475 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos); 476 477 iWaitOp = FUTEX_WAIT; 478 uWaitVal3 = 0; 479 } 480 } 481 482 /* 483 * Quickly check whether it's signaled and there are no other waiters. 484 */ 485 uint32_t cWaiters = ASMAtomicIncS32(&pThis->cWaiters); 486 if ( cWaiters == 1 487 && ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1)) 488 { 489 ASMAtomicDecS32(&pThis->cWaiters); 490 return VINF_SUCCESS; 491 } 492 493 /* 494 * The wait loop. 495 */ 496 #ifdef RTSEMEVENT_STRICT 497 RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) 498 ? 
RTThreadSelfAutoAdopt() 499 : RTThreadSelf(); 500 #else 501 RTTHREAD hThreadSelf = RTThreadSelf(); 502 #endif 503 int rc = VINF_SUCCESS; 504 for (;;) 505 { 506 #ifdef RTSEMEVENT_STRICT 507 if (pThis->fEverHadSignallers) 508 { 509 rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false, 510 iWaitOp == FUTEX_WAIT ? uTimeout / RT_NS_1MS : RT_MS_1HOUR /*whatever*/, 511 RTTHREADSTATE_EVENT, true); 512 if (RT_FAILURE(rc)) 513 break; 514 } 515 #endif 516 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true); 517 long lrc = sys_futex(&pThis->fSignalled, iWaitOp, 0, &TsTimeout, NULL, uWaitVal3); 518 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT); 519 if (RT_UNLIKELY(pThis->iMagic != RTSEMEVENT_MAGIC)) 520 { 521 rc = VERR_SEM_DESTROYED; 522 break; 523 } 524 525 if (RT_LIKELY(lrc == 0 || lrc == -EWOULDBLOCK)) 526 { 527 /* successful wakeup or fSignalled > 0 in the meantime */ 528 if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1)) 529 break; 530 } 531 else if (lrc == -ETIMEDOUT) 532 { 533 rc = VERR_TIMEOUT; 534 break; 535 } 536 else if (lrc == -EINTR) 537 { 538 if (fFlags & RTSEMWAIT_FLAGS_NORESUME) 539 { 540 rc = VERR_INTERRUPTED; 541 break; 542 } 543 } 544 else 545 { 546 /* this shouldn't happen! 
*/ 547 AssertMsgFailed(("rc=%ld errno=%d\n", lrc, errno)); 548 rc = RTErrConvertFromErrno(lrc); 549 break; 550 } 551 339 552 /* adjust the relative timeout */ 340 if ( pTimeout)341 { 342 int64_t i64Diff = u 64End- RTTimeSystemNanoTS();553 if (iWaitOp == FUTEX_WAIT) 554 { 555 int64_t i64Diff = uAbsTimeout - RTTimeSystemNanoTS(); 343 556 if (i64Diff < 1000) 344 557 { … … 346 559 break; 347 560 } 348 ts.tv_sec = (uint64_t)i64Diff / UINT32_C(1000000000);349 ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);561 TsTimeout.tv_sec = (uint64_t)i64Diff / RT_NS_1SEC; 562 TsTimeout.tv_nsec = (uint64_t)i64Diff % RT_NS_1SEC; 350 563 } 351 564 } … … 356 569 357 570 358 RTDECL(int) RTSemEventWait(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies) 359 { 360 int rc = rtSemEventWait(hEventSem, cMillies, true); 571 /** 572 * Internal wait worker function. 573 */ 574 DECLINLINE(int) rtSemEventLinuxWait(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout, PCRTLOCKVALSRCPOS pSrcPos) 575 { 576 /* 577 * Validate input. 578 */ 579 struct RTSEMEVENTINTERNAL *pThis = hEventSem; 580 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 581 uint32_t fSignalled = pThis->fSignalled; 582 AssertReturn(fSignalled == false || fSignalled == true, VERR_INVALID_HANDLE); 583 AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER); 584 585 /* 586 * Timed or indefinite wait? 
587 */ 588 if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE) 589 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos); 590 return rtSemEventLinuxWaitTimed(hEventSem, fFlags, uTimeout, pSrcPos); 591 } 592 593 594 RTDECL(int) RTSemEventWait(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies) 595 { 596 int rc; 597 #ifndef RTSEMEVENT_STRICT 598 if (cMillies == RT_INDEFINITE_WAIT) 599 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_RESUME | RTSEMWAIT_FLAGS_INDEFINITE, 0, NULL); 600 else 601 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_RESUME | RTSEMWAIT_FLAGS_RELATIVE | RTSEMWAIT_FLAGS_MILLISECS, 602 cMillies, NULL); 603 #else 604 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API(); 605 if (cMillies == RT_INDEFINITE_WAIT) 606 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_RESUME | RTSEMWAIT_FLAGS_INDEFINITE, 0, &SrcPos); 607 else 608 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_RESUME | RTSEMWAIT_FLAGS_RELATIVE | RTSEMWAIT_FLAGS_MILLISECS, 609 cMillies, &SrcPos); 610 #endif 361 611 Assert(rc != VERR_INTERRUPTED); 362 Assert(rc != VERR_TIMEOUT || cMillies != RT_INDEFINITE_WAIT);363 612 return rc; 364 613 } … … 367 616 RTDECL(int) RTSemEventWaitNoResume(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies) 368 617 { 369 return rtSemEventWait(hEventSem, cMillies, false); 618 int rc; 619 #ifndef RTSEMEVENT_STRICT 620 if (cMillies == RT_INDEFINITE_WAIT) 621 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_NORESUME | RTSEMWAIT_FLAGS_INDEFINITE, 0, NULL); 622 else 623 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_NORESUME | RTSEMWAIT_FLAGS_RELATIVE | RTSEMWAIT_FLAGS_MILLISECS, 624 cMillies, NULL); 625 #else 626 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API(); 627 if (cMillies == RT_INDEFINITE_WAIT) 628 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_NORESUME | RTSEMWAIT_FLAGS_INDEFINITE, 0, &SrcPos); 629 else 630 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_NORESUME | RTSEMWAIT_FLAGS_RELATIVE | RTSEMWAIT_FLAGS_MILLISECS, 631 
cMillies, &SrcPos); 632 #endif 633 Assert(rc != VERR_INTERRUPTED); 634 return rc; 635 } 636 637 638 RTDECL(int) RTSemEventWaitEx(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout) 639 { 640 #ifndef RTSEMEVENT_STRICT 641 return rtSemEventLinuxWait(hEventSem, fFlags, uTimeout, NULL); 642 #else 643 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API(); 644 return rtSemEventLinuxWait(hEventSem, fFlags, uTimeout, &SrcPos); 645 #endif 646 } 647 648 649 RTDECL(int) RTSemEventWaitExDebug(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout, 650 RTHCUINTPTR uId, RT_SRC_POS_DECL) 651 { 652 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API(); 653 return rtSemEventLinuxWait(hEventSem, fFlags, uTimeout, &SrcPos); 654 } 655 656 657 RTDECL(uint32_t) RTSemEventGetResolution(void) 658 { 659 /** @todo we have 1ns parameter resolution, but need to verify that this is what 660 * the kernel actually will use when setting the timer. Most likely 661 * it's rounded a little, but hopefully not to a multiple of HZ. */ 662 return 1; 370 663 } 371 664
Note:
See TracChangeset
for help on using the changeset viewer.

