Changeset 87792 in vbox
- Timestamp: Feb 18, 2021 6:38:24 PM (4 years ago)
- Location: trunk
- Files: 15 edited
  - include/VBox/err.h (modified) (1 diff)
  - include/VBox/vmm/gvm.h (modified) (1 diff)
  - include/VBox/vmm/gvm.mac (modified) (1 diff)
  - include/VBox/vmm/tm.h (modified) (2 diffs)
  - include/VBox/vmm/vm.h (modified) (2 diffs)
  - include/VBox/vmm/vm.mac (modified) (1 diff)
  - include/VBox/vmm/vmm.h (modified) (2 diffs)
  - src/VBox/VMM/Makefile.kmk (modified) (1 diff)
  - src/VBox/VMM/VMMAll/TMAll.cpp (modified) (41 diffs)
  - src/VBox/VMM/VMMAll/TMAllVirtual.cpp (modified) (7 diffs)
  - src/VBox/VMM/VMMR0/GVMMR0.cpp (modified) (2 diffs)
  - src/VBox/VMM/VMMR0/VMMR0.cpp (modified) (2 diffs)
  - src/VBox/VMM/VMMR3/TM.cpp (modified) (43 diffs)
  - src/VBox/VMM/include/TMInline.h (modified) (4 diffs)
  - src/VBox/VMM/include/TMInternal.h (modified) (16 diffs)
Legend:
- ' ' Unmodified
- '+' Added
- '-' Removed
trunk/include/VBox/err.h
r87482 → r87792

@@ -1031,4 +1031,28 @@
 /** Invalid value for cVirtualTicking. */
 #define VERR_TM_VIRTUAL_TICKING_IPE         (-2211)
+/** Max timer limit reached. */
+#define VERR_TM_TOO_MANY_TIMERS             (-2212)
+/** Invalid timer queue number. */
+#define VERR_TM_INVALID_TIMER_QUEUE         (-2213)
+/** The timer queue is not longer allowed to grow. */
+#define VERR_TM_TIMER_QUEUE_CANNOT_GROW     (-2214)
+/** TM internal processing error \#1. */
+#define VERR_TM_IPE_1                       (-2291)
+/** TM internal processing error \#2. */
+#define VERR_TM_IPE_2                       (-2292)
+/** TM internal processing error \#3. */
+#define VERR_TM_IPE_3                       (-2293)
+/** TM internal processing error \#4. */
+#define VERR_TM_IPE_4                       (-2294)
+/** TM internal processing error \#5. */
+#define VERR_TM_IPE_5                       (-2295)
+/** TM internal processing error \#6. */
+#define VERR_TM_IPE_6                       (-2296)
+/** TM internal processing error \#7. */
+#define VERR_TM_IPE_7                       (-2297)
+/** TM internal processing error \#8. */
+#define VERR_TM_IPE_8                       (-2298)
+/** TM internal processing error \#9. */
+#define VERR_TM_IPE_9                       (-2299)
 /** @} */
 
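The three new VERR_TM_* codes and the VERR_TM_IPE_1..9 range back the reworked timer allocation later in this changeset. For instance, once a queue has been frozen after ring-0 init, timer creation refuses to grow it; a condensed sketch of that guard, based on the tmr3TimerCreate hunk in the TM.cpp diff below:

    if (!pQueue->cTimersFree)
    {
        /* Queues are frozen in TMR3InitFinalize; afterwards they may not be reallocated. */
        AssertReturn(!pQueue->fCannotGrow, VERR_TM_TIMER_QUEUE_CANNOT_GROW);
        /* ...otherwise ring-0 is asked to grow the shared array; a result that is
           still too small is treated as an internal processing error: */
        AssertReturnStmt(pQueue->cTimersAlloc >= cTimersAlloc,
                         PDMCritSectRwLeaveExcl(&pQueue->AllocLock), VERR_TM_IPE_3);
    }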
trunk/include/VBox/vmm/gvm.h
r87511 → r87792

@@ -255,9 +255,17 @@
     } dbgfr0;
 
+    union
+    {
+#if defined(VMM_INCLUDED_SRC_include_TMInternal_h) && defined(IN_RING0)
+        TMR0PERVM   s;
+#endif
+        uint8_t     padding[128];
+    } tmr0;
+
     /** Padding so aCpus starts on a page boundrary. */
 #ifdef VBOX_WITH_NEM_R0
-    uint8_t         abPadding2[4096*2 - 64 - 256 - 256 - 1024 - 256 - 64 - 2176 - 640 - 512 - 64 - 1024 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];
+    uint8_t         abPadding2[4096*2 - 64 - 256 - 256 - 1024 - 256 - 64 - 2176 - 640 - 512 - 64 - 1024 - 128 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];
 #else
-    uint8_t         abPadding2[4096*2 - 64 - 256 - 256 - 1024 - 64 - 2176 - 640 - 512 - 64 - 1024 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];
+    uint8_t         abPadding2[4096*2 - 64 - 256 - 256 - 1024 - 64 - 2176 - 640 - 512 - 64 - 1024 - 128 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];
 #endif
 
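The new tmr0 member follows the established GVM pattern: ring-0 TM code that includes TMInternal.h sees the real TMR0PERVM, everything else sees an opaque 128-byte blob, and the same 128 bytes are subtracted from abPadding2 in both branches so aCpus keeps starting on a page boundary. A guard of roughly this shape (an assumption here, it is not part of the shown diff) is what normally keeps the real structure inside the reserved blob:

    AssertCompile(sizeof(TMR0PERVM) <= 128);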
trunk/include/VBox/vmm/gvm.mac
r87511 → r87792

@@ -82,4 +82,6 @@
         alignb 64
         .dbgfr0                 resb 1024
+        alignb 64
+        .tmr0                   resb 128
 
         times ((($ + VMM_MAX_CPU_COUNT * RTR0PTR_CB + 4095) & ~4095) - ($ + VMM_MAX_CPU_COUNT * RTR0PTR_CB)) resb 1
trunk/include/VBox/vmm/tm.h
r87773 → r87792

@@ -262,5 +262,4 @@
 
 
-#ifdef IN_RING3
 /** @defgroup grp_tm_r3 The TM Host Context Ring-3 API
  * @{
@@ -296,5 +295,13 @@
 VMMR3_INT_DECL(bool)    TMR3CpuTickIsFixedRateMonotonic(PVM pVM, bool fWithParavirtEnabled);
 /** @} */
-#endif /* IN_RING3 */
+
+
+/** @defgroup grp_tm_r0 The TM Host Context Ring-0 API
+ * @{
+ */
+VMMR0_INT_DECL(void)    TMR0InitPerVMData(PGVM pGVM);
+VMMR0_INT_DECL(void)    TMR0CleanupVM(PGVM pGVM);
+VMMR0_INT_DECL(int)     TMR0TimerQueueGrow(PGVM pGVM, uint32_t idxQueue, uint32_t cMinTimers);
+/** @} */
 
 
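TMR0TimerQueueGrow cannot be called directly from ring-3; the TM.cpp hunks below reach it through the new VMMR0_DO_TM_GROW_TIMER_QUEUE operation, packing the requested timer count into the low dword of u64Arg and the queue index into the high dword:

    /* Ring-3 side (pattern from the TM.cpp hunks in this changeset): */
    uint32_t const cNewTimers = pQueue->cTimersAlloc + 64;
    int rc = VMMR3CallR0Emt(pVM, VMMGetCpu(pVM), VMMR0_DO_TM_GROW_TIMER_QUEUE,
                            RT_MAKE_U64(cNewTimers, idxQueue), NULL /*pReqHdr*/);

    /* Ring-0 side (VMMR0.cpp) unpacks it as: */
    rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));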
trunk/include/VBox/vmm/vm.h
r87777 → r87792

@@ -1364,5 +1364,5 @@
         struct TM   s;
 #endif
-        uint8_t     padding[7872];      /* multiple of 64 */
+        uint8_t     padding[9152];      /* multiple of 64 */
     } tm;
 
@@ -1463,5 +1463,5 @@
 
     /** Padding for aligning the structure size on a page boundrary. */
-    uint8_t         abAlignment2[3672 - sizeof(PVMCPUR3) * VMM_MAX_CPU_COUNT];
+    uint8_t         abAlignment2[2392 - sizeof(PVMCPUR3) * VMM_MAX_CPU_COUNT];
 
     /* ---- end small stuff ---- */
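The padding bookkeeping works out as follows: the tm union grows by exactly the amount the tail alignment shrinks, so sizeof(VM) and its page alignment are unchanged:

    9152 - 7872 = 1280   bytes added to the tm union
    3672 - 2392 = 1280   bytes removed from abAlignment2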
trunk/include/VBox/vmm/vm.mac
r87777 → r87792

@@ -131,5 +131,5 @@
     .em                     resb 256
     .nem                    resb 128
-    .tm                     resb 7872
+    .tm                     resb 9152
     .dbgf                   resb 2432
     .ssm                    resb 128
trunk/include/VBox/vmm/vmm.h
r87107 → r87792

@@ -424,9 +424,6 @@
     VMMR0_DO_IOM_SYNC_STATS_INDICES,
 
-    /** Official call we use for testing Ring-0 APIs. */
-    VMMR0_DO_TESTS = 704,
-
     /** Call DBGFR0TraceCreateReqHandler. */
-    VMMR0_DO_DBGF_TRACER_CREATE = 710,
+    VMMR0_DO_DBGF_TRACER_CREATE = 704,
     /** Call DBGFR0TraceCallReqHandler. */
     VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER,
@@ -439,4 +436,10 @@
     /** Call DBGFR0BpOwnerInitReqHandler(). */
     VMMR0_DO_DBGF_BP_OWNER_INIT,
+
+    /** Grow a timer queue. */
+    VMMR0_DO_TM_GROW_TIMER_QUEUE = 768,
+
+    /** Official call we use for testing Ring-0 APIs. */
+    VMMR0_DO_TESTS = 2048,
 
     /** The usual 32-bit type blow up. */
trunk/src/VBox/VMM/Makefile.kmk
r87728 → r87792

@@ -504,4 +504,5 @@
 	VMMR0/PGMR0Pool.cpp \
 	VMMR0/PGMR0SharedPage.cpp \
+	VMMR0/TMR0.cpp \
 	VMMR0/VMMR0.cpp \
 	VMMRZ/CPUMRZ.cpp \
trunk/src/VBox/VMM/VMMAll/TMAll.cpp
r87774 r87792 360 360 DECLINLINE(void) tmSchedule(PVMCC pVM, PTMTIMER pTimer) 361 361 { 362 TMCLOCK const enmClock = pTimer->enmClock; 363 AssertReturnVoid((unsigned)enmClock < TMCLOCK_MAX); 364 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[enmClock]; 365 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, enmClock, pQueue); 366 362 367 if ( VM_IS_EMT(pVM) 363 368 && RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM))) … … 365 370 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a); 366 371 Log3(("tmSchedule: tmTimerQueueSchedule\n")); 367 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);372 tmTimerQueueSchedule(pVM, pQueueCC, pQueue); 368 373 #ifdef VBOX_STRICT 369 374 tmTimerQueuesSanityChecks(pVM, "tmSchedule"); … … 404 409 * Links the timer onto the scheduling queue. 405 410 * 406 * @param pQueue The timer queue the timer belongs to. 407 * @param pTimer The timer. 411 * @param pQueueCC The current context queue (same as @a pQueue for 412 * ring-3). 413 * @param pQueue The shared queue data. 414 * @param pTimer The timer. 408 415 * 409 416 * @todo FIXME: Look into potential race with the thread running the queues 410 417 * and stuff. 411 418 */ 412 DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUE pQueue, PTMTIMER pTimer) 413 { 414 Assert(!pTimer->offScheduleNext); 415 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue; 416 int32_t offHead; 419 DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer) 420 { 421 Assert(pTimer->idxScheduleNext == UINT32_MAX); 422 const uint32_t idxHeadNew = pTimer - &pQueueCC->paTimers[0]; 423 AssertReturnVoid(idxHeadNew < pQueueCC->cTimersAlloc); 424 425 uint32_t idxHead; 417 426 do 418 427 { 419 offHead = pQueue->offSchedule; 420 if (offHead) 421 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer; 422 else 423 pTimer->offScheduleNext = 0; 424 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead)); 428 idxHead = pQueue->idxSchedule; 429 Assert(idxHead == UINT32_MAX || idxHead < pQueueCC->cTimersAlloc); 430 pTimer->idxScheduleNext = idxHead; 431 } while (!ASMAtomicCmpXchgU32(&pQueue->idxSchedule, idxHeadNew, idxHead)); 425 432 } 426 433 … … 438 445 DECLINLINE(bool) tmTimerTryWithLink(PVMCC pVM, PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld) 439 446 { 447 TMCLOCK const enmClock = pTimer->enmClock; 448 AssertReturn((unsigned)enmClock < TMCLOCK_MAX, false); 440 449 if (tmTimerTry(pTimer, enmStateNew, enmStateOld)) 441 450 { 442 tmTimerLinkSchedule(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer); 451 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[enmClock]; 452 tmTimerLinkSchedule(TM_GET_TIMER_QUEUE_CC(pVM, enmClock, pQueue), pQueue, pTimer); 443 453 return true; 444 454 } … … 451 461 * 452 462 * @param pVM The cross context VM structure. 453 * @param pQueue The queue. 463 * @param pQueueCC The current context queue (same as @a pQueue for 464 * ring-3). 465 * @param pQueue The shared queue data. 454 466 * @param pTimer The timer. 455 467 * @param u64Expire The timer expiration time. … … 457 469 * @remarks Called while owning the relevant queue lock. 
458 470 */ 459 DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PVMCC pVM, PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire) 460 { 461 Assert(!pTimer->offNext); 462 Assert(!pTimer->offPrev); 471 DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, 472 PTMTIMER pTimer, uint64_t u64Expire) 473 { 474 Assert(pTimer->idxNext == UINT32_MAX); 475 Assert(pTimer->idxPrev == UINT32_MAX); 463 476 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */ 464 477 RT_NOREF(pVM); 465 478 466 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);479 PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue); 467 480 if (pCur) 468 481 { 469 for (;; pCur = TMTIMER_GET_NEXT(pCur))482 for (;; pCur = tmTimerGetNext(pQueueCC, pCur)) 470 483 { 471 484 if (pCur->u64Expire > u64Expire) 472 485 { 473 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);474 TMTIMER_SET_NEXT(pTimer, pCur);475 TMTIMER_SET_PREV(pTimer, pPrev);486 const PTMTIMER pPrev = tmTimerGetPrev(pQueueCC, pCur); 487 tmTimerSetNext(pQueueCC, pTimer, pCur); 488 tmTimerSetPrev(pQueueCC, pTimer, pPrev); 476 489 if (pPrev) 477 TMTIMER_SET_NEXT(pPrev, pTimer);490 tmTimerSetNext(pQueueCC, pPrev, pTimer); 478 491 else 479 492 { 480 TMTIMER_SET_HEAD(pQueue, pTimer);493 tmTimerQueueSetHead(pQueueCC, pQueue, pTimer); 481 494 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire); 482 495 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive head", pTimer->szName); 483 496 } 484 TMTIMER_SET_PREV(pCur, pTimer);497 tmTimerSetPrev(pQueueCC, pCur, pTimer); 485 498 return; 486 499 } 487 if ( !pCur->offNext)500 if (pCur->idxNext == UINT32_MAX) 488 501 { 489 TMTIMER_SET_NEXT(pCur, pTimer);490 TMTIMER_SET_PREV(pTimer, pCur);502 tmTimerSetNext(pQueueCC, pCur, pTimer); 503 tmTimerSetPrev(pQueueCC, pTimer, pCur); 491 504 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive tail", pTimer->szName); 492 505 return; … … 496 509 else 497 510 { 498 TMTIMER_SET_HEAD(pQueue, pTimer);511 tmTimerQueueSetHead(pQueueCC, pQueue, pTimer); 499 512 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire); 500 513 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive empty", pTimer->szName); … … 508 521 * 509 522 * @param pVM The cross context VM structure. 510 * @param pQueue The timer queue. 523 * @param pQueueCC The current context queue (same as @a pQueue for 524 * ring-3). 525 * @param pQueue The shared queue data. 511 526 * @param pTimer The timer that needs scheduling. 512 527 * 513 528 * @remarks Called while owning the lock. 
514 529 */ 515 DECLINLINE(void) tmTimerQueueScheduleOne(PVMCC pVM, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)530 DECLINLINE(void) tmTimerQueueScheduleOne(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer) 516 531 { 517 532 Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC); … … 533 548 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE))) 534 549 break; /* retry */ 535 tmTimerQueueUnlinkActive(pVM, pQueue , pTimer);550 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer); 536 551 RT_FALL_THRU(); 537 552 … … 540 555 */ 541 556 case TMTIMERSTATE_PENDING_SCHEDULE: 542 Assert( !pTimer->offNext); Assert(!pTimer->offPrev);557 Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX); 543 558 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE))) 544 559 break; /* retry */ 545 tmTimerQueueLinkActive(pVM, pQueue , pTimer, pTimer->u64Expire);560 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, pTimer->u64Expire); 546 561 return; 547 562 … … 552 567 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP))) 553 568 break; /* retry */ 554 tmTimerQueueUnlinkActive(pVM, pQueue , pTimer);569 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer); 555 570 RT_FALL_THRU(); 556 571 … … 559 574 */ 560 575 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: 561 Assert( !pTimer->offNext); Assert(!pTimer->offPrev);576 Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX); 562 577 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE))) 563 578 break; … … 576 591 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE: 577 592 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE: 578 tmTimerLinkSchedule(pQueue , pTimer);593 tmTimerLinkSchedule(pQueueCC, pQueue, pTimer); 579 594 STAM_COUNTER_INC(&pVM->tm.s.CTX_SUFF_Z(StatPostponed)); 580 595 return; … … 601 616 * 602 617 * @param pVM The cross context VM structure. 603 * @param pQueue The queue to schedule. 618 * @param pQueueCC The current context queue (same as @a pQueue for 619 * ring-3) data of the queue to schedule. 620 * @param pQueue The shared queue data of the queue to schedule. 604 621 * 605 622 * @remarks Called while owning the lock. 606 623 */ 607 void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUE pQueue)624 void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue) 608 625 { 609 626 TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM); … … 613 630 * Dequeue the scheduling list and iterate it. 614 631 */ 615 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0); 616 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire)); 617 if (!offNext) 618 return; 619 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext); 620 while (pNext) 621 { 632 uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX); 633 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, idxNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, idxNext, pQueue->u64Expire)); 634 while (idxNext != UINT32_MAX) 635 { 636 AssertBreak(idxNext < pQueueCC->cTimersAlloc); 637 622 638 /* 623 * Unlink the head timer and findthe next one.639 * Unlink the head timer and take down the index of the next one. 624 640 */ 625 PTMTIMER pTimer = pNext;626 pNext = pNext->offScheduleNext ? 
(PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;627 pTimer-> offScheduleNext = 0;641 PTMTIMER pTimer = &pQueueCC->paTimers[idxNext]; 642 idxNext = pTimer->idxScheduleNext; 643 pTimer->idxScheduleNext = UINT32_MAX; 628 644 629 645 /* … … 632 648 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .szName=%s}\n", 633 649 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, pTimer->szName)); 634 tmTimerQueueScheduleOne(pVM, pQueue , pTimer);650 tmTimerQueueScheduleOne(pVM, pQueueCC, pQueue, pTimer); 635 651 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState))); 636 } /* foreach timer in current schedule batch. */652 } 637 653 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire)); 638 654 } … … 648 664 * @remarks Called while owning the lock. 649 665 */ 650 void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)666 void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere) 651 667 { 652 668 TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM); 653 669 654 /* 655 * Check the linking of the active lists. 656 */ 657 bool fHaveVirtualSyncLock = false; 658 for (int i = 0; i < TMCLOCK_MAX; i++) 659 { 660 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i]; 661 Assert((int)pQueue->enmClock == i); 662 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC) 670 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++) 671 { 672 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[idxQueue]; 673 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue); 674 Assert(pQueue->enmClock == (TMCLOCK)idxQueue); 675 676 int rc = PDMCritSectRwTryEnterShared(&pQueue->AllocLock); 677 if (RT_SUCCESS(rc)) 663 678 { 664 if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) != VINF_SUCCESS) 665 continue; 666 fHaveVirtualSyncLock = true; 667 } 668 PTMTIMER pPrev = NULL; 669 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur)) 670 { 671 AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i)); 672 AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev)); 673 TMTIMERSTATE enmState = pCur->enmState; 674 switch (enmState) 679 if ( pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC 680 || PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS) 675 681 { 676 case TMTIMERSTATE_ACTIVE: 677 AssertMsg( !pCur->offScheduleNext 678 || pCur->enmState != TMTIMERSTATE_ACTIVE, 679 ("%s: %RI32\n", pszWhere, pCur->offScheduleNext)); 680 break; 681 case TMTIMERSTATE_PENDING_STOP: 682 case TMTIMERSTATE_PENDING_RESCHEDULE: 683 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE: 684 break; 685 default: 686 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState))); 687 break; 688 } 689 } 690 } 691 692 693 # ifdef IN_RING3 694 /* 695 * Do the big list and check that active timers all are in the active lists. 
696 */ 697 PTMTIMERR3 pPrev = NULL; 698 for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext) 699 { 700 Assert(pCur->pBigPrev == pPrev); 701 Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX); 702 703 TMTIMERSTATE enmState = pCur->enmState; 704 switch (enmState) 705 { 706 case TMTIMERSTATE_ACTIVE: 707 case TMTIMERSTATE_PENDING_STOP: 708 case TMTIMERSTATE_PENDING_RESCHEDULE: 709 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE: 710 if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC) 682 /* Check the linking of the active lists. */ 683 PTMTIMER pPrev = NULL; 684 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue); 685 pCur; 686 pPrev = pCur, pCur = tmTimerGetNext(pQueueCC, pCur)) 711 687 { 712 PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]); 713 Assert(pCur->offPrev || pCur == pCurAct); 714 while (pCurAct && pCurAct != pCur) 715 pCurAct = TMTIMER_GET_NEXT(pCurAct); 716 Assert(pCurAct == pCur); 717 } 718 break; 719 720 case TMTIMERSTATE_PENDING_SCHEDULE: 721 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: 722 case TMTIMERSTATE_STOPPED: 723 case TMTIMERSTATE_EXPIRED_DELIVER: 724 if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC) 725 { 726 Assert(!pCur->offNext); 727 Assert(!pCur->offPrev); 728 for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]); 729 pCurAct; 730 pCurAct = TMTIMER_GET_NEXT(pCurAct)) 688 AssertMsg(pCur->enmClock == (TMCLOCK)idxQueue, ("%s: %d != %u\n", pszWhere, pCur->enmClock, idxQueue)); 689 AssertMsg(tmTimerGetPrev(pQueueCC, pCur) == pPrev, ("%s: %p != %p\n", pszWhere, tmTimerGetPrev(pQueueCC, pCur), pPrev)); 690 TMTIMERSTATE enmState = pCur->enmState; 691 switch (enmState) 731 692 { 732 Assert(pCurAct != pCur); 733 Assert(TMTIMER_GET_NEXT(pCurAct) != pCur); 734 Assert(TMTIMER_GET_PREV(pCurAct) != pCur); 693 case TMTIMERSTATE_ACTIVE: 694 AssertMsg( pCur->idxScheduleNext == UINT32_MAX 695 || pCur->enmState != TMTIMERSTATE_ACTIVE, 696 ("%s: %RI32\n", pszWhere, pCur->idxScheduleNext)); 697 break; 698 case TMTIMERSTATE_PENDING_STOP: 699 case TMTIMERSTATE_PENDING_RESCHEDULE: 700 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE: 701 break; 702 default: 703 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState))); 704 break; 735 705 } 736 706 } 737 break; 738 739 /* ignore */ 740 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE: 741 break; 742 743 /* shouldn't get here! */ 744 case TMTIMERSTATE_EXPIRED_GET_UNLINK: 745 case TMTIMERSTATE_DESTROY: 746 default: 747 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState))); 748 break; 707 708 # ifdef IN_RING3 709 /* Go thru all the timers and check that the active ones all are in the active lists. 
*/ 710 uint32_t idxTimer = pQueue->cTimersAlloc; 711 uint32_t cFree = 0; 712 while (idxTimer-- > 0) 713 { 714 PTMTIMER const pTimer = &pQueue->paTimers[idxTimer]; 715 TMTIMERSTATE const enmState = pTimer->enmState; 716 switch (enmState) 717 { 718 case TMTIMERSTATE_FREE: 719 cFree++; 720 break; 721 722 case TMTIMERSTATE_ACTIVE: 723 case TMTIMERSTATE_PENDING_STOP: 724 case TMTIMERSTATE_PENDING_RESCHEDULE: 725 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE: 726 { 727 PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue); 728 Assert(pTimer->idxPrev != UINT32_MAX || pTimer == pCurAct); 729 while (pCurAct && pCurAct != pTimer) 730 pCurAct = tmTimerGetNext(pQueueCC, pCurAct); 731 Assert(pCurAct == pTimer); 732 break; 733 } 734 735 case TMTIMERSTATE_PENDING_SCHEDULE: 736 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: 737 case TMTIMERSTATE_STOPPED: 738 case TMTIMERSTATE_EXPIRED_DELIVER: 739 { 740 Assert(pTimer->idxNext == UINT32_MAX); 741 Assert(pTimer->idxPrev == UINT32_MAX); 742 for (PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue); 743 pCurAct; 744 pCurAct = tmTimerGetNext(pQueueCC, pCurAct)) 745 { 746 Assert(pCurAct != pTimer); 747 Assert(tmTimerGetNext(pQueueCC, pCurAct) != pTimer); 748 Assert(tmTimerGetPrev(pQueueCC, pCurAct) != pTimer); 749 } 750 break; 751 } 752 753 /* ignore */ 754 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE: 755 break; 756 757 case TMTIMERSTATE_INVALID: 758 Assert(idxTimer == 0); 759 break; 760 761 /* shouldn't get here! */ 762 case TMTIMERSTATE_EXPIRED_GET_UNLINK: 763 case TMTIMERSTATE_DESTROY: 764 default: 765 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState))); 766 break; 767 } 768 769 /* Check the handle value. */ 770 if (enmState > TMTIMERSTATE_INVALID && enmState < TMTIMERSTATE_DESTROY) 771 { 772 Assert((pTimer->hSelf & TMTIMERHANDLE_TIMER_IDX_MASK) == idxTimer); 773 Assert(((pTimer->hSelf >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) & TMTIMERHANDLE_QUEUE_IDX_SMASK) == idxQueue); 774 } 775 } 776 Assert(cFree == pQueue->cTimersFree); 777 # endif /* IN_RING3 */ 778 779 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC) 780 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock); 781 } 782 PDMCritSectRwLeaveShared(&pQueue->AllocLock); 749 783 } 750 784 } 751 # endif /* IN_RING3 */752 753 if (fHaveVirtualSyncLock)754 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);755 785 } 756 786 #endif /* !VBOX_STRICT */ … … 883 913 * Check for TMCLOCK_VIRTUAL expiration. 884 914 */ 885 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);915 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire); 886 916 const int64_t i64Delta1 = u64Expire1 - u64Now; 887 917 if (i64Delta1 <= 0) … … 906 936 */ 907 937 uint64_t u64VirtualSyncNow; 908 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);938 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire); 909 939 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)) 910 940 { … … 915 945 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp) 916 946 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync) 917 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s. 
CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))947 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire))) 918 948 { 919 949 u64VirtualSyncNow = u64Now - u64VirtualSyncNow; … … 960 990 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp); 961 991 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync); 962 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);992 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire); 963 993 if (fCatchUp) 964 994 { … … 971 1001 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) 972 1002 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync) 973 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)1003 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire) 974 1004 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp) 975 1005 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)) … … 996 1026 } 997 1027 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync) 998 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)1028 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire) 999 1029 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp) 1000 1030 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)) … … 1171 1201 static int tmTimerSetOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire) 1172 1202 { 1173 Assert( !pTimer->offPrev);1174 Assert( !pTimer->offNext);1203 Assert(pTimer->idxPrev == UINT32_MAX); 1204 Assert(pTimer->idxNext == UINT32_MAX); 1175 1205 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE); 1176 1206 1177 1207 TMCLOCK const enmClock = pTimer->enmClock; 1208 AssertReturn((unsigned)enmClock < TMCLOCK_MAX, VERR_TM_IPE_2); 1178 1209 1179 1210 /* … … 1193 1224 * Link the timer into the active list. 
1194 1225 */ 1195 tmTimerQueueLinkActive(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire); 1226 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[enmClock]; 1227 tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, enmClock, pQueue), pQueue, pTimer, u64Expire); 1196 1228 1197 1229 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt); … … 1220 1252 AssertRCReturn(rc, rc); 1221 1253 1222 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC]; 1223 TMTIMERSTATE enmState = pTimer->enmState; 1254 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC]; 1255 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue); 1256 TMTIMERSTATE const enmState = pTimer->enmState; 1224 1257 switch (enmState) 1225 1258 { … … 1235 1268 pTimer->u64Expire = u64Expire; 1236 1269 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE); 1237 tmTimerQueueLinkActive(pVM, pQueue , pTimer, u64Expire);1270 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire); 1238 1271 rc = VINF_SUCCESS; 1239 1272 break; … … 1241 1274 case TMTIMERSTATE_ACTIVE: 1242 1275 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive); 1243 tmTimerQueueUnlinkActive(pVM, pQueue , pTimer);1276 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer); 1244 1277 pTimer->u64Expire = u64Expire; 1245 tmTimerQueueLinkActive(pVM, pQueue , pTimer, u64Expire);1278 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire); 1246 1279 rc = VINF_SUCCESS; 1247 1280 break; … … 1356 1389 if (tmTimerTryWithLink(pVM, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState)) 1357 1390 { 1358 Assert( !pTimer->offPrev);1359 Assert( !pTimer->offNext);1391 Assert(pTimer->idxPrev == UINT32_MAX); 1392 Assert(pTimer->idxNext == UINT32_MAX); 1360 1393 pTimer->u64Expire = u64Expire; 1361 1394 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE); … … 1478 1511 static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now) 1479 1512 { 1480 Assert( !pTimer->offPrev);1481 Assert( !pTimer->offNext);1513 Assert(pTimer->idxPrev == UINT32_MAX); 1514 Assert(pTimer->idxNext == UINT32_MAX); 1482 1515 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE); 1483 1516 … … 1486 1519 */ 1487 1520 TMCLOCK const enmClock = pTimer->enmClock; 1521 AssertReturn((unsigned)enmClock < (unsigned)TMCLOCK_MAX, VERR_TM_IPE_2); 1488 1522 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now); 1489 1523 pTimer->u64Expire = u64Expire; … … 1494 1528 */ 1495 1529 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", pTimer->szName); 1496 tmTimerQueueLinkActive(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire); 1530 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[enmClock]; 1531 tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, enmClock, pQueue), pQueue, pTimer, u64Expire); 1497 1532 1498 1533 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt); … … 1530 1565 1531 1566 /* Update the timer. 
*/ 1532 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC]; 1533 TMTIMERSTATE enmState = pTimer->enmState; 1567 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC]; 1568 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue); 1569 TMTIMERSTATE const enmState = pTimer->enmState; 1534 1570 switch (enmState) 1535 1571 { … … 1542 1578 pTimer->u64Expire = u64Expire; 1543 1579 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE); 1544 tmTimerQueueLinkActive(pVM, pQueue , pTimer, u64Expire);1580 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire); 1545 1581 rc = VINF_SUCCESS; 1546 1582 break; … … 1548 1584 case TMTIMERSTATE_ACTIVE: 1549 1585 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive); 1550 tmTimerQueueUnlinkActive(pVM, pQueue , pTimer);1586 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer); 1551 1587 pTimer->u64Expire = u64Expire; 1552 tmTimerQueueLinkActive(pVM, pQueue , pTimer, u64Expire);1588 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire); 1553 1589 rc = VINF_SUCCESS; 1554 1590 break; … … 1679 1715 if (tmTimerTryWithLink(pVM, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState)) 1680 1716 { 1681 Assert( !pTimer->offPrev);1682 Assert( !pTimer->offNext);1717 Assert(pTimer->idxPrev == UINT32_MAX); 1718 Assert(pTimer->idxNext == UINT32_MAX); 1683 1719 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now); 1684 1720 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n", … … 1879 1915 1880 1916 /* Update the timer state. */ 1881 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC]; 1882 TMTIMERSTATE enmState = pTimer->enmState; 1917 TMTIMERSTATE const enmState = pTimer->enmState; 1883 1918 switch (enmState) 1884 1919 { 1885 1920 case TMTIMERSTATE_ACTIVE: 1886 tmTimerQueueUnlinkActive(pVM, pQueue, pTimer); 1921 { 1922 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC]; 1923 tmTimerQueueUnlinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue), pQueue, pTimer); 1887 1924 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED); 1888 1925 rc = VINF_SUCCESS; 1889 1926 break; 1927 } 1890 1928 1891 1929 case TMTIMERSTATE_EXPIRED_DELIVER: … … 2554 2592 AssertCompile(TMTIMERSTATE_##state == (num)); \ 2555 2593 return #num "-" #state 2594 CASE( 0,INVALID); 2556 2595 CASE( 1,STOPPED); 2557 2596 CASE( 2,ACTIVE); … … 2580 2619 * @param pVM The cross context VM structure. 
2581 2620 */ 2582 static uint32_t tmGetFrequencyHint(PVM pVM)2621 static uint32_t tmGetFrequencyHint(PVMCC pVM) 2583 2622 { 2584 2623 /* … … 2599 2638 */ 2600 2639 uMaxHzHint = 0; 2601 for ( int i = 0; i < TMCLOCK_MAX; i++)2640 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++) 2602 2641 { 2603 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i]; 2604 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur)) 2642 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue]; 2643 PTMTIMERQUEUECC pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue); 2644 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue); pCur; pCur = tmTimerGetNext(pQueueCC, pCur)) 2605 2645 { 2606 2646 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint); … … 2624 2664 case TMTIMERSTATE_DESTROY: 2625 2665 case TMTIMERSTATE_FREE: 2666 case TMTIMERSTATE_INVALID: 2626 2667 break; 2627 2668 /* no default, want gcc warnings when adding more states. */ -
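Most of the TMAll.cpp churn above is one representational change: timers no longer link to each other through byte offsets (offNext/offPrev/offScheduleNext) relative to individually allocated structures, but through 32-bit indices into the per-queue timer array, with UINT32_MAX playing the role of NULL. A small standalone illustration of that list shape and of the lock-free push that tmTimerLinkSchedule now performs, written with plain C11 atomics instead of the ASMAtomic* wrappers (names are made up for the demo):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NIL_IDX UINT32_MAX                    /* stands in for the UINT32_MAX nil value */

    typedef struct DEMOTIMER
    {
        uint32_t idxScheduleNext;                 /* index of the next entry, NIL_IDX ends the list */
        uint64_t u64Expire;
    } DEMOTIMER;

    typedef struct DEMOQUEUE
    {
        _Atomic uint32_t idxSchedule;             /* head of the pending-schedule list */
        DEMOTIMER        aTimers[64];             /* the shared timer array (paTimers in the real code) */
    } DEMOQUEUE;

    /* Lock-free push of aTimers[idxTimer] onto the schedule list (cf. tmTimerLinkSchedule). */
    static void demoLinkSchedule(DEMOQUEUE *pQueue, uint32_t idxTimer)
    {
        uint32_t idxHead = atomic_load(&pQueue->idxSchedule);
        do
            pQueue->aTimers[idxTimer].idxScheduleNext = idxHead;
        while (!atomic_compare_exchange_weak(&pQueue->idxSchedule, &idxHead, idxTimer));
    }

    int main(void)
    {
        DEMOQUEUE Queue = { .idxSchedule = NIL_IDX };
        demoLinkSchedule(&Queue, 3);
        demoLinkSchedule(&Queue, 7);              /* 7 becomes the new head */
        for (uint32_t idx = atomic_load(&Queue.idxSchedule); idx != NIL_IDX;
             idx = Queue.aTimers[idx].idxScheduleNext)
            printf("timer #%u\n", idx);           /* prints 7, then 3 */
        return 0;
    }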
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
r87633 r87792 279 279 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER) 280 280 && !pVM->tm.s.fRunningQueues 281 && ( pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64281 && ( pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64 282 282 || ( pVM->tm.s.fVirtualSyncTicking 283 && pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync283 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync 284 284 ) 285 285 ) … … 416 416 } 417 417 418 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);418 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire); 419 419 if (pnsAbsDeadline) 420 420 *pnsAbsDeadline = u64Expire; /* Always return the unadjusted absolute deadline, or HM will waste time going … … 520 520 } 521 521 522 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);522 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire); 523 523 if (pnsAbsDeadline) 524 524 *pnsAbsDeadline = u64Expire; … … 597 597 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu); 598 598 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER) 599 && pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)599 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64) 600 600 { 601 601 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__)); … … 635 635 { 636 636 off = u64 - off; 637 uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);637 uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire); 638 638 if (off < u64Expire) 639 639 { … … 767 767 u64 -= off; 768 768 /** @todo u64VirtualSyncLast */ 769 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);769 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire); 770 770 if (pnsAbsDeadline) 771 771 *pnsAbsDeadline = u64Expire; … … 928 928 { 929 929 /** @todo Try use ASMAtomicUoReadU64 instead. */ 930 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);930 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire); 931 931 return u64Expire == uDeadlineVersion; 932 932 } -
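All seven hunks in this file are the same mechanical substitution: with the queues now embedded in the VM structure as the aTimerQueues array (TM.cpp below drops the MMHyperAlloc of the old queue block), the context-suffixed paTimerQueuesR3/R0/RC pointers and the CTX_SUFF() indirection go away:

    /* before */
    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    /* after */
    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);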
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
r86683 → r87792

@@ -910,4 +910,5 @@
         PDMR0InitPerVMData(pGVM);
         IOMR0InitPerVMData(pGVM);
+        TMR0InitPerVMData(pGVM);
         if (RT_SUCCESS(rc) && RT_SUCCESS(rc2))
         {
@@ -1306,4 +1307,5 @@
     DBGFR0CleanupVM(pGVM);
     PGMR0CleanupVM(pGVM);
+    TMR0CleanupVM(pGVM);
 
     AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r87666 → r87792

@@ -2310,4 +2310,16 @@
 
         /*
+         * TM requests.
+         */
+        case VMMR0_DO_TM_GROW_TIMER_QUEUE:
+        {
+            if (pReqHdr || idCpu == NIL_VMCPUID)
+                return VERR_INVALID_PARAMETER;
+            rc = TMR0TimerQueueGrow(pGVM, RT_HI_U32(u64Arg), RT_LO_U32(u64Arg));
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
+            break;
+        }
+
+        /*
          * For profiling.
          */
@@ -2390,5 +2402,5 @@
     if (    pVM != NULL
         &&  pGVM != NULL
-        &&  pVM == pGVM /** @todo drop pGVM */
+        &&  pVM == pGVM /** @todo drop pVM or pGVM */
         &&  idCpu < pGVM->cCpus
         &&  pGVM->pSession == pSession
trunk/src/VBox/VMM/VMMR3/TM.cpp
r87774 r87792 152 152 #include <iprt/file.h> 153 153 #include <iprt/getopt.h> 154 #include <iprt/rand.h> 154 155 #include <iprt/semaphore.h> 155 156 #include <iprt/string.h> … … 175 176 static DECLCALLBACK(int) tmR3Save(PVM pVM, PSSMHANDLE pSSM); 176 177 static DECLCALLBACK(int) tmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass); 178 #ifdef VBOX_WITH_STATISTICS 179 static void tmR3TimerQueueRegisterStats(PVM pVM, PTMTIMERQUEUE pQueue, uint32_t cTimers); 180 #endif 177 181 static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick); 178 182 static void tmR3TimerQueueRun(PVM pVM, PTMTIMERQUEUE pQueue); … … 187 191 static DECLCALLBACK(void) tmR3InfoCpuLoad(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs); 188 192 static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtDisable(PVM pVM, PVMCPU pVCpu, void *pvData); 189 static const char *tmR3GetTSCModeName(PVM pVM);190 static const char *tmR3GetTSCModeNameEx(TMTSCMODE enmMode);193 static const char *tmR3GetTSCModeName(PVM pVM); 194 static const char *tmR3GetTSCModeNameEx(TMTSCMODE enmMode); 191 195 192 196 … … 212 216 * Init the structure. 213 217 */ 214 void *pv;215 int rc = MMHyperAlloc(pVM, sizeof(pVM->tm.s.paTimerQueuesR3[0]) * TMCLOCK_MAX, 0, MM_TAG_TM, &pv);216 AssertRCReturn(rc, rc);217 pVM->tm.s.paTimerQueuesR3 = (PTMTIMERQUEUE)pv;218 pVM->tm.s.paTimerQueuesR0 = MMHyperR3ToR0(pVM, pv);219 pVM->tm.s.paTimerQueuesRC = MMHyperR3ToRC(pVM, pv);220 221 pVM->tm.s.offVM = RT_UOFFSETOF(VM, tm.s);222 218 pVM->tm.s.idTimerCpu = pVM->cCpus - 1; /* The last CPU. */ 223 pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].enmClock = TMCLOCK_VIRTUAL; 224 pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].u64Expire = INT64_MAX; 225 pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].enmClock = TMCLOCK_VIRTUAL_SYNC; 226 pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].u64Expire = INT64_MAX; 227 pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].enmClock = TMCLOCK_REAL; 228 pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].u64Expire = INT64_MAX; 229 pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].enmClock = TMCLOCK_TSC; 230 pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].u64Expire = INT64_MAX; 219 220 strcpy(pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].szName, "virtual"); 221 strcpy(pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].szName, "virtual_sync"); /* Underscore is for STAM ordering issue. 
*/ 222 strcpy(pVM->tm.s.aTimerQueues[TMCLOCK_REAL].szName, "real"); 223 strcpy(pVM->tm.s.aTimerQueues[TMCLOCK_TSC].szName, "tsc"); 224 225 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->tm.s.aTimerQueues); i++) 226 { 227 Assert(pVM->tm.s.aTimerQueues[i].szName[0] != '\0'); 228 pVM->tm.s.aTimerQueues[i].enmClock = (TMCLOCK)i; 229 pVM->tm.s.aTimerQueues[i].u64Expire = INT64_MAX; 230 pVM->tm.s.aTimerQueues[i].idxActive = UINT32_MAX; 231 pVM->tm.s.aTimerQueues[i].idxSchedule = UINT32_MAX; 232 pVM->tm.s.aTimerQueues[i].idxFreeHint = 1; 233 int rc = PDMR3CritSectRwInit(pVM, &pVM->tm.s.aTimerQueues[i].AllocLock, RT_SRC_POS, 234 "TM queue %s", pVM->tm.s.aTimerQueues[i].szName); 235 AssertLogRelRCReturn(rc, rc); 236 } 231 237 232 238 /* … … 243 249 244 250 RTHCPHYS HCPhysGIP; 245 rc = SUPR3GipGetPhys(&HCPhysGIP);251 int rc = SUPR3GipGetPhys(&HCPhysGIP); 246 252 AssertMsgRCReturn(rc, ("Failed to get GIP physical address!\n"), rc); 247 253 … … 1111 1117 pVM->tm.s.fTSCModeSwitchAllowed &= tmR3HasFixedTSC(pVM) && GIMIsEnabled(pVM) && !VM_IS_RAW_MODE_ENABLED(pVM); 1112 1118 LogRel(("TM: TMR3InitFinalize: fTSCModeSwitchAllowed=%RTbool\n", pVM->tm.s.fTSCModeSwitchAllowed)); 1119 1120 /* 1121 * Grow the virtual & real timer tables so we've got sufficient 1122 * space for dynamically created timers. We cannot allocate more 1123 * after ring-0 init completes. 1124 */ 1125 static struct { uint32_t idxQueue, cExtra; } s_aExtra[] = { {TMCLOCK_VIRTUAL, 128}, {TMCLOCK_REAL, 32} }; 1126 for (uint32_t i = 0; i < RT_ELEMENTS(s_aExtra); i++) 1127 { 1128 uint32_t const cExtra = s_aExtra[i].cExtra; 1129 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[s_aExtra[i].idxQueue]; 1130 if (s_aExtra[i].cExtra > pQueue->cTimersFree) 1131 { 1132 uint32_t cTimersAlloc = pQueue->cTimersAlloc + s_aExtra[i].cExtra - pQueue->cTimersFree; 1133 rc = VMMR3CallR0Emt(pVM, VMMGetCpu(pVM), VMMR0_DO_TM_GROW_TIMER_QUEUE, 1134 RT_MAKE_U64(cTimersAlloc, s_aExtra[i].idxQueue), NULL); 1135 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc cTimersAlloc=%u %s\n", rc, cTimersAlloc, pQueue->szName), rc); 1136 } 1137 } 1138 1139 #ifdef VBOX_WITH_STATISTICS 1140 /* 1141 * Register timer statistics now that we've fixed the timer table sizes. 1142 */ 1143 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++) 1144 { 1145 pVM->tm.s.aTimerQueues[idxQueue].fCannotGrow = true; 1146 tmR3TimerQueueRegisterStats(pVM, &pVM->tm.s.aTimerQueues[idxQueue], UINT32_MAX); 1147 } 1148 #endif 1149 1113 1150 return rc; 1114 1151 } … … 1127 1164 LogFlow(("TMR3Relocate\n")); 1128 1165 1129 pVM->tm.s.paTimerQueuesR0 = MMHyperR3ToR0(pVM, pVM->tm.s.paTimerQueuesR3);1130 1131 1166 if (VM_IS_RAW_MODE_ENABLED(pVM)) 1132 1167 { 1133 1168 pVM->tm.s.pvGIPRC = MMHyperR3ToRC(pVM, pVM->tm.s.pvGIPR3); 1134 pVM->tm.s.paTimerQueuesRC = MMHyperR3ToRC(pVM, pVM->tm.s.paTimerQueuesR3);1135 1169 pVM->tm.s.VirtualGetRawDataRC.pu64Prev += offDelta; 1136 1170 pVM->tm.s.VirtualGetRawDataRC.pfnBad += offDelta; … … 1153 1187 VMM_INT_DECL(int) TMR3Term(PVM pVM) 1154 1188 { 1155 AssertMsg(pVM->tm.s.offVM, ("bad init order!\n"));1156 1189 if (pVM->tm.s.pTimer) 1157 1190 { … … 1206 1239 */ 1207 1240 for (int i = 0; i < TMCLOCK_MAX; i++) 1208 tmTimerQueueSchedule(pVM, &pVM->tm.s. paTimerQueuesR3[i]);1241 tmTimerQueueSchedule(pVM, &pVM->tm.s.aTimerQueues[i], &pVM->tm.s.aTimerQueues[i]); 1209 1242 #ifdef VBOX_STRICT 1210 1243 tmTimerQueuesSanityChecks(pVM, "TMR3Reset"); … … 1493 1526 1494 1527 1528 #ifdef VBOX_WITH_STATISTICS 1529 1530 /** 1531 * Register statistics for a timer. 
1532 * 1533 * @param pVM The cross context VM structure. 1534 * @param pTimer The timer to register statistics for. 1535 */ 1536 static void tmR3TimerRegisterStats(PVM pVM, PTMTIMER pTimer) 1537 { 1538 STAMR3RegisterF(pVM, &pTimer->StatTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, 1539 tmR3TimerClockName(pTimer), "/TM/Timers/%s", pTimer->szName); 1540 STAMR3RegisterF(pVM, &pTimer->StatCritSectEnter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, 1541 "", "/TM/Timers/%s/CritSectEnter", pTimer->szName); 1542 STAMR3RegisterF(pVM, &pTimer->StatGet, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, 1543 "", "/TM/Timers/%s/Get", pTimer->szName); 1544 STAMR3RegisterF(pVM, &pTimer->StatSetAbsolute, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, 1545 "", "/TM/Timers/%s/SetAbsolute", pTimer->szName); 1546 STAMR3RegisterF(pVM, &pTimer->StatSetRelative, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, 1547 "", "/TM/Timers/%s/SetRelative", pTimer->szName); 1548 STAMR3RegisterF(pVM, &pTimer->StatStop, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, 1549 "", "/TM/Timers/%s/Stop", pTimer->szName); 1550 } 1551 1552 1553 /** 1554 * Deregister the statistics for a timer. 1555 */ 1556 static void tmR3TimerDeregisterStats(PVM pVM, PTMTIMER pTimer) 1557 { 1558 char szPrefix[128]; 1559 size_t cchPrefix = RTStrPrintf(szPrefix, sizeof(szPrefix), "/TM/Timers/%s/", pTimer->szName); 1560 STAMR3DeregisterByPrefix(pVM->pUVM, szPrefix); 1561 szPrefix[cchPrefix - 1] = '\0'; 1562 STAMR3Deregister(pVM->pUVM, szPrefix); 1563 } 1564 1565 1566 /** 1567 * Register statistics for all allocated timers in a queue. 1568 * 1569 * @param pVM The cross context VM structure. 1570 * @param pTimer The timer to register statistics for. 1571 * @param cTimers Number of timers to consider (in growth scenario). 1572 */ 1573 static void tmR3TimerQueueRegisterStats(PVM pVM, PTMTIMERQUEUE pQueue, uint32_t cTimers) 1574 { 1575 uint32_t idxTimer = RT_MIN(cTimers, pQueue->cTimersAlloc); 1576 while (idxTimer-- > 0) 1577 { 1578 PTMTIMER pTimer = &pQueue->paTimers[idxTimer]; 1579 TMTIMERSTATE enmState = pTimer->enmState; 1580 if (enmState > TMTIMERSTATE_INVALID && enmState < TMTIMERSTATE_DESTROY) 1581 tmR3TimerRegisterStats(pVM, pTimer); 1582 } 1583 } 1584 1585 #endif /* VBOX_WITH_STATISTICS */ 1586 1587 1495 1588 /** 1496 1589 * Internal TMR3TimerCreate worker. … … 1505 1598 static int tmr3TimerCreate(PVM pVM, TMCLOCK enmClock, uint32_t fFlags, const char *pszName, PPTMTIMERR3 ppTimer) 1506 1599 { 1507 PTMTIMER R3pTimer;1600 PTMTIMER pTimer; 1508 1601 1509 1602 /* … … 1511 1604 */ 1512 1605 VM_ASSERT_EMT(pVM); 1606 1513 1607 AssertReturn((fFlags & (TMTIMER_FLAGS_RING0 | TMTIMER_FLAGS_NO_RING0)) != (TMTIMER_FLAGS_RING0 | TMTIMER_FLAGS_NO_RING0), 1514 1608 VERR_INVALID_FLAGS); 1609 1515 1610 AssertPtrReturn(pszName, VERR_INVALID_POINTER); 1516 1611 size_t const cchName = strlen(pszName); … … 1518 1613 AssertMsgReturn(cchName > 2, ("Too short timer name: %s\n", pszName), VERR_INVALID_NAME); 1519 1614 1615 AssertMsgReturn(enmClock >= TMCLOCK_REAL && enmClock < TMCLOCK_MAX, 1616 ("%d\n", enmClock), VERR_INVALID_PARAMETER); 1617 AssertReturn(enmClock != TMCLOCK_TSC, VERR_NOT_SUPPORTED); 1618 if (enmClock == TMCLOCK_VIRTUAL_SYNC) 1619 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_WRONG_ORDER); 1620 1621 /* 1622 * Exclusively lock the queue. 1623 * 1624 * Note! This means that it is not possible to allocate timers from a timer callback. 
1625 */ 1626 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[enmClock]; 1627 int rc = PDMCritSectRwEnterExcl(&pQueue->AllocLock, VERR_IGNORED); 1628 AssertRCReturn(rc, rc); 1629 1520 1630 /* 1521 1631 * Allocate the timer. 1522 1632 */ 1523 if (pVM->tm.s.pFree && VM_IS_EMT(pVM)) 1524 { 1525 pTimer = pVM->tm.s.pFree; 1526 pVM->tm.s.pFree = pTimer->pBigNext; 1527 Log3(("TM: Recycling timer %p, new free head %p.\n", pTimer, pTimer->pBigNext)); 1528 } 1529 else 1530 pTimer = NULL; 1531 1532 if (!pTimer) 1533 { 1534 int rc = MMHyperAlloc(pVM, sizeof(*pTimer), 0, MM_TAG_TM, (void **)&pTimer); 1535 if (RT_FAILURE(rc)) 1536 return rc; 1537 Log3(("TM: Allocated new timer %p\n", pTimer)); 1538 } 1633 if (!pQueue->cTimersFree) 1634 { 1635 AssertReturn(!pQueue->fCannotGrow, VERR_TM_TIMER_QUEUE_CANNOT_GROW); 1636 uint32_t cTimersAlloc = pQueue->cTimersAlloc + 64; 1637 Assert(cTimersAlloc < _32K); 1638 rc = VMMR3CallR0Emt(pVM, VMMGetCpu(pVM), VMMR0_DO_TM_GROW_TIMER_QUEUE, 1639 RT_MAKE_U64(cTimersAlloc, (uint64_t)(pQueue - &pVM->tm.s.aTimerQueues[0])), NULL); 1640 AssertLogRelRCReturnStmt(rc, PDMCritSectRwLeaveExcl(&pQueue->AllocLock), rc); 1641 AssertReturnStmt(pQueue->cTimersAlloc >= cTimersAlloc, PDMCritSectRwLeaveExcl(&pQueue->AllocLock), VERR_TM_IPE_3); 1642 } 1643 1644 /* Scan the array for free timers. */ 1645 pTimer = NULL; 1646 PTMTIMER const paTimers = pQueue->paTimers; 1647 uint32_t const cTimersAlloc = pQueue->cTimersAlloc; 1648 uint32_t idxTimer = pQueue->idxFreeHint; 1649 for (uint32_t iScan = 0; iScan < 2; iScan++) 1650 { 1651 while (idxTimer < cTimersAlloc) 1652 { 1653 if (paTimers[idxTimer].enmState == TMTIMERSTATE_FREE) 1654 { 1655 pTimer = &paTimers[idxTimer]; 1656 pQueue->idxFreeHint = idxTimer + 1; 1657 break; 1658 } 1659 idxTimer++; 1660 } 1661 if (pTimer != NULL) 1662 break; 1663 idxTimer = 1; 1664 } 1665 AssertLogRelMsgReturnStmt(pTimer != NULL, ("cTimersFree=%u cTimersAlloc=%u enmClock=%s\n", pQueue->cTimersFree, 1666 pQueue->cTimersAlloc, pQueue->szName), 1667 PDMCritSectRwLeaveExcl(&pQueue->AllocLock), VERR_INTERNAL_ERROR_3); 1668 pQueue->cTimersFree -= 1; 1539 1669 1540 1670 /* 1541 1671 * Initialize it. 1542 1672 */ 1673 Assert(idxTimer != 0); 1674 Assert(idxTimer <= TMTIMERHANDLE_TIMER_IDX_MASK); 1675 pTimer->hSelf = idxTimer 1676 | ((uintptr_t)(pQueue - &pVM->tm.s.aTimerQueues[0]) << TMTIMERHANDLE_QUEUE_IDX_SHIFT); 1677 Assert(!(pTimer->hSelf & TMTIMERHANDLE_RANDOM_MASK)); 1678 pTimer->hSelf |= (RTRandU64() & TMTIMERHANDLE_RANDOM_MASK); 1679 1543 1680 pTimer->u64Expire = 0; 1544 1681 pTimer->enmClock = enmClock; 1545 pTimer->hSelf = (TMTIMERHANDLE)pTimer;1546 1682 pTimer->enmState = TMTIMERSTATE_STOPPED; 1547 pTimer-> offScheduleNext = 0;1548 pTimer-> offNext = 0;1549 pTimer-> offPrev = 0;1683 pTimer->idxScheduleNext = UINT32_MAX; 1684 pTimer->idxNext = UINT32_MAX; 1685 pTimer->idxPrev = UINT32_MAX; 1550 1686 pTimer->fFlags = fFlags; 1551 1687 pTimer->uHzHint = 0; … … 1555 1691 pTimer->szName[cchName] = '\0'; 1556 1692 1557 /* insert into the list of created timers. 
*/ 1693 #ifdef VBOX_STRICT 1558 1694 TM_LOCK_TIMERS(pVM); 1559 pTimer->pBigPrev = NULL;1560 pTimer->pBigNext = pVM->tm.s.pCreated;1561 pVM->tm.s.pCreated = pTimer;1562 if (pTimer->pBigNext)1563 pTimer->pBigNext->pBigPrev = pTimer;1564 #ifdef VBOX_STRICT1565 1695 tmTimerQueuesSanityChecks(pVM, "tmR3TimerCreate"); 1696 TM_UNLOCK_TIMERS(pVM); 1566 1697 #endif 1567 TM_UNLOCK_TIMERS(pVM); 1698 1699 PDMCritSectRwLeaveExcl(&pQueue->AllocLock); 1568 1700 1569 1701 #ifdef VBOX_WITH_STATISTICS 1570 1702 /* 1571 * Register statistics. 1572 */ 1573 STAMR3RegisterF(pVM, &pTimer->StatTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, 1574 tmR3TimerClockName(pTimer), "/TM/Timers/%s", pTimer->szName); 1575 STAMR3RegisterF(pVM, &pTimer->StatCritSectEnter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, 1576 "", "/TM/Timers/%s/CritSectEnter", pTimer->szName); 1577 STAMR3RegisterF(pVM, &pTimer->StatGet, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, 1578 "", "/TM/Timers/%s/Get", pTimer->szName); 1579 STAMR3RegisterF(pVM, &pTimer->StatSetAbsolute, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, 1580 "", "/TM/Timers/%s/SetAbsolute", pTimer->szName); 1581 STAMR3RegisterF(pVM, &pTimer->StatSetRelative, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, 1582 "", "/TM/Timers/%s/SetRelative", pTimer->szName); 1583 STAMR3RegisterF(pVM, &pTimer->StatStop, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, 1584 "", "/TM/Timers/%s/Stop", pTimer->szName); 1703 * Only register statistics if we're passed the no-realloc point. 1704 */ 1705 if (pQueue->fCannotGrow) 1706 tmR3TimerRegisterStats(pVM, pTimer); 1585 1707 #endif 1586 1708 … … 1762 1884 static int tmR3TimerDestroy(PVMCC pVM, PTMTIMER pTimer) 1763 1885 { 1764 Assert ((unsigned)pTimer->enmClock < (unsigned)TMCLOCK_MAX);1765 1766 PTMTIMERQUEUE pQueue = &pVM->tm.s. CTX_SUFF(paTimerQueues)[pTimer->enmClock];1886 AssertReturn((unsigned)pTimer->enmClock < (unsigned)TMCLOCK_MAX, VERR_INTERNAL_ERROR_3); 1887 1888 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[pTimer->enmClock]; 1767 1889 bool fActive = false; 1768 1890 bool fPending = false; … … 1776 1898 * like create does. All the work is done here. 1777 1899 */ 1900 PDMCritSectRwEnterExcl(&pQueue->AllocLock, VERR_IGNORED); 1778 1901 TM_LOCK_TIMERS(pVM); 1779 1902 for (int cRetries = 1000;; cRetries--) … … 1814 1937 AssertMsgFailed(("%p:.enmState=%s %s\n", pTimer, tmTimerState(enmState), pTimer->szName)); 1815 1938 TM_UNLOCK_TIMERS(pVM); 1939 PDMCritSectRwLeaveExcl(&pQueue->AllocLock); 1940 1941 AssertMsgReturn(cRetries > 0, ("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName), 1942 VERR_TM_UNSTABLE_STATE); 1816 1943 if (!RTThreadYield()) 1817 1944 RTThreadSleep(1); 1818 AssertMsgReturn(cRetries > 0, ("Failed waiting for stable state. 
state=%d (%s)\n", pTimer->enmState, pTimer->szName), 1819 VERR_TM_UNSTABLE_STATE);1945 1946 PDMCritSectRwEnterExcl(&pQueue->AllocLock, VERR_IGNORED); 1820 1947 TM_LOCK_TIMERS(pVM); 1821 1948 continue; … … 1827 1954 case TMTIMERSTATE_DESTROY: 1828 1955 TM_UNLOCK_TIMERS(pVM); 1956 PDMCritSectRwLeaveExcl(&pQueue->AllocLock); 1829 1957 AssertLogRelMsgFailedReturn(("pTimer=%p %s\n", pTimer, tmTimerState(enmState)), VERR_TM_INVALID_STATE); 1830 1958 … … 1832 1960 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName)); 1833 1961 TM_UNLOCK_TIMERS(pVM); 1962 PDMCritSectRwLeaveExcl(&pQueue->AllocLock); 1834 1963 return VERR_TM_UNKNOWN_STATE; 1835 1964 } … … 1845 1974 AssertMsgFailed(("%p:.enmState=%s %s\n", pTimer, tmTimerState(enmState), pTimer->szName)); 1846 1975 TM_UNLOCK_TIMERS(pVM); 1976 PDMCritSectRwLeaveExcl(&pQueue->AllocLock); 1977 1847 1978 AssertMsgReturn(cRetries > 0, ("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName), 1848 1979 VERR_TM_UNSTABLE_STATE); 1980 1981 PDMCritSectRwEnterExcl(&pQueue->AllocLock, VERR_IGNORED); 1849 1982 TM_LOCK_TIMERS(pVM); 1850 1983 } … … 1855 1988 if (fActive) 1856 1989 { 1857 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);1858 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);1990 const PTMTIMER pPrev = tmTimerGetPrev(pQueue, pTimer); 1991 const PTMTIMER pNext = tmTimerGetNext(pQueue, pTimer); 1859 1992 if (pPrev) 1860 TMTIMER_SET_NEXT(pPrev, pNext);1993 tmTimerSetPrev(pQueue, pPrev, pNext); 1861 1994 else 1862 1995 { 1863 TMTIMER_SET_HEAD(pQueue, pNext);1996 tmTimerQueueSetHead(pQueue, pQueue, pNext); 1864 1997 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX; 1865 1998 } 1866 1999 if (pNext) 1867 TMTIMER_SET_PREV(pNext, pPrev);1868 pTimer-> offNext = 0;1869 pTimer-> offPrev = 0;2000 tmTimerSetPrev(pQueue, pNext, pPrev); 2001 pTimer->idxNext = UINT32_MAX; 2002 pTimer->idxPrev = UINT32_MAX; 1870 2003 } 1871 2004 … … 1877 2010 Log3(("TMR3TimerDestroy: tmTimerQueueSchedule\n")); 1878 2011 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a); 1879 Assert(pQueue-> offSchedule);1880 tmTimerQueueSchedule(pVM, pQueue );2012 Assert(pQueue->idxSchedule < pQueue->cTimersAlloc); 2013 tmTimerQueueSchedule(pVM, pQueue, pQueue); 1881 2014 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a); 1882 2015 } 1883 2016 2017 #ifdef VBOX_WITH_STATISTICS 1884 2018 /* 1885 2019 * Deregister statistics. 1886 2020 */ 1887 #ifdef VBOX_WITH_STATISTICS 1888 char szPrefix[128]; 1889 RTStrPrintf(szPrefix, sizeof(szPrefix), "/TM/Timers/%s", pTimer->szName); 1890 STAMR3DeregisterByPrefix(pVM->pUVM, szPrefix); 2021 tmR3TimerDeregisterStats(pVM, pTimer); 1891 2022 #endif 1892 2023 1893 2024 /* 1894 * Ready to move the timer from the created list and onto the free list. 1895 */ 1896 Assert(!pTimer->offNext); Assert(!pTimer->offPrev); Assert(!pTimer->offScheduleNext); 1897 1898 /* unlink from created list */ 1899 if (pTimer->pBigPrev) 1900 pTimer->pBigPrev->pBigNext = pTimer->pBigNext; 1901 else 1902 pVM->tm.s.pCreated = pTimer->pBigNext; 1903 if (pTimer->pBigNext) 1904 pTimer->pBigNext->pBigPrev = pTimer->pBigPrev; 1905 pTimer->pBigNext = 0; 1906 pTimer->pBigPrev = 0; 1907 1908 /* free */ 1909 Log2(("TM: Inserting %p into the free list ahead of %p!\n", pTimer, pVM->tm.s.pFree)); 2025 * Change it to free state and update the queue accordingly. 
2026 */ 2027 Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX); Assert(pTimer->idxScheduleNext == UINT32_MAX); 2028 1910 2029 TM_SET_STATE(pTimer, TMTIMERSTATE_FREE); 1911 pTimer->pBigNext = pVM->tm.s.pFree; 1912 pVM->tm.s.pFree = pTimer; 2030 2031 pQueue->cTimersFree += 1; 2032 uint32_t idxTimer = (uint32_t)(pTimer - pQueue->paTimers); 2033 if (idxTimer < pQueue->idxFreeHint) 2034 pQueue->idxFreeHint = idxTimer; 1913 2035 1914 2036 #ifdef VBOX_STRICT … … 1916 2038 #endif 1917 2039 TM_UNLOCK_TIMERS(pVM); 2040 PDMCritSectRwLeaveExcl(&pQueue->AllocLock); 1918 2041 return VINF_SUCCESS; 1919 2042 } … … 1951 2074 return VERR_INVALID_PARAMETER; 1952 2075 1953 TM_LOCK_TIMERS(pVM); 1954 PTMTIMER pCur = pVM->tm.s.pCreated; 1955 while (pCur) 1956 { 1957 PTMTIMER pDestroy = pCur; 1958 pCur = pDestroy->pBigNext; 1959 if ( pDestroy->enmType == TMTIMERTYPE_DEV 1960 && pDestroy->u.Dev.pDevIns == pDevIns) 2076 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++) 2077 { 2078 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue]; 2079 PDMCritSectRwEnterShared(&pQueue->AllocLock, VERR_IGNORED); 2080 uint32_t idxTimer = pQueue->cTimersAlloc; 2081 while (idxTimer-- > 0) 1961 2082 { 1962 int rc = tmR3TimerDestroy(pVM, pDestroy); 1963 AssertRC(rc); 2083 PTMTIMER pTimer = &pQueue->paTimers[idxTimer]; 2084 if ( pTimer->enmType == TMTIMERTYPE_DEV 2085 && pTimer->u.Dev.pDevIns == pDevIns 2086 && pTimer->enmState < TMTIMERSTATE_DESTROY) 2087 { 2088 PDMCritSectRwLeaveShared(&pQueue->AllocLock); 2089 2090 int rc = tmR3TimerDestroy(pVM, pTimer); 2091 AssertRC(rc); 2092 2093 PDMCritSectRwEnterShared(&pQueue->AllocLock, VERR_IGNORED); 2094 } 1964 2095 } 1965 }1966 TM_UNLOCK_TIMERS(pVM);2096 PDMCritSectRwLeaveShared(&pQueue->AllocLock); 2097 } 1967 2098 1968 2099 LogFlow(("TMR3TimerDestroyDevice: returns VINF_SUCCESS\n")); … … 1984 2115 return VERR_INVALID_PARAMETER; 1985 2116 1986 TM_LOCK_TIMERS(pVM); 1987 PTMTIMER pCur = pVM->tm.s.pCreated; 1988 while (pCur) 1989 { 1990 PTMTIMER pDestroy = pCur; 1991 pCur = pDestroy->pBigNext; 1992 if ( pDestroy->enmType == TMTIMERTYPE_USB 1993 && pDestroy->u.Usb.pUsbIns == pUsbIns) 2117 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++) 2118 { 2119 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue]; 2120 PDMCritSectRwEnterShared(&pQueue->AllocLock, VERR_IGNORED); 2121 uint32_t idxTimer = pQueue->cTimersAlloc; 2122 while (idxTimer-- > 0) 1994 2123 { 1995 int rc = tmR3TimerDestroy(pVM, pDestroy); 1996 AssertRC(rc); 2124 PTMTIMER pTimer = &pQueue->paTimers[idxTimer]; 2125 if ( pTimer->enmType == TMTIMERTYPE_USB 2126 && pTimer->u.Usb.pUsbIns == pUsbIns 2127 && pTimer->enmState < TMTIMERSTATE_DESTROY) 2128 { 2129 PDMCritSectRwLeaveShared(&pQueue->AllocLock); 2130 2131 int rc = tmR3TimerDestroy(pVM, pTimer); 2132 AssertRC(rc); 2133 2134 PDMCritSectRwEnterShared(&pQueue->AllocLock, VERR_IGNORED); 2135 } 1997 2136 } 1998 }1999 TM_UNLOCK_TIMERS(pVM);2137 PDMCritSectRwLeaveShared(&pQueue->AllocLock); 2138 } 2000 2139 2001 2140 LogFlow(("TMR3TimerDestroyUsb: returns VINF_SUCCESS\n")); … … 2017 2156 return VERR_INVALID_PARAMETER; 2018 2157 2019 TM_LOCK_TIMERS(pVM); 2020 PTMTIMER pCur = pVM->tm.s.pCreated; 2021 while (pCur) 2022 { 2023 PTMTIMER pDestroy = pCur; 2024 pCur = pDestroy->pBigNext; 2025 if ( pDestroy->enmType == TMTIMERTYPE_DRV 2026 && pDestroy->u.Drv.pDrvIns == pDrvIns) 2158 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++) 2159 { 2160 
PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue]; 2161 PDMCritSectRwEnterShared(&pQueue->AllocLock, VERR_IGNORED); 2162 uint32_t idxTimer = pQueue->cTimersAlloc; 2163 while (idxTimer-- > 0) 2027 2164 { 2028 int rc = tmR3TimerDestroy(pVM, pDestroy); 2029 AssertRC(rc); 2165 PTMTIMER pTimer = &pQueue->paTimers[idxTimer]; 2166 if ( pTimer->enmType == TMTIMERTYPE_DRV 2167 && pTimer->u.Drv.pDrvIns == pDrvIns 2168 && pTimer->enmState < TMTIMERSTATE_DESTROY) 2169 { 2170 PDMCritSectRwLeaveShared(&pQueue->AllocLock); 2171 2172 int rc = tmR3TimerDestroy(pVM, pTimer); 2173 AssertRC(rc); 2174 2175 PDMCritSectRwEnterShared(&pQueue->AllocLock, VERR_IGNORED); 2176 } 2030 2177 } 2031 }2032 TM_UNLOCK_TIMERS(pVM);2178 PDMCritSectRwLeaveShared(&pQueue->AllocLock); 2179 } 2033 2180 2034 2181 LogFlow(("TMR3TimerDestroyDriver: returns VINF_SUCCESS\n")); … … 2069 2216 DECLINLINE(bool) tmR3HasExpiredTimer(PVM pVM, TMCLOCK enmClock) 2070 2217 { 2071 const uint64_t u64Expire = pVM->tm.s. CTX_SUFF(paTimerQueues)[enmClock].u64Expire;2218 const uint64_t u64Expire = pVM->tm.s.aTimerQueues[enmClock].u64Expire; 2072 2219 return u64Expire != INT64_MAX && u64Expire <= tmClock(pVM, enmClock); 2073 2220 } … … 2087 2234 */ 2088 2235 uint64_t u64Now = TMVirtualGetNoCheck(pVM); 2089 if (pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64Now)2236 if (pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64Now) 2090 2237 return true; 2091 2238 u64Now = pVM->tm.s.fVirtualSyncTicking 2092 2239 ? u64Now - pVM->tm.s.offVirtualSync 2093 2240 : pVM->tm.s.u64VirtualSync; 2094 if (pVM->tm.s. CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64Now)2241 if (pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64Now) 2095 2242 return true; 2096 2243 … … 2132 2279 #endif 2133 2280 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER) 2134 && ( pVM->tm.s. paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule/** @todo FIXME - reconsider offSchedule as a reason for running the timer queues. */2135 || pVM->tm.s. paTimerQueuesR3[TMCLOCK_VIRTUAL].offSchedule2136 || pVM->tm.s. paTimerQueuesR3[TMCLOCK_REAL].offSchedule2137 || pVM->tm.s. paTimerQueuesR3[TMCLOCK_TSC].offSchedule2281 && ( pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].idxSchedule != UINT32_MAX /** @todo FIXME - reconsider offSchedule as a reason for running the timer queues. */ 2282 || pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].idxSchedule != UINT32_MAX 2283 || pVM->tm.s.aTimerQueues[TMCLOCK_REAL].idxSchedule != UINT32_MAX 2284 || pVM->tm.s.aTimerQueues[TMCLOCK_TSC].idxSchedule != UINT32_MAX 2138 2285 || tmR3AnyExpiredTimers(pVM) 2139 2286 ) … … 2189 2336 VMCPU_FF_CLEAR(pVCpuDst, VMCPU_FF_TIMER); /* Clear the FF once we started working for real. */ 2190 2337 2191 Assert( !pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule);2338 Assert(pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].idxSchedule == UINT32_MAX); 2192 2339 tmR3TimerQueueRunVirtualSync(pVM); 2193 2340 if (pVM->tm.s.fVirtualSyncTicking) /** @todo move into tmR3TimerQueueRunVirtualSync - FIXME */ … … 2200 2347 /* TMCLOCK_VIRTUAL */ 2201 2348 STAM_PROFILE_ADV_START(&pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL], s2); 2202 if (pVM->tm.s. paTimerQueuesR3[TMCLOCK_VIRTUAL].offSchedule)2203 tmTimerQueueSchedule(pVM, &pVM->tm.s. paTimerQueuesR3[TMCLOCK_VIRTUAL]);2204 tmR3TimerQueueRun(pVM, &pVM->tm.s. 
paTimerQueuesR3[TMCLOCK_VIRTUAL]);2349 if (pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].idxSchedule != UINT32_MAX) 2350 tmTimerQueueSchedule(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL], &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL]); 2351 tmR3TimerQueueRun(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL]); 2205 2352 STAM_PROFILE_ADV_STOP(&pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL], s2); 2206 2353 2207 2354 /* TMCLOCK_TSC */ 2208 Assert( !pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].offActive); /* not used */2355 Assert(pVM->tm.s.aTimerQueues[TMCLOCK_TSC].idxActive == UINT32_MAX); /* not used */ 2209 2356 2210 2357 /* TMCLOCK_REAL */ 2211 2358 STAM_PROFILE_ADV_START(&pVM->tm.s.aStatDoQueues[TMCLOCK_REAL], s3); 2212 if (pVM->tm.s. paTimerQueuesR3[TMCLOCK_REAL].offSchedule)2213 tmTimerQueueSchedule(pVM, &pVM->tm.s. paTimerQueuesR3[TMCLOCK_REAL]);2214 tmR3TimerQueueRun(pVM, &pVM->tm.s. paTimerQueuesR3[TMCLOCK_REAL]);2359 if (pVM->tm.s.aTimerQueues[TMCLOCK_REAL].idxSchedule != UINT32_MAX) 2360 tmTimerQueueSchedule(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_REAL], &pVM->tm.s.aTimerQueues[TMCLOCK_REAL]); 2361 tmR3TimerQueueRun(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_REAL]); 2215 2362 STAM_PROFILE_ADV_STOP(&pVM->tm.s.aStatDoQueues[TMCLOCK_REAL], s3); 2216 2363 2217 2364 #ifdef VBOX_STRICT 2218 2365 /* check that we didn't screw up. */ 2366 TM_LOCK_TIMERS(pVM); 2219 2367 tmTimerQueuesSanityChecks(pVM, "TMR3TimerQueuesDo"); 2368 TM_UNLOCK_TIMERS(pVM); 2220 2369 #endif 2221 2370 … … 2257 2406 * arm the timer again. 2258 2407 */ 2259 PTMTIMER pNext = TMTIMER_GET_HEAD(pQueue);2408 PTMTIMER pNext = tmTimerQueueGetHead(pQueue, pQueue); 2260 2409 if (!pNext) 2261 2410 return; … … 2264 2413 { 2265 2414 PTMTIMER pTimer = pNext; 2266 pNext = TMTIMER_GET_NEXT(pTimer);2415 pNext = tmTimerGetNext(pQueue, pTimer); 2267 2416 PPDMCRITSECT pCritSect = pTimer->pCritSect; 2268 2417 if (pCritSect) … … 2278 2427 if (fRc) 2279 2428 { 2280 Assert( !pTimer->offScheduleNext); /* this can trigger falsely */2429 Assert(pTimer->idxScheduleNext == UINT32_MAX); /* this can trigger falsely */ 2281 2430 2282 2431 /* unlink */ 2283 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);2432 const PTMTIMER pPrev = tmTimerGetPrev(pQueue, pTimer); 2284 2433 if (pPrev) 2285 TMTIMER_SET_NEXT(pPrev, pNext);2434 tmTimerSetNext(pQueue, pPrev, pNext); 2286 2435 else 2287 2436 { 2288 TMTIMER_SET_HEAD(pQueue, pNext);2437 tmTimerQueueSetHead(pQueue, pQueue, pNext); 2289 2438 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX; 2290 2439 } 2291 2440 if (pNext) 2292 TMTIMER_SET_PREV(pNext, pPrev);2293 pTimer-> offNext = 0;2294 pTimer-> offPrev = 0;2441 tmTimerSetPrev(pQueue, pNext, pPrev); 2442 pTimer->idxNext = UINT32_MAX; 2443 pTimer->idxPrev = UINT32_MAX; 2295 2444 2296 2445 /* fire */ … … 2334 2483 static void tmR3TimerQueueRunVirtualSync(PVM pVM) 2335 2484 { 2336 PTMTIMERQUEUE const pQueue = &pVM->tm.s. paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC];2485 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC]; 2337 2486 VM_ASSERT_EMT(pVM); 2338 2487 Assert(PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock)); … … 2341 2490 * Any timers? 2342 2491 */ 2343 PTMTIMER pNext = TMTIMER_GET_HEAD(pQueue);2492 PTMTIMER pNext = tmTimerQueueGetHead(pQueue, pQueue); 2344 2493 if (RT_UNLIKELY(!pNext)) 2345 2494 { … … 2454 2603 /* Advance */ 2455 2604 PTMTIMER pTimer = pNext; 2456 pNext = TMTIMER_GET_NEXT(pTimer);2605 pNext = tmTimerGetNext(pQueue, pTimer); 2457 2606 2458 2607 /* Take the associated lock. 
*/ … … 2478 2627 2479 2628 /* Unlink it, change the state and do the callout. */ 2480 tmTimerQueueUnlinkActive(pVM, pQueue, p Timer);2629 tmTimerQueueUnlinkActive(pVM, pQueue, pQueue, pTimer); 2481 2630 TM_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_DELIVER); 2482 2631 STAM_PROFILE_START(&pTimer->StatTimer, PrfTimer); … … 2684 2833 Log2(("TMR3VirtualSyncFF: running queue\n")); 2685 2834 2686 Assert( !pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule);2835 Assert(pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].idxSchedule == UINT32_MAX); 2687 2836 tmR3TimerQueueRunVirtualSync(pVM); 2688 2837 if (pVM->tm.s.fVirtualSyncTicking) /** @todo move into tmR3TimerQueueRunVirtualSync - FIXME */ … … 2744 2893 case TMTIMERSTATE_DESTROY: 2745 2894 case TMTIMERSTATE_FREE: 2895 case TMTIMERSTATE_INVALID: 2746 2896 AssertMsgFailed(("Invalid timer state %d %s (%s)\n", pTimer->enmState, tmTimerState(pTimer->enmState), pTimer->szName)); 2747 2897 return SSMR3HandleSetStatus(pSSM, VERR_TM_INVALID_STATE); … … 3692 3842 "HzHint", 3693 3843 "State"); 3694 TM_LOCK_TIMERS(pVM); 3695 for (PTMTIMERR3 pTimer = pVM->tm.s.pCreated; pTimer; pTimer = pTimer->pBigNext) 3696 { 3697 pHlp->pfnPrintf(pHlp, 3698 "%p %08RX32 %08RX32 %08RX32 %s %18RU64 %18RU64 %6RU32 %-25s %s\n", 3699 pTimer, 3700 pTimer->offNext, 3701 pTimer->offPrev, 3702 pTimer->offScheduleNext, 3703 tmR3Get5CharClockName(pTimer->enmClock), 3704 TMTimerGet(pVM, pTimer->hSelf), 3705 pTimer->u64Expire, 3706 pTimer->uHzHint, 3707 tmTimerState(pTimer->enmState), 3708 pTimer->szName); 3709 } 3710 TM_UNLOCK_TIMERS(pVM); 3844 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++) 3845 { 3846 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue]; 3847 PDMCritSectRwEnterShared(&pQueue->AllocLock, VERR_IGNORED); 3848 for (uint32_t idxTimer = 0; idxTimer < pQueue->cTimersAlloc; idxTimer++) 3849 { 3850 PTMTIMER pTimer = &pQueue->paTimers[idxTimer]; 3851 TMTIMERSTATE enmState = pTimer->enmState; 3852 if (enmState < TMTIMERSTATE_DESTROY && enmState > TMTIMERSTATE_INVALID) 3853 pHlp->pfnPrintf(pHlp, 3854 "%p %08RX32 %08RX32 %08RX32 %s %18RU64 %18RU64 %6RU32 %-25s %s\n", 3855 pTimer, 3856 pTimer->idxNext, 3857 pTimer->idxPrev, 3858 pTimer->idxScheduleNext, 3859 tmR3Get5CharClockName(pTimer->enmClock), 3860 TMTimerGet(pVM, pTimer->hSelf), 3861 pTimer->u64Expire, 3862 pTimer->uHzHint, 3863 tmTimerState(enmState), 3864 pTimer->szName); 3865 } 3866 PDMCritSectRwLeaveShared(&pQueue->AllocLock); 3867 } 3711 3868 } 3712 3869 … … 3734 3891 "HzHint", 3735 3892 "State"); 3736 for (unsigned iQueue = 0; iQueue < TMCLOCK_MAX; iQueue++) 3737 { 3893 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++) 3894 { 3895 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue]; 3896 PDMCritSectRwEnterShared(&pQueue->AllocLock, VERR_IGNORED); 3738 3897 TM_LOCK_TIMERS(pVM); 3739 for (PTMTIMERR3 pTimer = TMTIMER_GET_HEAD(&pVM->tm.s.paTimerQueuesR3[iQueue]);3898 for (PTMTIMERR3 pTimer = tmTimerQueueGetHead(pQueue, pQueue); 3740 3899 pTimer; 3741 pTimer = TMTIMER_GET_NEXT(pTimer))3900 pTimer = tmTimerGetNext(pQueue, pTimer)) 3742 3901 { 3743 3902 pHlp->pfnPrintf(pHlp, 3744 3903 "%p %08RX32 %08RX32 %08RX32 %s %18RU64 %18RU64 %6RU32 %-25s %s\n", 3745 3904 pTimer, 3746 pTimer-> offNext,3747 pTimer-> offPrev,3748 pTimer-> offScheduleNext,3905 pTimer->idxNext, 3906 pTimer->idxPrev, 3907 pTimer->idxScheduleNext, 3749 3908 tmR3Get5CharClockName(pTimer->enmClock), 3750 3909 TMTimerGet(pVM, pTimer->hSelf), … … 3755 3914 } 3756 3915 
TM_UNLOCK_TIMERS(pVM); 3916 PDMCritSectRwLeaveShared(&pQueue->AllocLock); 3757 3917 } 3758 3918 } -
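The TM.cpp hunks above drop the old global pCreated/pFree timer lists: timers now live in per-queue tables hanging off pVM->tm.s.aTimerQueues, and the TMR3TimerDestroyDevice/Usb/Driver loops walk each queue's table under its shared AllocLock, releasing the lock around every tmR3TimerDestroy call because that function takes the same lock exclusively. A condensed sketch of that pattern, using the names from the diff and assuming the TM.cpp context; the pfnMatch callback is an invented stand-in for the enmType/owner checks:

    /* Per-queue iteration pattern shared by the TMR3TimerDestroy{Device,Usb,Driver}
     * loops above; pfnMatch stands in for the enmType + pDevIns/pUsbIns/pDrvIns check. */
    static int tmSketchDestroyMatching(PVM pVM, bool (*pfnMatch)(PTMTIMER pTimer, void *pvUser), void *pvUser)
    {
        for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
        {
            PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
            PDMCritSectRwEnterShared(&pQueue->AllocLock, VERR_IGNORED);
            uint32_t idxTimer = pQueue->cTimersAlloc;
            while (idxTimer-- > 0)                              /* walk the table backwards */
            {
                PTMTIMER pTimer = &pQueue->paTimers[idxTimer];
                if (   pTimer->enmState < TMTIMERSTATE_DESTROY  /* skip destroyed/free slots */
                    && pfnMatch(pTimer, pvUser))
                {
                    /* tmR3TimerDestroy enters the AllocLock exclusively itself,
                       so drop the shared hold around the call and re-enter after. */
                    PDMCritSectRwLeaveShared(&pQueue->AllocLock);
                    int rc = tmR3TimerDestroy(pVM, pTimer);
                    AssertRC(rc);
                    PDMCritSectRwEnterShared(&pQueue->AllocLock, VERR_IGNORED);
                }
            }
            PDMCritSectRwLeaveShared(&pQueue->AllocLock);
        }
        return VINF_SUCCESS;
    }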
trunk/src/VBox/VMM/include/TMInline.h
r87774 r87792 23 23 24 24 25 DECLINLINE(PTMTIMER) tmTimerQueueGetHead(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueueShared) 26 { 27 #ifdef IN_RING3 28 RT_NOREF(pQueueShared); 29 uint32_t const idx = pQueueCC->idxActive; 30 #else 31 uint32_t const idx = pQueueShared->idxActive; 32 #endif 33 if (idx < pQueueCC->cTimersAlloc) 34 return &pQueueCC->paTimers[idx]; 35 return NULL; 36 } 37 38 39 DECLINLINE(void) tmTimerQueueSetHead(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueueShared, PTMTIMER pHead) 40 { 41 uint32_t idx; 42 if (pHead) 43 { 44 idx = (uint32_t)(pHead - &pQueueCC->paTimers[0]); 45 AssertMsgStmt(idx < pQueueCC->cTimersAlloc, 46 ("idx=%u (%s) cTimersAlloc=%u\n", idx, pHead->szName, pQueueCC->cTimersAlloc), 47 idx = UINT32_MAX); 48 } 49 else 50 idx = UINT32_MAX; 51 #ifndef IN_RING3 52 pQueueShared->idxActive = idx; 53 #else 54 pQueueCC->idxActive = idx; 55 RT_NOREF(pQueueShared); 56 #endif 57 } 58 59 60 /** 61 * Get the previous timer - translates TMTIMER::idxPrev. 62 */ 63 DECLINLINE(PTMTIMER) tmTimerGetPrev(PTMTIMERQUEUECC pQueueCC, PTMTIMER pTimer) 64 { 65 uint32_t const idxPrev = pTimer->idxPrev; 66 Assert(idxPrev); 67 if (idxPrev < pQueueCC->cTimersAlloc) 68 return &pQueueCC->paTimers[idxPrev]; 69 Assert(idxPrev == UINT32_MAX); 70 return NULL; 71 } 72 73 74 /** 75 * Get the next timer - translates TMTIMER::idxNext. 76 */ 77 DECLINLINE(PTMTIMER) tmTimerGetNext(PTMTIMERQUEUECC pQueueCC, PTMTIMER pTimer) 78 { 79 uint32_t const idxNext = pTimer->idxNext; 80 Assert(idxNext); 81 if (idxNext < pQueueCC->cTimersAlloc) 82 return &pQueueCC->paTimers[idxNext]; 83 Assert(idxNext == UINT32_MAX); 84 return NULL; 85 } 86 87 88 /** 89 * Set the previous timer link (TMTIMER::idxPrev). 90 */ 91 DECLINLINE(void) tmTimerSetPrev(PTMTIMERQUEUECC pQueueCC, PTMTIMER pTimer, PTMTIMER pPrev) 92 { 93 uint32_t idxPrev; 94 if (pPrev) 95 { 96 idxPrev = (uint32_t)(pPrev - &pQueueCC->paTimers[0]); 97 Assert(idxPrev); 98 AssertMsgStmt(idxPrev < pQueueCC->cTimersAlloc, 99 ("idxPrev=%u (%s) cTimersAlloc=%u\n", idxPrev, pPrev->szName, pQueueCC->cTimersAlloc), 100 idxPrev = UINT32_MAX); 101 } 102 else 103 idxPrev = UINT32_MAX; 104 pTimer->idxPrev = idxPrev; 105 } 106 107 108 /** 109 * Set the next timer link (TMTIMER::idxNext). 110 */ 111 DECLINLINE(void) tmTimerSetNext(PTMTIMERQUEUECC pQueueCC, PTMTIMER pTimer, PTMTIMER pNext) 112 { 113 uint32_t idxNext; 114 if (pNext) 115 { 116 idxNext = (uint32_t)(pNext - &pQueueCC->paTimers[0]); 117 Assert(idxNext); 118 AssertMsgStmt(idxNext < pQueueCC->cTimersAlloc, 119 ("idxNext=%u (%s) cTimersAlloc=%u\n", idxNext, pNext->szName, pQueueCC->cTimersAlloc), 120 idxNext = UINT32_MAX); 121 } 122 else 123 idxNext = UINT32_MAX; 124 pTimer->idxNext = idxNext; 125 } 126 127 25 128 /** 26 129 * Used to unlink a timer from the active list. 27 130 * 28 131 * @param pVM The cross context VM structure. 29 * @param pQueue The timer queue. 132 * @param pQueueCC The context specific queue data (same as @a pQueue for 133 * ring-3). 134 * @param pQueue The shared timer queue data. 30 135 * @param pTimer The timer that needs linking. 31 136 * 32 137 * @remarks Called while owning the relevant queue lock. 
33 138 */ 34 DECL_FORCE_INLINE(void) tmTimerQueueUnlinkActive(PVMCC pVM, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)139 DECL_FORCE_INLINE(void) tmTimerQueueUnlinkActive(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer) 35 140 { 36 141 #ifdef VBOX_STRICT … … 42 147 RT_NOREF(pVM); 43 148 44 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);45 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);149 const PTMTIMER pPrev = tmTimerGetPrev(pQueueCC, pTimer); 150 const PTMTIMER pNext = tmTimerGetNext(pQueueCC, pTimer); 46 151 if (pPrev) 47 TMTIMER_SET_NEXT(pPrev, pNext);48 else 49 { 50 TMTIMER_SET_HEAD(pQueue, pNext);152 tmTimerSetNext(pQueueCC, pPrev, pNext); 153 else 154 { 155 tmTimerQueueSetHead(pQueueCC, pQueue, pNext); 51 156 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX; 52 157 DBGFTRACE_U64_TAG(pVM, pQueue->u64Expire, "tmTimerQueueUnlinkActive"); 53 158 } 54 159 if (pNext) 55 TMTIMER_SET_PREV(pNext, pPrev);56 pTimer-> offNext = 0;57 pTimer-> offPrev = 0;160 tmTimerSetPrev(pQueueCC, pNext, pPrev); 161 pTimer->idxNext = UINT32_MAX; 162 pTimer->idxPrev = UINT32_MAX; 58 163 } 59 164 … … 70 175 #ifdef IN_RING3 71 176 # define TMTIMER_HANDLE_TO_PTR_RETURN_EX(a_pVM, a_hTimer, a_rcRet, a_pTimerVar) do { \ 72 RT_NOREF(a_pVM); \ 73 (a_pTimerVar) = (PTMTIMER)hTimer; \ 74 AssertPtrReturn((a_pTimerVar), a_rcRet); \ 177 uintptr_t const idxQueue = (uintptr_t)((a_hTimer) >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) \ 178 & (uintptr_t)TMTIMERHANDLE_QUEUE_IDX_SMASK; \ 179 AssertReturn(idxQueue < RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues), a_rcRet); \ 180 \ 181 uintptr_t const idxTimer = (uintptr_t)((a_hTimer) & TMTIMERHANDLE_TIMER_IDX_MASK); \ 182 AssertReturn(idxQueue < (a_pVM)->tm.s.aTimerQueues[idxQueue].cTimersAlloc, a_rcRet); \ 183 \ 184 (a_pTimerVar) = &(a_pVM)->tm.s.aTimerQueues[idxQueue].paTimers[idxTimer]; \ 75 185 AssertReturn((a_pTimerVar)->hSelf == a_hTimer, a_rcRet); \ 76 186 } while (0) 77 187 #else 78 188 # define TMTIMER_HANDLE_TO_PTR_RETURN_EX(a_pVM, a_hTimer, a_rcRet, a_pTimerVar) do { \ 79 (a_pTimerVar) = (PTMTIMER)MMHyperR3ToCC(pVM, (RTR3PTR)hTimer); \ 80 AssertPtrReturn((a_pTimerVar), a_rcRet); \ 189 uintptr_t const idxQueue = (uintptr_t)((a_hTimer) >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) \ 190 & (uintptr_t)TMTIMERHANDLE_QUEUE_IDX_SMASK; \ 191 AssertReturn(idxQueue < RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues), a_rcRet); \ 192 AssertCompile(RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues) == RT_ELEMENTS((a_pVM)->tmr0.s.aTimerQueues)); \ 193 \ 194 uintptr_t const idxTimer = (uintptr_t)((a_hTimer) & TMTIMERHANDLE_TIMER_IDX_MASK); \ 195 AssertReturn(idxQueue < (a_pVM)->tmr0.s.aTimerQueues[idxQueue].cTimersAlloc, a_rcRet); \ 196 \ 197 (a_pTimerVar) = &(a_pVM)->tmr0.s.aTimerQueues[idxQueue].paTimers[idxTimer]; \ 81 198 AssertReturn((a_pTimerVar)->hSelf == a_hTimer, a_rcRet); \ 82 199 Assert((a_pTimerVar)->fFlags & TMTIMER_FLAGS_RING0); \ 200 Assert(VM_IS_EMT(pVM)); \ 83 201 } while (0) 84 202 #endif … … 105 223 #ifdef IN_RING3 106 224 # define TMTIMER_HANDLE_TO_PTR_RETURN_VOID(a_pVM, a_hTimer, a_pTimerVar) do { \ 107 RT_NOREF(a_pVM); \ 108 (a_pTimerVar) = (PTMTIMER)hTimer; \ 109 AssertPtrReturnVoid((a_pTimerVar)); \ 225 uintptr_t const idxQueue = (uintptr_t)((a_hTimer) >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) \ 226 & (uintptr_t)TMTIMERHANDLE_QUEUE_IDX_SMASK; \ 227 AssertReturnVoid(idxQueue < RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues)); \ 228 \ 229 uintptr_t const idxTimer = (uintptr_t)((a_hTimer) & TMTIMERHANDLE_TIMER_IDX_MASK); \ 230 AssertReturnVoid(idxQueue < 
(a_pVM)->tm.s.aTimerQueues[idxQueue].cTimersAlloc); \ 231 \ 232 (a_pTimerVar) = &(a_pVM)->tm.s.aTimerQueues[idxQueue].paTimers[idxTimer]; \ 110 233 AssertReturnVoid((a_pTimerVar)->hSelf == a_hTimer); \ 111 234 } while (0) 112 235 #else 113 236 # define TMTIMER_HANDLE_TO_PTR_RETURN_VOID(a_pVM, a_hTimer, a_pTimerVar) do { \ 114 (a_pTimerVar) = (PTMTIMER)MMHyperR3ToCC(pVM, (RTR3PTR)hTimer); \ 115 AssertPtrReturnVoid((a_pTimerVar)); \ 237 uintptr_t const idxQueue = (uintptr_t)((a_hTimer) >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) \ 238 & (uintptr_t)TMTIMERHANDLE_QUEUE_IDX_SMASK; \ 239 AssertReturnVoid(idxQueue < RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues)); \ 240 AssertCompile(RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues) == RT_ELEMENTS((a_pVM)->tmr0.s.aTimerQueues)); \ 241 \ 242 uintptr_t const idxTimer = (uintptr_t)((a_hTimer) & TMTIMERHANDLE_TIMER_IDX_MASK); \ 243 AssertReturnVoid(idxQueue < (a_pVM)->tmr0.s.aTimerQueues[idxQueue].cTimersAlloc); \ 244 \ 245 (a_pTimerVar) = &(a_pVM)->tmr0.s.aTimerQueues[idxQueue].paTimers[idxTimer]; \ 116 246 AssertReturnVoid((a_pTimerVar)->hSelf == a_hTimer); \ 117 247 Assert((a_pTimerVar)->fFlags & TMTIMER_FLAGS_RING0); \ 248 Assert(VM_IS_EMT(pVM)); \ 118 249 } while (0) 119 250 #endif -
trunk/src/VBox/VMM/include/TMInternal.h
r87773 r87792 29 29 #include <VBox/vmm/stam.h> 30 30 #include <VBox/vmm/pdmcritsect.h> 31 #include <VBox/vmm/pdmcritsectrw.h> 31 32 32 33 RT_C_DECLS_BEGIN … … 50 51 typedef enum TMTIMERTYPE 51 52 { 53 /** Invalid zero value. */ 54 TMTIMERTYPE_INVALID = 0, 52 55 /** Device timer. */ 53 TMTIMERTYPE_DEV = 1,56 TMTIMERTYPE_DEV, 54 57 /** USB device timer. */ 55 58 TMTIMERTYPE_USB, … … 65 68 typedef enum TMTIMERSTATE 66 69 { 70 /** Invalid zero entry (used for table entry zero). */ 71 TMTIMERSTATE_INVALID = 0, 67 72 /** Timer is stopped. */ 68 TMTIMERSTATE_STOPPED = 1,73 TMTIMERSTATE_STOPPED, 69 74 /** Timer is active. */ 70 75 TMTIMERSTATE_ACTIVE, … … 104 109 && (enmState) >= TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE) 105 110 111 /** @name Timer handle value elements 112 * @{ */ 113 #define TMTIMERHANDLE_RANDOM_MASK UINT64_C(0xffffffffff000000) 114 #define TMTIMERHANDLE_QUEUE_IDX_SHIFT 16 115 #define TMTIMERHANDLE_QUEUE_IDX_MASK UINT64_C(0x0000000000ff0000) 116 #define TMTIMERHANDLE_QUEUE_IDX_SMASK UINT64_C(0x00000000000000ff) 117 #define TMTIMERHANDLE_TIMER_IDX_MASK UINT64_C(0x000000000000ffff) 118 /** @} */ 119 106 120 107 121 /** … … 166 180 /** Timer state. */ 167 181 volatile TMTIMERSTATE enmState; 168 /** T imer relative offset to thenext timer in the schedule list. */169 int32_t volatile offScheduleNext;170 171 /** T imer relative offset tothe next timer in the chain. */172 int32_t offNext;173 /** T imer relative offset tothe previous timer in the chain. */174 int32_t offPrev;182 /** The index of the next next timer in the schedule list. */ 183 int32_t volatile idxScheduleNext; 184 185 /** The index of the next timer in the chain. */ 186 uint32_t idxNext; 187 /** The index of the previous timer in the chain. */ 188 uint32_t idxPrev; 175 189 176 190 /** It's own handle value. */ … … 186 200 R3PTRTYPE(PPDMCRITSECT) pCritSect; 187 201 188 /** Pointer to the next timer in the list of created or free timers. (TM::pTimers or TM::pFree) */189 PTMTIMERR3 pBigNext;190 /** Pointer to the previous timer in the list of all created timers. (TM::pTimers) */191 PTMTIMERR3 pBigPrev;192 202 /** The timer name. */ 193 203 char szName[32]; … … 234 244 #endif 235 245 236 /** Get the previous timer. */ 237 #define TMTIMER_GET_PREV(pTimer) ((PTMTIMER)((pTimer)->offPrev ? (intptr_t)(pTimer) + (pTimer)->offPrev : 0)) 238 /** Get the next timer. */ 239 #define TMTIMER_GET_NEXT(pTimer) ((PTMTIMER)((pTimer)->offNext ? (intptr_t)(pTimer) + (pTimer)->offNext : 0)) 240 /** Set the previous timer link. */ 241 #define TMTIMER_SET_PREV(pTimer, pPrev) ((pTimer)->offPrev = (pPrev) ? (intptr_t)(pPrev) - (intptr_t)(pTimer) : 0) 242 /** Set the next timer link. */ 243 #define TMTIMER_SET_NEXT(pTimer, pNext) ((pTimer)->offNext = (pNext) ? (intptr_t)(pNext) - (intptr_t)(pTimer) : 0) 244 245 246 /** 247 * A timer queue. 248 * 249 * This is allocated on the hyper heap. 246 247 /** 248 * A timer queue, shared. 250 249 */ 251 250 typedef struct TMTIMERQUEUE 252 251 { 252 /** The ring-0 mapping of the timer table. */ 253 R3PTRTYPE(PTMTIMER) paTimers; 254 253 255 /** The cached expire time for this queue. 254 256 * Updated by EMT when scheduling the queue or modifying the head timer. … … 259 261 * When no scheduling is pending, this list is will be ordered by expire time (ascending). 260 262 * Access is serialized by only letting the emulation thread (EMT) do changes. 
261 *262 * The offset is relative to the queue structure.263 263 */ 264 int32_t offActive;264 uint32_t idxActive; 265 265 /** List of timers pending scheduling of some kind. 266 266 * … … 268 268 * TMTIMERSTATE_PENDING_DESTRUCTION, TMTIMERSTATE_PENDING_STOPPING_DESTRUCTION, 269 269 * TMTIMERSTATE_PENDING_RESCHEDULING and TMTIMERSTATE_PENDING_SCHEDULE. 270 *271 * The offset is relative to the queue structure.272 270 */ 273 int32_t volatile offSchedule;271 uint32_t volatile idxSchedule; 274 272 /** The clock for this queue. */ 275 273 TMCLOCK enmClock; 276 /** Pad the structure up to 32 bytes. */ 277 uint32_t au32Padding[3]; 274 275 /** The size of the paTimers allocation (in entries). */ 276 uint32_t cTimersAlloc; 277 /** Number of free timer entries. */ 278 uint32_t cTimersFree; 279 /** Where to start looking for free timers. */ 280 uint32_t idxFreeHint; 281 /** The queue name. */ 282 char szName[16]; 283 /** Set if we've disabled growing. */ 284 bool fCannotGrow; 285 /** Align on 64-byte boundrary. */ 286 bool afAlignment[7]; 287 /** Lock serializing timer allocation and deallocation. */ 288 PDMCRITSECTRW AllocLock; 278 289 } TMTIMERQUEUE; 279 290 AssertCompileSizeAlignment(TMTIMERQUEUE, 64); 280 291 /** Pointer to a timer queue. */ 281 292 typedef TMTIMERQUEUE *PTMTIMERQUEUE; 282 293 283 /** Get the head of the active timer list. */ 284 #define TMTIMER_GET_HEAD(pQueue) ((PTMTIMER)((pQueue)->offActive ? (intptr_t)(pQueue) + (pQueue)->offActive : 0)) 285 /** Set the head of the active timer list. */ 286 #define TMTIMER_SET_HEAD(pQueue, pHead) ((pQueue)->offActive = pHead ? (intptr_t)pHead - (intptr_t)(pQueue) : 0) 294 /** 295 * A timer queue, ring-0 only bits. 296 */ 297 typedef struct TMTIMERQUEUER0 298 { 299 /** The size of the paTimers allocation (in entries). */ 300 uint32_t cTimersAlloc; 301 uint32_t uAlignment; 302 /** The ring-0 mapping of the timer table. */ 303 R0PTRTYPE(PTMTIMER) paTimers; 304 /** Handle to the timer table allocation. */ 305 RTR0MEMOBJ hMemObj; 306 /** Handle to the ring-3 mapping of the timer table. */ 307 RTR0MEMOBJ hMapObj; 308 } TMTIMERQUEUER0; 309 /** Pointer to the ring-0 timer queue data. */ 310 typedef TMTIMERQUEUER0 *PTMTIMERQUEUER0; 311 312 /** Pointer to the current context data for a timer queue. 313 * @note In ring-3 this is the same as the shared data. */ 314 #ifdef IN_RING3 315 typedef TMTIMERQUEUE *PTMTIMERQUEUECC; 316 #else 317 typedef TMTIMERQUEUER0 *PTMTIMERQUEUECC; 318 #endif 319 /** Helper macro for getting the current context queue point. */ 320 #ifdef IN_RING3 321 # define TM_GET_TIMER_QUEUE_CC(a_pVM, a_idxQueue, a_pQueueShared) (a_pQueueShared) 322 #else 323 # define TM_GET_TIMER_QUEUE_CC(a_pVM, a_idxQueue, a_pQueueShared) (&(a_pVM)->tmr0.s.aTimerQueues[a_idxQueue]) 324 #endif 287 325 288 326 … … 348 386 349 387 /** 350 * Converts a TM pointer into a VM pointer.351 * @returns Pointer to the VM structure the TM is part of.352 * @param pTM Pointer to TM instance data.353 */354 #define TM2VM(pTM) ( (PVM)((char*)pTM - pTM->offVM) )355 356 357 /**358 388 * TM VM Instance data. 359 389 * Changes to this must checked against the padding of the cfgm union in VM! … … 361 391 typedef struct TM 362 392 { 363 /** Offset to the VM structure.364 * See TM2VM(). */365 RTUINT offVM;366 367 393 /** The current TSC mode of the VM. 368 394 * Config variable: Mode (string). */ … … 370 396 /** The original TSC mode of the VM. */ 371 397 TMTSCMODE enmOriginalTSCMode; 372 /** Alignment padding. 
*/373 uint32_t u32Alignment0;374 398 /** Whether the TSC is tied to the execution of code. 375 399 * Config variable: TSCTiedToExecution (bool) */ … … 513 537 R3PTRTYPE(char *) pszAlignment2b; 514 538 515 /** Timer queues for the different clock types - R3 Ptr */ 516 R3PTRTYPE(PTMTIMERQUEUE) paTimerQueuesR3; 517 /** Timer queues for the different clock types - R0 Ptr */ 518 R0PTRTYPE(PTMTIMERQUEUE) paTimerQueuesR0; 519 /** Timer queues for the different clock types - RC Ptr */ 520 RCPTRTYPE(PTMTIMERQUEUE) paTimerQueuesRC; 539 /** Timer queues for the different clock types. */ 540 TMTIMERQUEUE aTimerQueues[TMCLOCK_MAX]; 521 541 522 542 /** Pointer to our RC mapping of the GIP. */ … … 524 544 /** Pointer to our R3 mapping of the GIP. */ 525 545 R3PTRTYPE(void *) pvGIPR3; 526 527 /** Pointer to a singly linked list of free timers.528 * This chain is using the TMTIMER::pBigNext members.529 * Only accessible from the emulation thread. */530 PTMTIMERR3 pFree;531 532 /** Pointer to a doubly linked list of created timers.533 * This chain is using the TMTIMER::pBigNext and TMTIMER::pBigPrev members.534 * Only accessible from the emulation thread. */535 PTMTIMERR3 pCreated;536 546 537 547 /** The schedule timer timer handle (runtime timer). … … 696 706 /** Pointer to TM VM instance data. */ 697 707 typedef TM *PTM; 708 698 709 699 710 /** … … 796 807 typedef TMCPU *PTMCPU; 797 808 809 810 /** 811 * TM data kept in the ring-0 GVM. 812 */ 813 typedef struct TMR0PERVM 814 { 815 /** Timer queues for the different clock types. */ 816 TMTIMERQUEUER0 aTimerQueues[TMCLOCK_MAX]; 817 } TMR0PERVM; 818 819 798 820 const char *tmTimerState(TMTIMERSTATE enmState); 799 void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUE pQueue);821 void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue); 800 822 #ifdef VBOX_STRICT 801 void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere);823 void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere); 802 824 #endif 803 825
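With timers addressed by table index, a TMTIMERHANDLE becomes a packed value: the low 16 bits give the timer's slot in its queue (TMTIMERHANDLE_TIMER_IDX_MASK), bits 16-23 select the queue (TMTIMERHANDLE_QUEUE_IDX_SHIFT/SMASK), and the upper 40 bits carry a random component that the hSelf comparison validates. The TMTIMER_HANDLE_TO_PTR_RETURN_EX macros in TMInline.h perform exactly this decode; note that, as committed, they compare idxQueue rather than idxTimer against cTimersAlloc, whereas the sketch below uses idxTimer, which appears to be the intent. Written out as a ring-3 function for readability (the function name is illustrative and error handling is reduced to NULL returns):

    /* Decode a packed timer handle into queue index + timer index and validate it. */
    static PTMTIMER tmSketchHandleToTimer(PVM pVM, TMTIMERHANDLE hTimer)
    {
        uintptr_t const idxQueue = (uintptr_t)(hTimer >> TMTIMERHANDLE_QUEUE_IDX_SHIFT)
                                 & (uintptr_t)TMTIMERHANDLE_QUEUE_IDX_SMASK;
        if (idxQueue >= RT_ELEMENTS(pVM->tm.s.aTimerQueues))
            return NULL;

        uintptr_t const idxTimer = (uintptr_t)(hTimer & TMTIMERHANDLE_TIMER_IDX_MASK);
        if (idxTimer >= pVM->tm.s.aTimerQueues[idxQueue].cTimersAlloc)
            return NULL;

        PTMTIMER pTimer = &pVM->tm.s.aTimerQueues[idxQueue].paTimers[idxTimer];
        return pTimer->hSelf == hTimer ? pTimer : NULL;     /* random bits must match */
    }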
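On the free side, tmR3TimerDestroy above no longer pushes the timer onto a pFree list; it flips the state to TMTIMERSTATE_FREE, bumps cTimersFree and lowers idxFreeHint so later allocations can start their search near the lowest freed slot. The allocation counterpart is not part of these hunks, so the sketch below is only an assumption about how that bookkeeping could be consumed (tmSketchTimerAlloc is an invented name, not the changeset's allocator):

    /* Hypothetical consumer of cTimersFree/idxFreeHint; the caller is assumed to
     * hold the queue's AllocLock exclusively.  NOT the changeset's actual allocator. */
    static PTMTIMER tmSketchTimerAlloc(PTMTIMERQUEUE pQueue)
    {
        if (pQueue->cTimersFree == 0)
            return NULL;                                    /* table would need to grow */
        for (uint32_t idx = pQueue->idxFreeHint; idx < pQueue->cTimersAlloc; idx++)
        {
            PTMTIMER pTimer = &pQueue->paTimers[idx];
            if (pTimer->enmState == TMTIMERSTATE_FREE)
            {
                pQueue->cTimersFree -= 1;
                pQueue->idxFreeHint = idx + 1;              /* next search starts above this slot */
                return pTimer;
            }
        }
        return NULL; /* stale hint; a real allocator would rescan from slot 1 or grow the table */
    }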