VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@74795

Last change on this file since 74795 was 74785, checked in by vboxsync, 6 years ago

vm.h,VMM: Use VMCPU_FF_IS_SET instead of VMCPU_FF_IS_PENDING when checking a single flag. Added compile time assertion on single flag. bugref:9180

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 91.9 KB
/* $Id: TMAll.cpp 74785 2018-10-12 10:14:19Z vboxsync $ */
/** @file
 * TM - Timeout Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#ifdef DEBUG_bird
# define DBGFTRACE_DISABLED /* annoying */
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/dbgftrace.h>
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
#  include <VBox/vmm/rem.h>
# endif
#endif
#include "TMInternal.h"
#include <VBox/vmm/vm.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>
#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-math.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif

#include "TMInline.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner(pCritSect) \
                          || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif

/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
 * Checks for lock order trouble between the timer critsect and the critical
 * section critsect. The virtual sync critsect must always be entered before
 * the one associated with the timer (see TMR3TimerQueuesDo). It is OK if there
 * isn't any critical section associated with the timer or if the calling thread
 * doesn't own it, ASSUMING of course that the thread using this macro is going
 * to enter the virtual sync critical section anyway.
 *
 * @remarks This is a slightly relaxed timer locking attitude compared to
 *          TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
 *          should know what it's doing if it's stopping or starting a timer
 *          without taking the device lock.
 */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVM, (pTimer)->pCritSect); \
            AssertMsg(   pCritSect \
                      && (   !PDMCritSectIsOwner(pCritSect) \
                          || PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock) \
                          || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
#endif


/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
#endif
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs           = RTTimeNanoTS();
    uint64_t const cNsTotalNew       = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    uint64_t const cNsExecutingDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
    uint64_t const cNsExecutingNew   = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    uint64_t const cNsOtherNew       = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
    if (cNsOtherNewDelta > 0)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before execution) */
# endif

    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cNsTotal     = cNsTotalNew;
    pVCpu->tm.s.cNsOther     = cNsOtherNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}

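
/*
 * Editor's illustrative sketch (not part of the original file): the uTimesGen
 * increments above form a sequence lock ("seqlock"): the counter is odd while
 * an update is in progress and even otherwise, so a reader can pull a
 * consistent snapshot of the statistics without taking a lock.  A minimal
 * stand-alone C11 rendition follows; every name in it (DemoStats,
 * demoStatsWrite, demoStatsRead) is hypothetical, and memory fences are
 * elided for brevity.
 */
#include <stdatomic.h>
#include <stdint.h>

typedef struct DemoStats
{
    _Atomic uint32_t uGen;  /* odd while a writer is mid-update */
    uint64_t         cNsA;
    uint64_t         cNsB;
} DemoStats;

static void demoStatsWrite(DemoStats *pStats, uint64_t cNsA, uint64_t cNsB)
{
    uint32_t uGen = atomic_fetch_add(&pStats->uGen, 1) + 1;    /* even -> odd */
    pStats->cNsA = cNsA;
    pStats->cNsB = cNsB;
    atomic_store(&pStats->uGen, (uGen | 1) + 1);               /* odd -> even */
}

static void demoStatsRead(DemoStats *pStats, uint64_t *pcNsA, uint64_t *pcNsB)
{
    uint32_t uGen;
    do
    {
        while ((uGen = atomic_load(&pStats->uGen)) & 1)
        { /* writer active, spin */ }
        *pcNsA = pStats->cNsA;
        *pcNsB = pStats->cNsB;
    } while (atomic_load(&pStats->uGen) != uGen);              /* retry if a writer intervened */
}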

/**
 * Notification that the CPU is entering the halt state.
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
#endif

    if (    pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that the CPU is leaving the halt state.
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs        = RTTimeNanoTS();
    uint64_t const cNsTotalNew    = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
    uint64_t const cNsHaltedNew   = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
    uint64_t const cNsOtherNew    = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
    if (cNsOtherNewDelta > 0)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before halting) */
# endif

    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.cNsHalted = cNsHaltedNew;
    pVCpu->tm.s.cNsTotal  = cNsTotalNew;
    pVCpu->tm.s.cNsOther  = cNsOtherNew;
    pVCpu->tm.s.cPeriodsHalted++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}


/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * @param   pVM     The cross context VM structure.
 */
DECLINLINE(void) tmScheduleNotify(PVM pVM)
{
    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
    {
        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
        REMR3NotifyTimerPending(pVM, pVCpuDst);
# endif
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    }
}

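
/*
 * Editor's illustrative sketch (not part of the original file):
 * tmScheduleNotify above is the classic "set flag, then kick the sleeper"
 * pattern, waking only on the 0 -> 1 transition.  Stand-alone C11 rendition
 * with hypothetical names (DemoCpu, DEMO_FF_TIMER and demoWakeCpu stand in
 * for VMCPU, VMCPU_FF_TIMER and VMR3NotifyCpuFFU):
 */
#include <stdatomic.h>
#include <stdint.h>

#define DEMO_FF_TIMER UINT32_C(0x00000001)

typedef struct DemoCpu { _Atomic uint32_t fLocalForcedActions; } DemoCpu;

static void demoWakeCpu(DemoCpu *pCpuDst) { (void)pCpuDst; /* e.g. signal an event semaphore */ }

static void demoScheduleNotify(DemoCpu *pCpuDst)
{
    uint32_t fOld = atomic_fetch_or(&pCpuDst->fLocalForcedActions, DEMO_FF_TIMER);
    if (!(fOld & DEMO_FF_TIMER))    /* only the 0 -> 1 transition wakes the EMT */
        demoWakeCpu(pCpuDst);
}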

/**
 * Schedule the queue which was changed.
 */
DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);
    if (    VM_IS_EMT(pVM)
        &&  RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
    {
        STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
        tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
#ifdef VBOX_STRICT
        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
        STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        TM_UNLOCK_TIMERS(pVM);
    }
    else
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
            tmScheduleNotify(pVM);
    }
}


/**
 * Tries to change the timer state to enmStateNew from enmStateOld.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}

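
/*
 * Editor's illustrative sketch (not part of the original file):
 * TM_TRY_SET_STATE is defined in TMInternal.h; conceptually it is a single
 * compare-and-swap on the state field, which is what makes the optimistic
 * retry loops further down in this file safe without holding a lock.
 * Stand-alone C11 equivalent with hypothetical names:
 */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct DemoTimer { _Atomic int enmState; } DemoTimer;

static bool demoTimerTry(DemoTimer *pTimer, int enmStateNew, int enmStateOld)
{
    /* Fails (returns false) if another thread changed the state first. */
    return atomic_compare_exchange_strong(&pTimer->enmState, &enmStateOld, enmStateNew);
}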

/**
 * Links the timer onto the scheduling queue.
 *
 * @param   pQueue  The timer queue the timer belongs to.
 * @param   pTimer  The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(!pTimer->offScheduleNext);
    const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
    int32_t offHead;
    do
    {
        offHead = pQueue->offSchedule;
        if (offHead)
            pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
        else
            pTimer->offScheduleNext = 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
}

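
/*
 * Editor's illustrative sketch (not part of the original file):
 * tmTimerLinkSchedule above is a lock-free LIFO push.  It stores
 * self-relative offsets rather than pointers so the very same list is valid
 * in the ring-3, ring-0 and raw-mode mappings; the pointer-based stand-alone
 * version below shows the same CAS loop with hypothetical names.
 */
#include <stdatomic.h>
#include <stddef.h>

typedef struct DemoNode { struct DemoNode *pNext; } DemoNode;
typedef struct DemoList { _Atomic(DemoNode *) pHead; } DemoList;

static void demoListPush(DemoList *pList, DemoNode *pNode)
{
    DemoNode *pHead = atomic_load(&pList->pHead);
    do
        pNode->pNext = pHead;   /* republished on each retry */
    while (!atomic_compare_exchange_weak(&pList->pHead, &pHead, pNode));
}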

/**
 * Tries to change the timer state to enmStateNew from enmStateOld and, on
 * success, links the timer into the scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
    {
        tmTimerLinkSchedule(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
        return true;
    }
    return false;
}


/**
 * Links a timer into the active list of a timer queue.
 *
 * @param   pQueue      The queue.
 * @param   pTimer      The timer.
 * @param   u64Expire   The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offNext);
    Assert(!pTimer->offPrev);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */

    PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
    if (pCur)
    {
        for (;; pCur = TMTIMER_GET_NEXT(pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                TMTIMER_SET_NEXT(pTimer, pCur);
                TMTIMER_SET_PREV(pTimer, pPrev);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pTimer);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive head", R3STRING(pTimer->pszDesc));
                }
                TMTIMER_SET_PREV(pCur, pTimer);
                return;
            }
            if (!pCur->offNext)
            {
                TMTIMER_SET_NEXT(pCur, pTimer);
                TMTIMER_SET_PREV(pTimer, pCur);
                DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive tail", R3STRING(pTimer->pszDesc));
                return;
            }
        }
    }
    else
    {
        TMTIMER_SET_HEAD(pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive empty", R3STRING(pTimer->pszDesc));
    }
}

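
/*
 * Editor's illustrative sketch (not part of the original file):
 * tmTimerQueueLinkActive above is a plain sorted insert into a doubly linked
 * list keyed on the expiration time, performed under the queue lock; timers
 * with equal expiration times keep FIFO order.  Pointer-based stand-alone
 * equivalent with hypothetical names:
 */
#include <stdint.h>
#include <stddef.h>

typedef struct DemoActive
{
    struct DemoActive *pNext, *pPrev;
    uint64_t           u64Expire;
} DemoActive;

static void demoQueueLinkActive(DemoActive **ppHead, DemoActive *pTimer)
{
    DemoActive *pPrev = NULL;
    DemoActive *pCur  = *ppHead;
    while (pCur && pCur->u64Expire <= pTimer->u64Expire)    /* stop before the first later timer */
    {
        pPrev = pCur;
        pCur  = pCur->pNext;
    }
    pTimer->pNext = pCur;
    pTimer->pPrev = pPrev;
    if (pCur)
        pCur->pPrev = pTimer;
    if (pPrev)
        pPrev->pNext = pTimer;
    else
        *ppHead = pTimer;                                   /* new head, i.e. the earliest expiration */
}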


/**
 * Schedules the given timer on the given queue.
 *
 * @param   pQueue      The timer queue.
 * @param   pTimer      The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);

    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLinkSchedule(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}


/**
 * Schedules the specified timer queue.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pQueue  The queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);
    NOREF(pVM);

    /*
     * Dequeue the scheduling list and iterate it.
     */
    int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
    if (!offNext)
        return;
    PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
    while (pNext)
    {
        /*
         * Unlink the head timer and find the next one.
         */
        PTMTIMER pTimer = pNext;
        pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
        pTimer->offScheduleNext = 0;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
        tmTimerQueueScheduleOne(pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    } /* foreach timer in current schedule batch. */
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}

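
/*
 * Editor's illustrative sketch (not part of the original file):
 * tmTimerQueueSchedule above claims the entire pending list with one atomic
 * exchange (offSchedule <- 0) and then walks the detached chain privately,
 * so concurrent pushers simply start a new batch.  Stand-alone rendition of
 * the pattern, reusing the hypothetical DemoList/DemoNode types from the
 * push sketch earlier:
 */
static void demoListDrain(DemoList *pList, void (*pfnProcess)(DemoNode *))
{
    DemoNode *pNode = atomic_exchange(&pList->pHead, (DemoNode *)NULL);
    while (pNode)
    {
        DemoNode *pNext = pNode->pNext; /* read first: processing may relink the node */
        pfnProcess(pNode);
        pNode = pNext;
    }
}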

#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszWhere    Caller location clue.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);

    /*
     * Check the linking of the active lists.
     */
    bool fHaveVirtualSyncLock = false;
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
        {
            if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) != VINF_SUCCESS)
                continue;
            fHaveVirtualSyncLock = true;
        }
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    AssertMsg(  !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                    Assert(pCur->offPrev || pCur == pCurAct);
                    while (pCurAct && pCurAct != pCur)
                        pCurAct = TMTIMER_GET_NEXT(pCurAct);
                    Assert(pCurAct == pCur);
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    Assert(!pCur->offNext);
                    Assert(!pCur->offPrev);
                    for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                         pCurAct;
                         pCurAct = TMTIMER_GET_NEXT(pCurAct))
                    {
                        Assert(pCurAct != pCur);
                        Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                        Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                    }
                }
                break;

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */

    if (fHaveVirtualSyncLock)
        PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}
#endif /* !VBOX_STRICT */

#ifdef VBOX_HIGH_RES_TIMERS_HACK

/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63)));

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    if (u64GipTime >= u64Delta)
    {
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
    }
    else
    {
        u64Delta -= u64GipTime;
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}

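
/*
 * Editor's illustrative sketch (not part of the original file):
 * ASMMultU64ByU32DivByU32 conceptually computes u64 * u32Mul / u32Div without
 * letting the intermediate product overflow 64 bits.  Where a 128-bit integer
 * type is available (assumption: GCC/Clang-style unsigned __int128) the
 * equivalent, with a hypothetical helper name, is simply:
 */
#include <stdint.h>

static uint64_t demoMulDiv(uint64_t u64, uint32_t u32Mul, uint32_t u32Div)
{
    return (uint64_t)((unsigned __int128)u64 * u32Mul / u32Div);
}
/*
 * Worked example: with a 200% warp drive (u32Pct = 200) a guest-time delta is
 * halved to get back to GIP (host) time, so demoMulDiv(u64Delta, 100, 200)
 * == u64Delta / 2; at 50% it is doubled.
 */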

/**
 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
 * than the one dedicated to timer work.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
{
    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
    *pu64Delta = s_u64OtherRet;
    return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
}


/**
 * Worker for tmTimerPollInternal.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pVCpuDst    The cross context virtual CPU structure of the dedicated
 *                      timer EMT.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 * @param   pCounter    The statistics counter to update.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
                                                 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
{
    STAM_COUNTER_INC(pCounter); NOREF(pCounter);
    if (pVCpuDst != pVCpu)
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    *pu64Delta = 0;
    return 0;
}

/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pu64Delta   Where to store the delta.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    PVMCPU         pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    const uint64_t u64Now   = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t  i64Delta1  = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#if defined(IN_RING3) && defined(VBOX_WITH_REM)
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straightforward when we're in a catch-up: not only
     * do we have to adjust 'now', but we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (   !pVM->tm.s.fRunningQueues
                    && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#if defined(IN_RING3) && defined(VBOX_WITH_REM)
                    REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     */
    uint64_t    off;
    uint32_t    u32Pct = 0;
    bool        fCatchUp;
    int         cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (   !pVM->tm.s.fRunningQueues
            && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#if defined(IN_RING3) && defined(VBOX_WITH_REM)
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}

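
/*
 * Editor's illustrative sketch (not part of the original file): the catch-up
 * block above shrinks the virtual-sync offset by u32Pct percent of the
 * virtual time elapsed since the last update, never dropping below the
 * "given up" offset.  Condensed stand-alone form of that arithmetic, reusing
 * the hypothetical demoMulDiv helper sketched earlier:
 */
static uint64_t demoCatchUpOffset(uint64_t off, uint64_t u64Now, uint64_t u64Prev,
                                  uint32_t u32Pct, uint64_t offGivenUp)
{
    uint64_t const u64Sub = demoMulDiv(u64Now - u64Prev, u32Pct, 100);
    return off > u64Sub + offGivenUp ? off - u64Sub : offGivenUp; /* clamp: fully caught up */
}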

/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns true if timers are pending, false if not.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @thread  The emulation thread.
 */
VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t off = 0;
    tmTimerPollInternal(pVM, pVCpu, &off);
    return off == 0;
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
{
    uint64_t off;
    tmTimerPollInternal(pVM, pVCpu, &off);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pu64Delta   Where to store the delta.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
}

#endif /* VBOX_HIGH_RES_TIMERS_HACK */

/**
 * Gets the host context ring-3 pointer of the timer.
 *
 * @returns HC R3 pointer.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the host context ring-0 pointer of the timer.
 *
 * @returns HC R0 pointer.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the RC pointer of the timer.
 *
 * @returns RC pointer.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
{
    return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Locks the timer clock.
 *
 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
 *          if the clock does not have a lock.
 * @param   pTimer  The timer which clock lock we wish to take.
 * @param   rcBusy  What to return in ring-0 and raw-mode context if the lock
 *                  is busy.  Pass VINF_SUCCESS to acquire the critical section
 *                  through a ring-3 call if necessary.
 *
 * @remarks Currently only supported on timers using the virtual sync clock.
 */
VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
{
    AssertPtr(pTimer);
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
    return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
}


/**
 * Unlocks a timer clock locked by TMTimerLock.
 *
 * @param   pTimer  The timer which clock to unlock.
 */
VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
    PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}


/**
 * Checks if the current thread owns the timer clock lock.
 *
 * @returns @c true if it's the owner, @c false if not.
 * @param   pTimer  The timer handle.
 */
VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
    return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}

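
/*
 * Editor's illustrative usage sketch (not part of the original file): code
 * running outside the timer callback typically brackets virtual-sync timer
 * work with TMTimerLock/TMTimerUnlock.  A minimal, hypothetical caller,
 * passing VINF_SUCCESS as rcBusy per the TMTimerLock docs so the lock is
 * acquired via a ring-3 call if contended (TMTimerSet is defined further
 * down in this file):
 */
static void demoArmVirtualSyncTimer(PTMTIMER pTimer, uint64_t u64Expire) /* hypothetical helper */
{
    int rc = TMTimerLock(pTimer, VINF_SUCCESS);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        TMTimerSet(pTimer, u64Expire);
        TMTimerUnlock(pTimer);
    }
}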

/**
 * Optimized TMTimerSet code path for starting an inactive timer.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pTimer      The timer handle.
 * @param   u64Expire   The new expire time.
 */
static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    TMCLOCK const enmClock = pTimer->enmClock;

    /*
     * Calculate and set the expiration time.
     */
    if (enmClock == TMCLOCK_VIRTUAL_SYNC)
    {
        uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
        AssertMsgStmt(u64Expire >= u64Last,
                      ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
                      u64Expire = u64Last);
    }
    ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));

    /*
     * Link the timer into the active list.
     */
    tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    TM_UNLOCK_TIMERS(pVM);
    return VINF_SUCCESS;
}


/**
 * TMTimerSet for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM         The cross context VM structure.
 * @param   pTimer      The timer handle.
 * @param   u64Expire   The expiration time.
 */
static int tmTimerVirtualSyncSet(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    PTMTIMERQUEUE pQueue   = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE  enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);

            AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
                      ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Arm a timer with a (new) expire time.
 *
 * @returns VBox status code.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   u64Expire   New expire time.
 */
VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
#if 1
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        ||  (   enmState1 == TMTIMERSTATE_STOPPED
             && pTimer->pCritSect))
    {
        /* Try take the TM lock and check the state again. */
        if (RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM)))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                return VINF_SUCCESS;
            }
            TM_UNLOCK_TIMERS(pVM);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}

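
/*
 * Editor's illustrative usage sketch (not part of the original file): the
 * "most common case" noted above is a periodic timer re-arming itself from
 * its own callback.  Hypothetical device callback with a 1ms period;
 * TMTimerGet and TMTimerFromMilli are assumed to be available from TM (they
 * live further down in the full TMAll.cpp):
 */
static DECLCALLBACK(void) demoPeriodicTimerCallback(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
{
    RT_NOREF(pDevIns, pvUser);
    /* ... perform the periodic device work here ... */
    TMTimerSet(pTimer, TMTimerGet(pTimer) + TMTimerFromMilli(pTimer, 1));
}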

/**
 * Return the current time for the specified clock, setting pu64Now if not NULL.
 *
 * @returns Current time.
 * @param   pVM         The cross context VM structure.
 * @param   enmClock    The clock to query.
 * @param   pu64Now     Optional pointer where to store the return time
 */
DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
{
    uint64_t u64Now;
    switch (enmClock)
    {
        case TMCLOCK_VIRTUAL_SYNC:
            u64Now = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_VIRTUAL:
            u64Now = TMVirtualGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64Now = TMRealGet(pVM);
            break;
        default:
            AssertFatalMsgFailed(("%d\n", enmClock));
    }

    if (pu64Now)
        *pu64Now = u64Now;
    return u64Now;
}


/**
 * Optimized TMTimerSetRelative code path.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    TMCLOCK const  enmClock  = pTimer->enmClock;
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
    pTimer->u64Expire        = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", R3STRING(pTimer->pszDesc));
    tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    TM_UNLOCK_TIMERS(pVM);
    return VINF_SUCCESS;
}


/**
 * TMTimerSetRelative for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer to (re-)arm.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerVirtualSyncSetRelative(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    /* Calculate the expiration tick. */
    uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
    if (pu64Now)
        *pu64Now = u64Expire;
    u64Expire += cTicksToNext;

    /* Update the timer. */
    PTMTIMERQUEUE pQueue   = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE  enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Arm a timer with an expire time relative to the current time.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling and we can also be 100% sure that we're not racing the
     * running of the timer queues.  As an additional restraint we require the
     * timer to have a critical section associated with it, to be 100% certain
     * there aren't concurrent operations on the timer.  (This latter isn't
     * necessary any longer as this isn't supported for any timers, critsect
     * or not.)
     *
     * Note! Lock ordering doesn't apply when we only try to get the innermost
     *       locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(  (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                         || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif

    /*
     * Unoptimized path.
     */
    int           rc;
    TMCLOCK const enmClock = pTimer->enmClock;
    for (int cRetries = 1000; ; cRetries--)
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_STOPPED:
                if (enmClock == TMCLOCK_VIRTUAL_SYNC)
                {
                    /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
                     *        Figure a safe way of activating this timer while the queue is
                     *        being run.
                     *        (99.9% sure the assertion is caused by DevAPIC.cpp
                     *        re-starting the timer in response to an initial_count write.) */
                }
                RT_FALL_THRU();
1624 case TMTIMERSTATE_EXPIRED_DELIVER:
1625 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1626 {
1627 Assert(!pTimer->offPrev);
1628 Assert(!pTimer->offNext);
1629 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1630 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1631 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1632 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1633 tmSchedule(pTimer);
1634 rc = VINF_SUCCESS;
1635 break;
1636 }
1637 rc = VERR_TRY_AGAIN;
1638 break;
1639
1640 case TMTIMERSTATE_PENDING_SCHEDULE:
1641 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1642 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1643 {
1644 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1645 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1646 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1647 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1648 tmSchedule(pTimer);
1649 rc = VINF_SUCCESS;
1650 break;
1651 }
1652 rc = VERR_TRY_AGAIN;
1653 break;
1654
1655
1656 case TMTIMERSTATE_ACTIVE:
1657 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1658 {
1659 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1660 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1661 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1662 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1663 tmSchedule(pTimer);
1664 rc = VINF_SUCCESS;
1665 break;
1666 }
1667 rc = VERR_TRY_AGAIN;
1668 break;
1669
1670 case TMTIMERSTATE_PENDING_RESCHEDULE:
1671 case TMTIMERSTATE_PENDING_STOP:
1672 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1673 {
1674 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1675 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1676 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1677 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1678 tmSchedule(pTimer);
1679 rc = VINF_SUCCESS;
1680 break;
1681 }
1682 rc = VERR_TRY_AGAIN;
1683 break;
1684
1685
1686 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1687 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1688 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1689#ifdef IN_RING3
1690 if (!RTThreadYield())
1691 RTThreadSleep(1);
1692#else
1693/** @todo call host context and yield after a couple of iterations */
1694#endif
1695 rc = VERR_TRY_AGAIN;
1696 break;
1697
1698 /*
1699 * Invalid states.
1700 */
1701 case TMTIMERSTATE_DESTROY:
1702 case TMTIMERSTATE_FREE:
1703 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1704 rc = VERR_TM_INVALID_STATE;
1705 break;
1706
1707 default:
1708 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1709 rc = VERR_TM_UNKNOWN_STATE;
1710 break;
1711 }
1712
1713 /* switch + loop is tedious to break out of. */
1714 if (rc == VINF_SUCCESS)
1715 break;
1716
1717 if (rc != VERR_TRY_AGAIN)
1718 {
1719 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1720 break;
1721 }
1722 if (cRetries <= 0)
1723 {
1724 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1725 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1726 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1727 break;
1728 }
1729
1730 /*
1731 * Retry to gain locks.
1732 */
1733 if (!fOwnTMLock)
1734 fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
1735
1736 } /* for (;;) */
1737
1738 /*
1739 * Clean up and return.
1740 */
1741 if (fOwnTMLock)
1742 TM_UNLOCK_TIMERS(pVM);
1743
1744 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1745 return rc;
1746}
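
/*
 * Editor's illustrative sketch, not part of the original source: a typical
 * caller arms the timer a fixed interval ahead and can capture the "now"
 * value the call used as its base, saving a separate clock query.  The
 * helper name and the 5 ms interval are hypothetical; TMTimerFromMilli is
 * declared in tm.h and defined further down in this file.
 */
#if 0 /* example only */
static void tmExampleArmRelative(PTMTIMER pTimer)
{
    uint64_t u64Now;                    /* receives the base clock value. */
    int rc = TMTimerSetRelative(pTimer, TMTimerFromMilli(pTimer, 5), &u64Now);
    AssertRC(rc);
    Log(("Armed 5 ms ahead of %'RU64\n", u64Now));
}
#endif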
1747
1748
1749/**
1750 * Drops a hint about the frequency of the timer.
1751 *
1752 * This is used by TM and the VMM to calculate how often guest execution needs
1753 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1754 *
1755 * @returns VBox status code.
1756 * @param pTimer Timer handle as returned by one of the create
1757 * functions.
1758 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1759 *
1760 * @remarks We're using an integer hertz value here since anything above 1 Hz
1761 *          is not going to be any trouble to satisfy scheduling-wise.  The
1762 *          range where it makes sense is >= 100 Hz.
1763 */
1764VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
1765{
1766 TMTIMER_ASSERT_CRITSECT(pTimer);
1767
1768 uint32_t const uHzOldHint = pTimer->uHzHint;
1769 pTimer->uHzHint = uHzHint;
1770
1771 PVM pVM = pTimer->CTX_SUFF(pVM);
1772 uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
1773 if ( uHzHint > uMaxHzHint
1774 || uHzOldHint >= uMaxHzHint)
1775 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1776
1777 return VINF_SUCCESS;
1778}
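
/*
 * Editor's illustrative sketch, not part of the original source: a device
 * emulation running a 1 kHz periodic timer would drop a matching hint when
 * the guest programs the rate and clear it again when the device goes idle
 * (TMTimerStop below clears it automatically).  The function name is
 * hypothetical.
 */
#if 0 /* example only */
static void tmExampleHint(PTMTIMER pTimer)
{
    TMTimerSetFrequencyHint(pTimer, 1000);  /* expect ~1000 ticks/sec */
    /* ... later, when the guest disables the periodic mode: */
    TMTimerSetFrequencyHint(pTimer, 0);     /* clear the hint */
}
#endif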
1779
1780
1781/**
1782 * TMTimerStop for the virtual sync timer queue.
1783 *
1784 * This employs a greatly simplified state machine by always acquiring the
1785 * queue lock and bypassing the scheduling list.
1786 *
1787 * @returns VBox status code
1788 * @param pVM The cross context VM structure.
1789 * @param pTimer The timer handle.
1790 */
1791static int tmTimerVirtualSyncStop(PVM pVM, PTMTIMER pTimer)
1792{
1793 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1794 VM_ASSERT_EMT(pVM);
1795 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1796 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1797 AssertRCReturn(rc, rc);
1798
1799 /* Reset the HZ hint. */
1800 if (pTimer->uHzHint)
1801 {
1802 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1803 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1804 pTimer->uHzHint = 0;
1805 }
1806
1807 /* Update the timer state. */
1808 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1809 TMTIMERSTATE enmState = pTimer->enmState;
1810 switch (enmState)
1811 {
1812 case TMTIMERSTATE_ACTIVE:
1813 tmTimerQueueUnlinkActive(pQueue, pTimer);
1814 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1815 rc = VINF_SUCCESS;
1816 break;
1817
1818 case TMTIMERSTATE_EXPIRED_DELIVER:
1819 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1820 rc = VINF_SUCCESS;
1821 break;
1822
1823 case TMTIMERSTATE_STOPPED:
1824 rc = VINF_SUCCESS;
1825 break;
1826
1827 case TMTIMERSTATE_PENDING_RESCHEDULE:
1828 case TMTIMERSTATE_PENDING_STOP:
1829 case TMTIMERSTATE_PENDING_SCHEDULE:
1830 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1831 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1832 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1833 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1834 case TMTIMERSTATE_DESTROY:
1835 case TMTIMERSTATE_FREE:
1836 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1837 rc = VERR_TM_INVALID_STATE;
1838 break;
1839
1840 default:
1841 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1842 rc = VERR_TM_UNKNOWN_STATE;
1843 break;
1844 }
1845
1846 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1847 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1848 return rc;
1849}
1850
1851
1852/**
1853 * Stop the timer.
1854 * Use TMR3TimerArm() to "un-stop" the timer.
1855 *
1856 * @returns VBox status code.
1857 * @param pTimer Timer handle as returned by one of the create functions.
1858 */
1859VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
1860{
1861 PVM pVM = pTimer->CTX_SUFF(pVM);
1862
1863 /* Treat virtual sync timers specially. */
1864 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
1865 return tmTimerVirtualSyncStop(pVM, pTimer);
1866
1867 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1868 TMTIMER_ASSERT_CRITSECT(pTimer);
1869
1870 /*
1871 * Reset the HZ hint.
1872 */
1873 if (pTimer->uHzHint)
1874 {
1875 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1876 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1877 pTimer->uHzHint = 0;
1878 }
1879
1880 /** @todo see if this function needs optimizing. */
1881 int cRetries = 1000;
1882 do
1883 {
1884 /*
1885 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1886 */
1887 TMTIMERSTATE enmState = pTimer->enmState;
1888 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1889 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
1890 switch (enmState)
1891 {
1892 case TMTIMERSTATE_EXPIRED_DELIVER:
1893 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
1894 return VERR_INVALID_PARAMETER;
1895
1896 case TMTIMERSTATE_STOPPED:
1897 case TMTIMERSTATE_PENDING_STOP:
1898 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1899 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1900 return VINF_SUCCESS;
1901
1902 case TMTIMERSTATE_PENDING_SCHEDULE:
1903 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
1904 {
1905 tmSchedule(pTimer);
1906 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1907 return VINF_SUCCESS;
1908 }
1909 break;
1910
1911 case TMTIMERSTATE_PENDING_RESCHEDULE:
1912 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1913 {
1914 tmSchedule(pTimer);
1915 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1916 return VINF_SUCCESS;
1917 }
1918 break;
1919
1920 case TMTIMERSTATE_ACTIVE:
1921 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1922 {
1923 tmSchedule(pTimer);
1924 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1925 return VINF_SUCCESS;
1926 }
1927 break;
1928
1929 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1930 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1931 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1932#ifdef IN_RING3
1933 if (!RTThreadYield())
1934 RTThreadSleep(1);
1935#else
1936/** @todo call host and yield cpu after a while. */
1937#endif
1938 break;
1939
1940 /*
1941 * Invalid states.
1942 */
1943 case TMTIMERSTATE_DESTROY:
1944 case TMTIMERSTATE_FREE:
1945 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1946 return VERR_TM_INVALID_STATE;
1947 default:
1948 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1949 return VERR_TM_UNKNOWN_STATE;
1950 }
1951 } while (cRetries-- > 0);
1952
1953 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1954 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1955 return VERR_TM_TIMER_UNSTABLE_STATE;
1956}
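
/*
 * Editor's illustrative sketch, not part of the original source: per the
 * state machine above, stopping a timer in the STOPPED or PENDING_STOP*
 * states simply returns VINF_SUCCESS, so a device reset handler can stop
 * unconditionally.  The function name is hypothetical.
 */
#if 0 /* example only */
static void tmExampleReset(PTMTIMER pTimer)
{
    int rc = TMTimerStop(pTimer);   /* no-op if already stopped */
    AssertRC(rc);
}
#endif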
1957
1958
1959/**
1960 * Get the current clock time.
1961 * Handy for calculating the new expire time.
1962 *
1963 * @returns Current clock time.
1964 * @param pTimer Timer handle as returned by one of the create functions.
1965 */
1966VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1967{
1968 PVM pVM = pTimer->CTX_SUFF(pVM);
1969
1970 uint64_t u64;
1971 switch (pTimer->enmClock)
1972 {
1973 case TMCLOCK_VIRTUAL:
1974 u64 = TMVirtualGet(pVM);
1975 break;
1976 case TMCLOCK_VIRTUAL_SYNC:
1977 u64 = TMVirtualSyncGet(pVM);
1978 break;
1979 case TMCLOCK_REAL:
1980 u64 = TMRealGet(pVM);
1981 break;
1982 default:
1983 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1984 return UINT64_MAX;
1985 }
1986 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1987 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1988 return u64;
1989}
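
/*
 * Editor's illustrative sketch, not part of the original source: computing a
 * new absolute expire time from the current clock value, for use with
 * TMTimerSet (defined earlier in this file).  The 2 ms interval is
 * hypothetical.
 */
#if 0 /* example only */
static void tmExampleArmAbsolute(PTMTIMER pTimer)
{
    uint64_t const u64Expire = TMTimerGet(pTimer) + TMTimerFromMilli(pTimer, 2);
    int rc = TMTimerSet(pTimer, u64Expire);
    AssertRC(rc);
}
#endif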
1990
1991
1992/**
1993 * Get the frequency of the timer clock.
1994 *
1995 * @returns Clock frequency (as Hz of course).
1996 * @param pTimer Timer handle as returned by one of the create functions.
1997 */
1998VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
1999{
2000 switch (pTimer->enmClock)
2001 {
2002 case TMCLOCK_VIRTUAL:
2003 case TMCLOCK_VIRTUAL_SYNC:
2004 return TMCLOCK_FREQ_VIRTUAL;
2005
2006 case TMCLOCK_REAL:
2007 return TMCLOCK_FREQ_REAL;
2008
2009 default:
2010 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2011 return 0;
2012 }
2013}
2014
2015
2016/**
2017 * Get the expire time of the timer.
2018 * Only valid for active timers.
2019 *
2020 * @returns Expire time of the timer.
2021 * @param pTimer Timer handle as returned by one of the create functions.
2022 */
2023VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
2024{
2025 TMTIMER_ASSERT_CRITSECT(pTimer);
2026 int cRetries = 1000;
2027 do
2028 {
2029 TMTIMERSTATE enmState = pTimer->enmState;
2030 switch (enmState)
2031 {
2032 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2033 case TMTIMERSTATE_EXPIRED_DELIVER:
2034 case TMTIMERSTATE_STOPPED:
2035 case TMTIMERSTATE_PENDING_STOP:
2036 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2037 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2038 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2039 return ~(uint64_t)0;
2040
2041 case TMTIMERSTATE_ACTIVE:
2042 case TMTIMERSTATE_PENDING_RESCHEDULE:
2043 case TMTIMERSTATE_PENDING_SCHEDULE:
2044 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2045 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2046 return pTimer->u64Expire;
2047
2048 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2049 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2050#ifdef IN_RING3
2051 if (!RTThreadYield())
2052 RTThreadSleep(1);
2053#endif
2054 break;
2055
2056 /*
2057 * Invalid states.
2058 */
2059 case TMTIMERSTATE_DESTROY:
2060 case TMTIMERSTATE_FREE:
2061 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2062 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2063 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2064 return ~(uint64_t)0;
2065 default:
2066 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2067 return ~(uint64_t)0;
2068 }
2069 } while (cRetries-- > 0);
2070
2071 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
2072 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2073 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2074 return ~(uint64_t)0;
2075}
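
/*
 * Editor's illustrative sketch, not part of the original source: since
 * inactive timers report ~0, a caller computing the remaining time has to
 * check for that sentinel before subtracting.  The helper name is
 * hypothetical.
 */
#if 0 /* example only */
static uint64_t tmExampleTicksRemaining(PTMTIMER pTimer)
{
    uint64_t const u64Expire = TMTimerGetExpire(pTimer);
    if (u64Expire == UINT64_MAX)    /* ~(uint64_t)0: not active */
        return 0;
    uint64_t const u64Now = TMTimerGet(pTimer);
    return u64Expire > u64Now ? u64Expire - u64Now : 0;
}
#endif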
2076
2077
2078/**
2079 * Checks if a timer is active or not.
2080 *
2081 * @returns True if active.
2082 * @returns False if not active.
2083 * @param pTimer Timer handle as returned by one of the create functions.
2084 */
2085VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
2086{
2087 TMTIMERSTATE enmState = pTimer->enmState;
2088 switch (enmState)
2089 {
2090 case TMTIMERSTATE_STOPPED:
2091 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2092 case TMTIMERSTATE_EXPIRED_DELIVER:
2093 case TMTIMERSTATE_PENDING_STOP:
2094 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2095 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2096 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2097 return false;
2098
2099 case TMTIMERSTATE_ACTIVE:
2100 case TMTIMERSTATE_PENDING_RESCHEDULE:
2101 case TMTIMERSTATE_PENDING_SCHEDULE:
2102 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2103 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2104 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2105 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2106 return true;
2107
2108 /*
2109 * Invalid states.
2110 */
2111 case TMTIMERSTATE_DESTROY:
2112 case TMTIMERSTATE_FREE:
2113 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
2114 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2115 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2116 return false;
2117 default:
2118 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2119 return false;
2120 }
2121}
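
/*
 * Editor's illustrative sketch, not part of the original source: saved-state
 * load or reset code often re-arms a timer only when it isn't already
 * running.  The name and the 10 ms interval are hypothetical.
 */
#if 0 /* example only */
static void tmExampleArmIfIdle(PTMTIMER pTimer)
{
    if (!TMTimerIsActive(pTimer))
        TMTimerSetMillies(pTimer, 10);  /* see the convenience APIs below */
}
#endif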
2122
2123
2124/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2125
2126
2127/**
2128 * Arm a timer with a (new) expire time relative to current time.
2129 *
2130 * @returns VBox status code.
2131 * @param pTimer Timer handle as returned by one of the create functions.
2132 * @param cMilliesToNext Number of milliseconds to the next tick.
2133 */
2134VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
2135{
2136 switch (pTimer->enmClock)
2137 {
2138 case TMCLOCK_VIRTUAL:
2139 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2140 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2141
2142 case TMCLOCK_VIRTUAL_SYNC:
2143 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2144 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2145
2146 case TMCLOCK_REAL:
2147 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2148 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
2149
2150 default:
2151 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2152 return VERR_TM_TIMER_BAD_CLOCK;
2153 }
2154}
2155
2156
2157/**
2158 * Arm a timer with a (new) expire time relative to current time.
2159 *
2160 * @returns VBox status code.
2161 * @param pTimer Timer handle as returned by one of the create functions.
2162 * @param cMicrosToNext Number of microseconds to the next tick.
2163 */
2164VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
2165{
2166 switch (pTimer->enmClock)
2167 {
2168 case TMCLOCK_VIRTUAL:
2169 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2170 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2171
2172 case TMCLOCK_VIRTUAL_SYNC:
2173 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2174 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2175
2176 case TMCLOCK_REAL:
2177 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2178 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
2179
2180 default:
2181 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2182 return VERR_TM_TIMER_BAD_CLOCK;
2183 }
2184}
2185
2186
2187/**
2188 * Arm a timer with a (new) expire time relative to current time.
2189 *
2190 * @returns VBox status code.
2191 * @param pTimer Timer handle as returned by one of the create functions.
2192 * @param cNanosToNext Number of nanoseconds to the next tick.
2193 */
2194VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
2195{
2196 switch (pTimer->enmClock)
2197 {
2198 case TMCLOCK_VIRTUAL:
2199 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2200 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2201
2202 case TMCLOCK_VIRTUAL_SYNC:
2203 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2204 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2205
2206 case TMCLOCK_REAL:
2207 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2208 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
2209
2210 default:
2211 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2212 return VERR_TM_TIMER_BAD_CLOCK;
2213 }
2214}
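
/*
 * Editor's illustrative sketch, not part of the original source: the three
 * convenience setters differ only in the unit of the delta, so all of these
 * calls arm the timer 2 ms ahead (modulo the truncation on the millisecond
 * resolution real clock).  The function name is hypothetical.
 */
#if 0 /* example only */
static void tmExampleEquivalentArms(PTMTIMER pTimer)
{
    TMTimerSetMillies(pTimer, 2);
    TMTimerSetMicro(pTimer, 2000);
    TMTimerSetNano(pTimer, 2000000);
}
#endif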
2215
2216
2217/**
2218 * Get the current clock time as nanoseconds.
2219 *
2220 * @returns The timer clock as nanoseconds.
2221 * @param pTimer Timer handle as returned by one of the create functions.
2222 */
2223VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
2224{
2225 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
2226}
2227
2228
2229/**
2230 * Get the current clock time as microseconds.
2231 *
2232 * @returns The timer clock as microseconds.
2233 * @param pTimer Timer handle as returned by one of the create functions.
2234 */
2235VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
2236{
2237 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
2238}
2239
2240
2241/**
2242 * Get the current clock time as milliseconds.
2243 *
2244 * @returns The timer clock as milliseconds.
2245 * @param pTimer Timer handle as returned by one of the create functions.
2246 */
2247VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
2248{
2249 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
2250}
2251
2252
2253/**
2254 * Converts the specified timer clock time to nanoseconds.
2255 *
2256 * @returns nanoseconds.
2257 * @param pTimer Timer handle as returned by one of the create functions.
2258 * @param u64Ticks The clock ticks.
2259 * @remark There could be rounding errors here. We just do a simple integer divide
2260 * without any adjustments.
2261 */
2262VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
2263{
2264 switch (pTimer->enmClock)
2265 {
2266 case TMCLOCK_VIRTUAL:
2267 case TMCLOCK_VIRTUAL_SYNC:
2268 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2269 return u64Ticks;
2270
2271 case TMCLOCK_REAL:
2272 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2273 return u64Ticks * 1000000;
2274
2275 default:
2276 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2277 return 0;
2278 }
2279}
2280
2281
2282/**
2283 * Converts the specified timer clock time to microseconds.
2284 *
2285 * @returns microseconds.
2286 * @param pTimer Timer handle as returned by one of the create functions.
2287 * @param u64Ticks The clock ticks.
2288 * @remark There could be rounding errors here. We just do a simple integer divide
2289 * without any adjustments.
2290 */
2291VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
2292{
2293 switch (pTimer->enmClock)
2294 {
2295 case TMCLOCK_VIRTUAL:
2296 case TMCLOCK_VIRTUAL_SYNC:
2297 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2298 return u64Ticks / 1000;
2299
2300 case TMCLOCK_REAL:
2301 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2302 return u64Ticks * 1000;
2303
2304 default:
2305 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2306 return 0;
2307 }
2308}
2309
2310
2311/**
2312 * Converts the specified timer clock time to milliseconds.
2313 *
2314 * @returns milliseconds.
2315 * @param pTimer Timer handle as returned by one of the create functions.
2316 * @param u64Ticks The clock ticks.
2317 * @remark There could be rounding errors here. We just do a simple integer divide
2318 * without any adjustments.
2319 */
2320VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
2321{
2322 switch (pTimer->enmClock)
2323 {
2324 case TMCLOCK_VIRTUAL:
2325 case TMCLOCK_VIRTUAL_SYNC:
2326 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2327 return u64Ticks / 1000000;
2328
2329 case TMCLOCK_REAL:
2330 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2331 return u64Ticks;
2332
2333 default:
2334 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2335 return 0;
2336 }
2337}
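
/*
 * Editor's illustrative sketch, not part of the original source: converting
 * a tick delta into human readable units for logging.  The caller is assumed
 * to know the timer is active so the subtraction cannot underflow; the
 * function name is hypothetical.
 */
#if 0 /* example only */
static void tmExampleLogRemaining(PTMTIMER pTimer)
{
    uint64_t const cTicks = TMTimerGetExpire(pTimer) - TMTimerGet(pTimer);
    Log(("Expires in %'RU64 us (%'RU64 ms)\n",
         TMTimerToMicro(pTimer, cTicks), TMTimerToMilli(pTimer, cTicks)));
}
#endif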
2338
2339
2340/**
2341 * Converts the specified nanosecond timestamp to timer clock ticks.
2342 *
2343 * @returns timer clock ticks.
2344 * @param pTimer Timer handle as returned by one of the create functions.
2345 * @param cNanoSecs The nanosecond value to convert.
2346 * @remark There could be rounding and overflow errors here.
2347 */
2348VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t cNanoSecs)
2349{
2350 switch (pTimer->enmClock)
2351 {
2352 case TMCLOCK_VIRTUAL:
2353 case TMCLOCK_VIRTUAL_SYNC:
2354 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2355 return cNanoSecs;
2356
2357 case TMCLOCK_REAL:
2358 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2359 return cNanoSecs / 1000000;
2360
2361 default:
2362 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2363 return 0;
2364 }
2365}
2366
2367
2368/**
2369 * Converts the specified microsecond timestamp to timer clock ticks.
2370 *
2371 * @returns timer clock ticks.
2372 * @param pTimer Timer handle as returned by one of the create functions.
2373 * @param cMicroSecs The microsecond value to convert.
2374 * @remark There could be rounding and overflow errors here.
2375 */
2376VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t cMicroSecs)
2377{
2378 switch (pTimer->enmClock)
2379 {
2380 case TMCLOCK_VIRTUAL:
2381 case TMCLOCK_VIRTUAL_SYNC:
2382 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2383 return cMicroSecs * 1000;
2384
2385 case TMCLOCK_REAL:
2386 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2387 return cMicroSecs / 1000;
2388
2389 default:
2390 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2391 return 0;
2392 }
2393}
2394
2395
2396/**
2397 * Converts the specified millisecond timestamp to timer clock ticks.
2398 *
2399 * @returns timer clock ticks.
2400 * @param pTimer Timer handle as returned by one of the create functions.
2401 * @param cMilliSecs The millisecond value to convert.
2402 * @remark There could be rounding and overflow errors here.
2403 */
2404VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t cMilliSecs)
2405{
2406 switch (pTimer->enmClock)
2407 {
2408 case TMCLOCK_VIRTUAL:
2409 case TMCLOCK_VIRTUAL_SYNC:
2410 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2411 return cMilliSecs * 1000000;
2412
2413 case TMCLOCK_REAL:
2414 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2415 return cMilliSecs;
2416
2417 default:
2418 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2419 return 0;
2420 }
2421}
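
/*
 * Editor's illustrative sketch, not part of the original source: the From*
 * conversions use plain integer division, so on a TMCLOCK_REAL timer
 * (millisecond resolution) a sub-millisecond input truncates to zero ticks,
 * which is the rounding caveat the remarks above warn about.
 */
#if 0 /* example only */
static void tmExampleTruncation(PTMTIMER pTimerReal) /* assumed TMCLOCK_REAL */
{
    uint64_t const cTicks = TMTimerFromNano(pTimerReal, 999999); /* 999999/1000000 = 0 */
    Assert(cTicks == 0);
    NOREF(cTicks);
}
#endif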
2422
2423
2424/**
2425 * Convert state to string.
2426 *
2427 * @returns Read-only state name.
2428 * @param enmState State.
2429 */
2430const char *tmTimerState(TMTIMERSTATE enmState)
2431{
2432 switch (enmState)
2433 {
2434#define CASE(num, state) \
2435 case TMTIMERSTATE_##state: \
2436 AssertCompile(TMTIMERSTATE_##state == (num)); \
2437 return #num "-" #state
2438 CASE( 1,STOPPED);
2439 CASE( 2,ACTIVE);
2440 CASE( 3,EXPIRED_GET_UNLINK);
2441 CASE( 4,EXPIRED_DELIVER);
2442 CASE( 5,PENDING_STOP);
2443 CASE( 6,PENDING_STOP_SCHEDULE);
2444 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2445 CASE( 8,PENDING_SCHEDULE);
2446 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2447 CASE(10,PENDING_RESCHEDULE);
2448 CASE(11,DESTROY);
2449 CASE(12,FREE);
2450 default:
2451 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2452 return "Invalid state!";
2453#undef CASE
2454 }
2455}
2456
2457
2458/**
2459 * Gets the highest frequency hint for all the important timers.
2460 *
2461 * @returns The highest frequency. 0 if no timers care.
2462 * @param pVM The cross context VM structure.
2463 */
2464static uint32_t tmGetFrequencyHint(PVM pVM)
2465{
2466 /*
2467 * Query the value, recalculate it if necessary.
2468 *
2469 * The "right" highest frequency value isn't so important that we'll block
2470 * waiting on the timer semaphore.
2471 */
2472 uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
2473 if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
2474 {
2475 if (RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
2476 {
2477 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);
2478
2479 /*
2480 * Loop over the timers associated with each clock.
2481 */
2482 uMaxHzHint = 0;
2483 for (int i = 0; i < TMCLOCK_MAX; i++)
2484 {
2485 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2486 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
2487 {
2488 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2489 if (uHzHint > uMaxHzHint)
2490 {
2491 switch (pCur->enmState)
2492 {
2493 case TMTIMERSTATE_ACTIVE:
2494 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2495 case TMTIMERSTATE_EXPIRED_DELIVER:
2496 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2497 case TMTIMERSTATE_PENDING_SCHEDULE:
2498 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2499 case TMTIMERSTATE_PENDING_RESCHEDULE:
2500 uMaxHzHint = uHzHint;
2501 break;
2502
2503 case TMTIMERSTATE_STOPPED:
2504 case TMTIMERSTATE_PENDING_STOP:
2505 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2506 case TMTIMERSTATE_DESTROY:
2507 case TMTIMERSTATE_FREE:
2508 break;
2509 /* no default, want gcc warnings when adding more states. */
2510 }
2511 }
2512 }
2513 }
2514 ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
2515 Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
2516 TM_UNLOCK_TIMERS(pVM);
2517 }
2518 }
2519 return uMaxHzHint;
2520}
2521
2522
2523/**
2524 * Calculates a host timer frequency that would be suitable for the current
2525 * timer load.
2526 *
2527 * This will take the highest timer frequency, adjust for catch-up and warp
2528 * driver, and finally add a little fudge factor. The caller (VMM) will use
2529 * the result to adjust the per-cpu preemption timer.
2530 *
2531 * @returns The highest frequency. 0 if no important timers around.
2532 * @param pVM The cross context VM structure.
2533 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2534 */
2535VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVM pVM, PVMCPU pVCpu)
2536{
2537 uint32_t uHz = tmGetFrequencyHint(pVM);
2538
2539 /* Catch up, we have to be more aggressive than the % indicates at the
2540 beginning of the effort. */
2541 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2542 {
2543 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2544 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2545 {
2546 if (u32Pct <= 100)
2547 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2548 else if (u32Pct <= 200)
2549 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2550 else if (u32Pct <= 400)
2551 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2552 uHz *= u32Pct + 100;
2553 uHz /= 100;
2554 }
2555 }
2556
2557 /* Warp drive. */
2558 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2559 {
2560 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2561 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2562 {
2563 uHz *= u32Pct;
2564 uHz /= 100;
2565 }
2566 }
2567
2568 /* Fudge factor. */
2569 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2570 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2571 else
2572 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2573 uHz /= 100;
2574
2575 /* Make sure it isn't too high. */
2576 if (uHz > pVM->tm.s.cHostHzMax)
2577 uHz = pVM->tm.s.cHostHzMax;
2578
2579 return uHz;
2580}
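
/*
 * Editor's worked example with assumed configuration values, not part of the
 * original source: with a highest hint of 1000 Hz, a 50% catch-up rate and an
 * assumed cPctHostHzFudgeFactorCatchUp100 of 200, the catch-up step yields
 * u32Pct = 50 * 200 / 100 = 100 and thus uHz = 1000 * (100 + 100) / 100 = 2000.
 * An assumed timer-CPU fudge factor of 110 then gives
 * uHz = 2000 * 110 / 100 = 2200, which is finally clamped to cHostHzMax.
 */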
2581
2582
2583/**
2584 * Whether the guest virtual clock is ticking.
2585 *
2586 * @returns true if ticking, false otherwise.
2587 * @param pVM The cross context VM structure.
2588 */
2589VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2590{
2591 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2592}
2593