VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@81150

Last change on this file since 81150 was 81150, checked in by vboxsync, 6 years ago

VMM,/Makefile.kmk: Kicked out more recompiler related code. bugref:9576

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 93.3 KB
/* $Id: TMAll.cpp 81150 2019-10-08 12:53:47Z vboxsync $ */
/** @file
 * TM - Timeout Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#ifdef DEBUG_bird
# define DBGFTRACE_DISABLED /* annoying */
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/dbgftrace.h>
#ifdef IN_RING3
#endif
#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
#include "TMInternal.h"
#include <VBox/vmm/vmcc.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>
#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-math.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif

#include "TMInline.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#ifdef VBOX_STRICT
/** @def TMTIMER_GET_CRITSECT
 * Helper for safely resolving the critical section for a timer belonging to a
 * device instance.
 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
# ifdef IN_RING3
#  define TMTIMER_GET_CRITSECT(pTimer) ((pTimer)->pCritSect)
# else
#  define TMTIMER_GET_CRITSECT(pTimer) tmRZTimerGetCritSect(pTimer)
# endif
#endif

/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pTimer); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner(pCritSect) \
                          || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif

/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
 * Checks for lock order trouble between the timer critsect and the critical
 * section critsect. The virtual sync critsect must always be entered before
 * the one associated with the timer (see TMR3TimerQueuesDo). It is OK if there
 * isn't any critical section associated with the timer or if the calling thread
 * doesn't own it, ASSUMING of course that the thread using this macro is going
 * to enter the virtual sync critical section anyway.
 *
 * @remarks This is a slightly relaxed timer locking attitude compared to
 *          TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
 *          should know what it's doing if it's stopping or starting a timer
 *          without taking the device lock.
 */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pTimer); \
            AssertMsg(   pCritSect \
                      && (   !PDMCritSectIsOwner(pCritSect) \
                          || PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock) \
                          || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
#endif


#if defined(VBOX_STRICT) && defined(IN_RING0)
/**
 * Helper for TMTIMER_GET_CRITSECT
 * @todo This needs a redo!
 */
DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PTMTIMER pTimer)
{
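    /*
     * For device timers, translate the ring-3 critsect pointer to its ring-0
     * mapping: either the device instance's default critical section or one
     * embedded in the shared instance data.  Anything else goes thru the
     * hypervisor heap mapping below.
     */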
    if (pTimer->enmType == TMTIMERTYPE_DEV)
    {
        PPDMDEVINSR0 pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
        struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
        if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
            return pDevInsR0->pCritSectRoR0;
        uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
        if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
            return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
    }
    return (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), pTimer->pCritSect);
}
#endif /* VBOX_STRICT && IN_RING0 */


/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
#endif
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs           = RTTimeNanoTS();
    uint64_t const cNsTotalNew       = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    uint64_t const cNsExecutingDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
    uint64_t const cNsExecutingNew   = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    uint64_t const cNsOtherNew       = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
    if (cNsOtherNewDelta > 0)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before execution) */
# endif

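    /* Publish the new counts seqlock style: bump the generation to an odd
       value while the members are being updated, so lock-free readers can
       detect the concurrent update and retry, then make it even again once
       the values are consistent. */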
    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cNsTotal     = cNsTotalNew;
    pVCpu->tm.s.cNsOther     = cNsOtherNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}


/**
 * Notification that the CPU is entering the halt state.
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
#endif

    if (    pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that the CPU is leaving the halt state.
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs        = RTTimeNanoTS();
    uint64_t const cNsTotalNew    = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
    uint64_t const cNsHaltedNew   = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
    uint64_t const cNsOtherNew    = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
    if (cNsOtherNewDelta > 0)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before halting) */
# endif

    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.cNsHalted = cNsHaltedNew;
    pVCpu->tm.s.cNsTotal  = cNsTotalNew;
    pVCpu->tm.s.cNsOther  = cNsOtherNew;
    pVCpu->tm.s.cPeriodsHalted++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}


/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * @param   pVM     The cross context VM structure.
 */
DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
{
    PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
    if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
    {
        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
        REMR3NotifyTimerPending(pVM, pVCpuDst);
# endif
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    }
}


/**
 * Schedule the queue which was changed.
 */
DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);
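    /* If we're on the EMT and can take the timer lock without blocking, run
       the scheduling pass right away; otherwise raise VMCPU_FF_TIMER (when
       there is pending scheduling work) and leave it to the dedicated timer
       EMT. */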
    if (   VM_IS_EMT(pVM)
        && RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
    {
        STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
        tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
#ifdef VBOX_STRICT
        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
        STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        TM_UNLOCK_TIMERS(pVM);
    }
    else
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
            tmScheduleNotify(pVM);
    }
}


/**
 * Tries to change the timer state from enmStateOld to enmStateNew.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}


/**
 * Links the timer onto the scheduling queue.
 *
 * @param   pQueue  The timer queue the timer belongs to.
 * @param   pTimer  The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(!pTimer->offScheduleNext);
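    /* Lock-free LIFO push: point this timer at the current list head and
       publish it as the new head with a compare-and-swap, retrying if another
       thread raced us.  The links are stored as self-relative offsets so the
       same list works in ring-3, ring-0 and raw-mode context. */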
    const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
    int32_t       offHead;
    do
    {
        offHead = pQueue->offSchedule;
        if (offHead)
            pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
        else
            pTimer->offScheduleNext = 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
}


/**
 * Tries to change the timer state from enmStateOld to enmStateNew and, on
 * success, links the timer onto the scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
    {
        tmTimerLinkSchedule(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
        return true;
    }
    return false;
}


/**
 * Links a timer into the active list of a timer queue.
 *
 * @param   pQueue      The queue.
 * @param   pTimer      The timer.
 * @param   u64Expire   The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offNext);
    Assert(!pTimer->offPrev);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */

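    /* The active list is kept sorted by expire time (ascending); insert in
       front of the first timer expiring after us, updating the queue's cached
       u64Expire when we become the new head. */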
    PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
    if (pCur)
    {
        for (;; pCur = TMTIMER_GET_NEXT(pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                TMTIMER_SET_NEXT(pTimer, pCur);
                TMTIMER_SET_PREV(pTimer, pPrev);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pTimer);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive head", R3STRING(pTimer->pszDesc));
                }
                TMTIMER_SET_PREV(pCur, pTimer);
                return;
            }
            if (!pCur->offNext)
            {
                TMTIMER_SET_NEXT(pCur, pTimer);
                TMTIMER_SET_PREV(pTimer, pCur);
                DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive tail", R3STRING(pTimer->pszDesc));
                return;
            }
        }
    }
    else
    {
        TMTIMER_SET_HEAD(pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive empty", R3STRING(pTimer->pszDesc));
    }
}



/**
 * Schedules the given timer on the given queue.
 *
 * @param   pQueue      The timer queue.
 * @param   pTimer      The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);

    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLinkSchedule(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}


/**
 * Schedules the specified timer queue.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pQueue  The queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);
    NOREF(pVM);

    /*
     * Dequeue the scheduling list and iterate it.
     */
    int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
    if (!offNext)
        return;
    PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
    while (pNext)
    {
        /*
         * Unlink the head timer and find the next one.
         */
        PTMTIMER pTimer = pNext;
        pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
        pTimer->offScheduleNext = 0;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
        tmTimerQueueScheduleOne(pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    } /* foreach timer in current schedule batch. */
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}


#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszWhere    Caller location clue.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);

    /*
     * Check the linking of the active lists.
     */
    bool fHaveVirtualSyncLock = false;
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
        {
            if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) != VINF_SUCCESS)
                continue;
            fHaveVirtualSyncLock = true;
        }
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    AssertMsg(   !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                    Assert(pCur->offPrev || pCur == pCurAct);
                    while (pCurAct && pCurAct != pCur)
                        pCurAct = TMTIMER_GET_NEXT(pCurAct);
                    Assert(pCurAct == pCur);
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    Assert(!pCur->offNext);
                    Assert(!pCur->offPrev);
                    for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                         pCurAct;
                         pCurAct = TMTIMER_GET_NEXT(pCurAct))
                    {
                        Assert(pCurAct != pCur);
                        Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                        Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                    }
                }
                break;

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */

    if (fHaveVirtualSyncLock)
        PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}
#endif /* VBOX_STRICT */

#ifdef VBOX_HIGH_RES_TIMERS_HACK

/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63)));

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
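    /* The delta is in guest virtual time while the deadline we return must be
       in GIP time, so the elapsed part is scaled by 100/u32Pct here (assumed
       to be the inverse of the u32Pct/100 scaling tmVirtualGetRaw applies). */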
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    if (u64GipTime >= u64Delta)
    {
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
    }
    else
    {
        u64Delta -= u64GipTime;
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}


/**
 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
 * than the one dedicated to timer work.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
{
    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
    *pu64Delta = s_u64OtherRet;
    return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
}


/**
 * Worker for tmTimerPollInternal.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pVCpuDst    The cross context virtual CPU structure of the dedicated
 *                      timer EMT.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 * @param   pCounter    The statistics counter to update.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
                                                 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
{
    STAM_COUNTER_INC(pCounter); NOREF(pCounter);
    if (pVCpuDst != pVCpu)
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    *pu64Delta = 0;
    return 0;
}

/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pu64Delta   Where to store the delta.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
{
    PVMCPU         pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
    const uint64_t u64Now   = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t  i64Delta1  = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#if defined(IN_RING3) && defined(VBOX_WITH_REM)
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straightforward if we're in a catch-up: not only do
     * we have to adjust the 'now', but we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
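    /* Read the sync clock offset and the expire time, then re-check all the
       inputs to make sure nothing changed in between; if anything moved, fall
       back to the retry loop further down. */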
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (   !pVM->tm.s.fRunningQueues
                    && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#if defined(IN_RING3) && defined(VBOX_WITH_REM)
                    REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     */
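    /* Snapshot the catch-up state and keep re-reading it until we get a
       self-consistent set of values (or run out of retries), computing the
       adjusted virtual sync offset along the way. */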
    uint64_t    off;
    uint32_t    u32Pct = 0;
    bool        fCatchUp;
    int         cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (   !pVM->tm.s.fRunningQueues
            && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#if defined(IN_RING3) && defined(VBOX_WITH_REM)
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns true if timers are pending, false if not.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @thread  The emulation thread.
 */
VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t off = 0;
    tmTimerPollInternal(pVM, pVCpu, &off);
    return off == 0;
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
{
    uint64_t off;
    tmTimerPollInternal(pVM, pVCpu, &off);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pu64Delta   Where to store the delta.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
{
    return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
}

#endif /* VBOX_HIGH_RES_TIMERS_HACK */

/**
 * Gets the host context ring-3 pointer of the timer.
 *
 * @returns HC R3 pointer.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the host context ring-0 pointer of the timer.
 *
 * @returns HC R0 pointer.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the RC pointer of the timer.
 *
 * @returns RC pointer.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
{
    return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Locks the timer clock.
 *
 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
 *          if the clock does not have a lock.
 * @param   pTimer  The timer which clock lock we wish to take.
 * @param   rcBusy  What to return in ring-0 and raw-mode context if the lock
 *                  is busy. Pass VINF_SUCCESS to acquire the critical section
 *                  thru a ring-3 call if necessary.
 *
 * @remarks Currently only supported on timers using the virtual sync clock.
 */
VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
{
    AssertPtr(pTimer);
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
    return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
}


/**
 * Unlocks a timer clock locked by TMTimerLock.
 *
 * @param   pTimer  The timer which clock to unlock.
 */
VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
    PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}


/**
 * Checks if the current thread owns the timer clock lock.
 *
 * @returns @c true if it's the owner, @c false if not.
 * @param   pTimer  The timer handle.
 */
VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
    return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}


/**
 * Optimized TMTimerSet code path for starting an inactive timer.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pTimer      The timer handle.
 * @param   u64Expire   The new expire time.
 */
static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    TMCLOCK const enmClock = pTimer->enmClock;

    /*
     * Calculate and set the expiration time.
     */
    if (enmClock == TMCLOCK_VIRTUAL_SYNC)
    {
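        /* The virtual sync clock never goes backwards; clamp the expire time
           to the current virtual sync timestamp so the active list stays
           sorted. */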
        uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
        AssertMsgStmt(u64Expire >= u64Last,
                      ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
                      u64Expire = u64Last);
    }
    ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));

    /*
     * Link the timer into the active list.
     */
    tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    TM_UNLOCK_TIMERS(pVM);
    return VINF_SUCCESS;
}


/**
 * TMTimerSet for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM         The cross context VM structure.
 * @param   pTimer      The timer handle.
 * @param   u64Expire   The expiration time.
 */
static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    PTMTIMERQUEUE pQueue   = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE  enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);

            AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
                      ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Arm a timer with a (new) expire time.
 *
 * @returns VBox status code.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   u64Expire   New expire time.
 */
VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
#if 1
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        || (   enmState1 == TMTIMERSTATE_STOPPED
            && pTimer->pCritSect))
    {
        /* Try take the TM lock and check the state again. */
        if (RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM)))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                return VINF_SUCCESS;
            }
            TM_UNLOCK_TIMERS(pVM);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
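        /* The *_SET_EXPIRE intermediate states give this thread exclusive
           write access to u64Expire; once the new expire time is stored, the
           timer is moved on to PENDING_(RE)SCHEDULE and handed to tmSchedule. */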
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}


/**
 * Return the current time for the specified clock, setting pu64Now if not NULL.
 *
 * @returns Current time.
 * @param   pVM         The cross context VM structure.
 * @param   enmClock    The clock to query.
 * @param   pu64Now     Optional pointer where to store the return time.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
{
    uint64_t u64Now;
    switch (enmClock)
    {
        case TMCLOCK_VIRTUAL_SYNC:
            u64Now = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_VIRTUAL:
            u64Now = TMVirtualGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64Now = TMRealGet(pVM);
            break;
        default:
            AssertFatalMsgFailed(("%d\n", enmClock));
    }

    if (pu64Now)
        *pu64Now = u64Now;
    return u64Now;
}


/**
 * Optimized TMTimerSetRelative code path.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    TMCLOCK const  enmClock  = pTimer->enmClock;
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
    pTimer->u64Expire        = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", R3STRING(pTimer->pszDesc));
    tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    TM_UNLOCK_TIMERS(pVM);
    return VINF_SUCCESS;
}


/**
 * TMTimerSetRelative for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer to (re-)arm.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    /* Calculate the expiration tick. */
    uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
    if (pu64Now)
        *pu64Now = u64Expire;
    u64Expire += cTicksToNext;

    /* Update the timer. */
    PTMTIMERQUEUE pQueue   = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE  enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Arm a timer with an expire time relative to the current time.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling and we can also be 100% sure that we're not racing the
     * running of the timer queues. As an additional restraint we require the
     * timer to have a critical section associated with it to be 100% sure
     * there aren't concurrent operations on the timer. (This latter isn't
     * necessary any longer as this isn't supported for any timers, critsect
     * or not.)
     *
     * Note! Lock ordering doesn't apply when we only try to
     *       get the innermost locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(   (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                          || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif
1630
1631 /*
1632 * Unoptimized path.
1633 */
1634 int rc;
1635 TMCLOCK const enmClock = pTimer->enmClock;
1636 for (int cRetries = 1000; ; cRetries--)
1637 {
1638 /*
1639 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1640 */
1641 TMTIMERSTATE enmState = pTimer->enmState;
1642 switch (enmState)
1643 {
1644 case TMTIMERSTATE_STOPPED:
1645 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1646 {
1647 /** @todo To fix the assertion in tmR3TimerQueueRunVirtualSync:
1648 * Figure out a safe way of activating this timer while the queue is
1649 * being run.
1650 * (99.9% sure that the assertion is caused by DevAPIC.cpp
1651 * re-starting the timer in response to an initial_count write.) */
1652 }
1653 RT_FALL_THRU();
1654 case TMTIMERSTATE_EXPIRED_DELIVER:
1655 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1656 {
1657 Assert(!pTimer->offPrev);
1658 Assert(!pTimer->offNext);
1659 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1660 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1661 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1662 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1663 tmSchedule(pTimer);
1664 rc = VINF_SUCCESS;
1665 break;
1666 }
1667 rc = VERR_TRY_AGAIN;
1668 break;
1669
1670 case TMTIMERSTATE_PENDING_SCHEDULE:
1671 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1672 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1673 {
1674 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1675 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1676 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1677 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1678 tmSchedule(pTimer);
1679 rc = VINF_SUCCESS;
1680 break;
1681 }
1682 rc = VERR_TRY_AGAIN;
1683 break;
1684
1685
1686 case TMTIMERSTATE_ACTIVE:
1687 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1688 {
1689 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1690 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1691 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1692 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1693 tmSchedule(pTimer);
1694 rc = VINF_SUCCESS;
1695 break;
1696 }
1697 rc = VERR_TRY_AGAIN;
1698 break;
1699
1700 case TMTIMERSTATE_PENDING_RESCHEDULE:
1701 case TMTIMERSTATE_PENDING_STOP:
1702 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1703 {
1704 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1705 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1706 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1707 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1708 tmSchedule(pTimer);
1709 rc = VINF_SUCCESS;
1710 break;
1711 }
1712 rc = VERR_TRY_AGAIN;
1713 break;
1714
1715
1716 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1717 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1718 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1719#ifdef IN_RING3
1720 if (!RTThreadYield())
1721 RTThreadSleep(1);
1722#else
1723/** @todo call host context and yield after a couple of iterations */
1724#endif
1725 rc = VERR_TRY_AGAIN;
1726 break;
1727
1728 /*
1729 * Invalid states.
1730 */
1731 case TMTIMERSTATE_DESTROY:
1732 case TMTIMERSTATE_FREE:
1733 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1734 rc = VERR_TM_INVALID_STATE;
1735 break;
1736
1737 default:
1738 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1739 rc = VERR_TM_UNKNOWN_STATE;
1740 break;
1741 }
1742
1743 /* switch + loop is tedious to break out of. */
1744 if (rc == VINF_SUCCESS)
1745 break;
1746
1747 if (rc != VERR_TRY_AGAIN)
1748 {
1749 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1750 break;
1751 }
1752 if (cRetries <= 0)
1753 {
1754 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1755 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1756 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1757 break;
1758 }
1759
1760 /*
1761 * Retry to gain locks.
1762 */
1763 if (!fOwnTMLock)
1764 fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
1765
1766 } /* for (;;) */
1767
1768 /*
1769 * Clean up and return.
1770 */
1771 if (fOwnTMLock)
1772 TM_UNLOCK_TIMERS(pVM);
1773
1774 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1775 return rc;
1776}
1777
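/*
 * Usage sketch for TMTimerSetRelative (a minimal, hypothetical example; it
 * assumes 'pTimer' was created by one of the TMR3TimerCreate* functions and
 * that the caller owns the timer's critical section, cf.
 * TMTIMER_ASSERT_CRITSECT):
 *
 *      uint64_t u64Now = 0;
 *      int rc = TMTimerSetRelative(pTimer, TMTimerFromMilli(pTimer, 1), &u64Now);
 *      AssertRC(rc); // the timer now expires one millisecond after u64Now.
 */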
1778
1779/**
1780 * Drops a hint about the frequency of the timer.
1781 *
1782 * This is used by TM and the VMM to calculate how often guest execution needs
1783 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1784 *
1785 * @returns VBox status code.
1786 * @param pTimer Timer handle as returned by one of the create
1787 * functions.
1788 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1789 *
1790 * @remarks We're using an integer hertz value here since anything above 1 Hz
1791 * poses no trouble to satisfy scheduling-wise. The range where the
1792 * hint makes sense is >= 100 Hz.
1793 */
1794VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
1795{
1796 TMTIMER_ASSERT_CRITSECT(pTimer);
1797
1798 uint32_t const uHzOldHint = pTimer->uHzHint;
1799 pTimer->uHzHint = uHzHint;
1800
1801 PVM pVM = pTimer->CTX_SUFF(pVM);
1802 uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
1803 if ( uHzHint > uMaxHzHint
1804 || uHzOldHint >= uMaxHzHint)
1805 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1806
1807 return VINF_SUCCESS;
1808}
1809
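/*
 * Usage sketch for TMTimerSetFrequencyHint (hypothetical periodic timer
 * reprogrammed by the guest to tick at 1 kHz; as noted above, TMTimerStop
 * clears the hint again automatically):
 *
 *      TMTimerSetFrequencyHint(pTimer, 1000);  // guest selected a 1 kHz rate
 *      ...
 *      TMTimerSetFrequencyHint(pTimer, 0);     // explicit clear, if needed
 */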
1810
1811/**
1812 * TMTimerStop for the virtual sync timer queue.
1813 *
1814 * This employs a greatly simplified state machine by always acquiring the
1815 * queue lock and bypassing the scheduling list.
1816 *
1817 * @returns VBox status code.
1818 * @param pVM The cross context VM structure.
1819 * @param pTimer The timer handle.
1820 */
1821static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
1822{
1823 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1824 VM_ASSERT_EMT(pVM);
1825 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1826 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1827 AssertRCReturn(rc, rc);
1828
1829 /* Reset the HZ hint. */
1830 if (pTimer->uHzHint)
1831 {
1832 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1833 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1834 pTimer->uHzHint = 0;
1835 }
1836
1837 /* Update the timer state. */
1838 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1839 TMTIMERSTATE enmState = pTimer->enmState;
1840 switch (enmState)
1841 {
1842 case TMTIMERSTATE_ACTIVE:
1843 tmTimerQueueUnlinkActive(pQueue, pTimer);
1844 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1845 rc = VINF_SUCCESS;
1846 break;
1847
1848 case TMTIMERSTATE_EXPIRED_DELIVER:
1849 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1850 rc = VINF_SUCCESS;
1851 break;
1852
1853 case TMTIMERSTATE_STOPPED:
1854 rc = VINF_SUCCESS;
1855 break;
1856
1857 case TMTIMERSTATE_PENDING_RESCHEDULE:
1858 case TMTIMERSTATE_PENDING_STOP:
1859 case TMTIMERSTATE_PENDING_SCHEDULE:
1860 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1861 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1862 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1863 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1864 case TMTIMERSTATE_DESTROY:
1865 case TMTIMERSTATE_FREE:
1866 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1867 rc = VERR_TM_INVALID_STATE;
1868 break;
1869
1870 default:
1871 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1872 rc = VERR_TM_UNKNOWN_STATE;
1873 break;
1874 }
1875
1876 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1877 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1878 return rc;
1879}
1880
1881
1882/**
1883 * Stop the timer.
1884 * Use TMR3TimerArm() to "un-stop" the timer.
1885 *
1886 * @returns VBox status code.
1887 * @param pTimer Timer handle as returned by one of the create functions.
1888 */
1889VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
1890{
1891 PVMCC pVM = pTimer->CTX_SUFF(pVM);
1892
1893 /* Treat virtual sync timers specially. */
1894 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
1895 return tmTimerVirtualSyncStop(pVM, pTimer);
1896
1897 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1898 TMTIMER_ASSERT_CRITSECT(pTimer);
1899
1900 /*
1901 * Reset the HZ hint.
1902 */
1903 if (pTimer->uHzHint)
1904 {
1905 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1906 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1907 pTimer->uHzHint = 0;
1908 }
1909
1910 /** @todo see if this function needs optimizing. */
1911 int cRetries = 1000;
1912 do
1913 {
1914 /*
1915 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1916 */
1917 TMTIMERSTATE enmState = pTimer->enmState;
1918 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1919 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
1920 switch (enmState)
1921 {
1922 case TMTIMERSTATE_EXPIRED_DELIVER:
1923 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
1924 return VERR_INVALID_PARAMETER;
1925
1926 case TMTIMERSTATE_STOPPED:
1927 case TMTIMERSTATE_PENDING_STOP:
1928 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1929 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1930 return VINF_SUCCESS;
1931
1932 case TMTIMERSTATE_PENDING_SCHEDULE:
1933 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
1934 {
1935 tmSchedule(pTimer);
1936 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1937 return VINF_SUCCESS;
1938 }
1939 break;
1940
1941 case TMTIMERSTATE_PENDING_RESCHEDULE:
1942 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1943 {
1944 tmSchedule(pTimer);
1945 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1946 return VINF_SUCCESS;
1947 }
1948 break;
1949
1950 case TMTIMERSTATE_ACTIVE:
1951 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1952 {
1953 tmSchedule(pTimer);
1954 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1955 return VINF_SUCCESS;
1956 }
1957 break;
1958
1959 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1960 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1961 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1962#ifdef IN_RING3
1963 if (!RTThreadYield())
1964 RTThreadSleep(1);
1965#else
1966/** @todo call host and yield cpu after a while. */
1967#endif
1968 break;
1969
1970 /*
1971 * Invalid states.
1972 */
1973 case TMTIMERSTATE_DESTROY:
1974 case TMTIMERSTATE_FREE:
1975 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1976 return VERR_TM_INVALID_STATE;
1977 default:
1978 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1979 return VERR_TM_UNKNOWN_STATE;
1980 }
1981 } while (cRetries-- > 0);
1982
1983 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1984 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1985 return VERR_TM_TIMER_UNSTABLE_STATE;
1986}
1987
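/*
 * Usage sketch for TMTimerStop (hypothetical device reset path; stopping a
 * timer that is already stopped or pending stop simply returns VINF_SUCCESS):
 *
 *      int rc = TMTimerStop(pTimer);
 *      AssertRC(rc);
 *      Assert(!TMTimerIsActive(pTimer));
 */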
1988
1989/**
1990 * Get the current clock time.
1991 * Handy for calculating the new expire time.
1992 *
1993 * @returns Current clock time.
1994 * @param pTimer Timer handle as returned by one of the create functions.
1995 */
1996VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1997{
1998 PVMCC pVM = pTimer->CTX_SUFF(pVM);
1999
2000 uint64_t u64;
2001 switch (pTimer->enmClock)
2002 {
2003 case TMCLOCK_VIRTUAL:
2004 u64 = TMVirtualGet(pVM);
2005 break;
2006 case TMCLOCK_VIRTUAL_SYNC:
2007 u64 = TMVirtualSyncGet(pVM);
2008 break;
2009 case TMCLOCK_REAL:
2010 u64 = TMRealGet(pVM);
2011 break;
2012 default:
2013 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2014 return UINT64_MAX;
2015 }
2016 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2017 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2018 return u64;
2019}
2020
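/*
 * Usage sketch: calculating a new absolute expire time from the current
 * clock value (hypothetical; TMTimerSet is the absolute-time counterpart
 * of TMTimerSetRelative):
 *
 *      uint64_t const u64Expire = TMTimerGet(pTimer) + TMTimerFromMilli(pTimer, 10);
 *      int rc = TMTimerSet(pTimer, u64Expire);
 */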
2021
2022/**
2023 * Get the frequency of the timer clock.
2024 *
2025 * @returns Clock frequency (as Hz of course).
2026 * @param pTimer Timer handle as returned by one of the create functions.
2027 */
2028VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
2029{
2030 switch (pTimer->enmClock)
2031 {
2032 case TMCLOCK_VIRTUAL:
2033 case TMCLOCK_VIRTUAL_SYNC:
2034 return TMCLOCK_FREQ_VIRTUAL;
2035
2036 case TMCLOCK_REAL:
2037 return TMCLOCK_FREQ_REAL;
2038
2039 default:
2040 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2041 return 0;
2042 }
2043}
2044
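/*
 * Usage sketch: deriving a tick count for an arbitrary period without
 * hardcoding the clock type (hypothetical; here one hundredth of a second):
 *
 *      uint64_t const cTicks = TMTimerGetFreq(pTimer) / 100;
 *      int rc = TMTimerSetRelative(pTimer, cTicks, NULL);
 */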
2045
2046/**
2047 * Get the expire time of the timer.
2048 * Only valid for active timers.
2049 *
2050 * @returns Expire time of the timer.
2051 * @param pTimer Timer handle as returned by one of the create functions.
2052 */
2053VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
2054{
2055 TMTIMER_ASSERT_CRITSECT(pTimer);
2056 int cRetries = 1000;
2057 do
2058 {
2059 TMTIMERSTATE enmState = pTimer->enmState;
2060 switch (enmState)
2061 {
2062 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2063 case TMTIMERSTATE_EXPIRED_DELIVER:
2064 case TMTIMERSTATE_STOPPED:
2065 case TMTIMERSTATE_PENDING_STOP:
2066 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2067 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2068 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2069 return ~(uint64_t)0;
2070
2071 case TMTIMERSTATE_ACTIVE:
2072 case TMTIMERSTATE_PENDING_RESCHEDULE:
2073 case TMTIMERSTATE_PENDING_SCHEDULE:
2074 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2075 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2076 return pTimer->u64Expire;
2077
2078 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2079 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2080#ifdef IN_RING3
2081 if (!RTThreadYield())
2082 RTThreadSleep(1);
2083#endif
2084 break;
2085
2086 /*
2087 * Invalid states.
2088 */
2089 case TMTIMERSTATE_DESTROY:
2090 case TMTIMERSTATE_FREE:
2091 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2092 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2093 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2094 return ~(uint64_t)0;
2095 default:
2096 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2097 return ~(uint64_t)0;
2098 }
2099 } while (cRetries-- > 0);
2100
2101 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
2102 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2103 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2104 return ~(uint64_t)0;
2105}
2106
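/*
 * Usage sketch for TMTimerGetExpire (hypothetical; the function returns
 * ~(uint64_t)0, i.e. UINT64_MAX, whenever the timer isn't active):
 *
 *      uint64_t const u64Expire = TMTimerGetExpire(pTimer);
 *      if (u64Expire != UINT64_MAX)
 *          Log(("Timer fires at %'RU64\n", u64Expire));
 */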
2107
2108/**
2109 * Checks if a timer is active or not.
2110 *
2111 * @returns True if active.
2112 * @returns False if not active.
2113 * @param pTimer Timer handle as returned by one of the create functions.
2114 */
2115VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
2116{
2117 TMTIMERSTATE enmState = pTimer->enmState;
2118 switch (enmState)
2119 {
2120 case TMTIMERSTATE_STOPPED:
2121 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2122 case TMTIMERSTATE_EXPIRED_DELIVER:
2123 case TMTIMERSTATE_PENDING_STOP:
2124 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2125 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2126 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2127 return false;
2128
2129 case TMTIMERSTATE_ACTIVE:
2130 case TMTIMERSTATE_PENDING_RESCHEDULE:
2131 case TMTIMERSTATE_PENDING_SCHEDULE:
2132 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2133 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2134 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2135 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2136 return true;
2137
2138 /*
2139 * Invalid states.
2140 */
2141 case TMTIMERSTATE_DESTROY:
2142 case TMTIMERSTATE_FREE:
2143 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
2144 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2145 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2146 return false;
2147 default:
2148 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2149 return false;
2150 }
2151}
2152
2153
2154/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2155
2156
2157/**
2158 * Arm a timer with a (new) expire time relative to current time.
2159 *
2160 * @returns VBox status code.
2161 * @param pTimer Timer handle as returned by one of the create functions.
2162 * @param cMilliesToNext Number of milliseconds to the next tick.
2163 */
2164VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
2165{
2166 switch (pTimer->enmClock)
2167 {
2168 case TMCLOCK_VIRTUAL:
2169 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2170 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2171
2172 case TMCLOCK_VIRTUAL_SYNC:
2173 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2174 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2175
2176 case TMCLOCK_REAL:
2177 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2178 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
2179
2180 default:
2181 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2182 return VERR_TM_TIMER_BAD_CLOCK;
2183 }
2184}
2185
2186
2187/**
2188 * Arm a timer with a (new) expire time relative to current time.
2189 *
2190 * @returns VBox status code.
2191 * @param pTimer Timer handle as returned by one of the create functions.
2192 * @param cMicrosToNext Number of microseconds to the next tick.
2193 */
2194VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
2195{
2196 switch (pTimer->enmClock)
2197 {
2198 case TMCLOCK_VIRTUAL:
2199 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2200 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2201
2202 case TMCLOCK_VIRTUAL_SYNC:
2203 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2204 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2205
2206 case TMCLOCK_REAL:
2207 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2208 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
2209
2210 default:
2211 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2212 return VERR_TM_TIMER_BAD_CLOCK;
2213 }
2214}
2215
2216
2217/**
2218 * Arm a timer with a (new) expire time relative to current time.
2219 *
2220 * @returns VBox status code.
2221 * @param pTimer Timer handle as returned by one of the create functions.
2222 * @param cNanosToNext Number of nanoseconds to the next tick.
2223 */
2224VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
2225{
2226 switch (pTimer->enmClock)
2227 {
2228 case TMCLOCK_VIRTUAL:
2229 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2230 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2231
2232 case TMCLOCK_VIRTUAL_SYNC:
2233 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2234 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2235
2236 case TMCLOCK_REAL:
2237 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2238 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
2239
2240 default:
2241 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2242 return VERR_TM_TIMER_BAD_CLOCK;
2243 }
2244}
2245
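/*
 * The three convenience setters above are clock agnostic; e.g. each of these
 * hypothetical calls arms the timer one second ahead regardless of whether
 * it uses TMCLOCK_VIRTUAL, TMCLOCK_VIRTUAL_SYNC or TMCLOCK_REAL:
 *
 *      TMTimerSetMillies(pTimer, 1000);
 *      TMTimerSetMicro(pTimer, UINT64_C(1000000));
 *      TMTimerSetNano(pTimer, UINT64_C(1000000000));
 */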
2246
2247/**
2248 * Get the current clock time as nanoseconds.
2249 *
2250 * @returns The timer clock as nanoseconds.
2251 * @param pTimer Timer handle as returned by one of the create functions.
2252 */
2253VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
2254{
2255 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
2256}
2257
2258
2259/**
2260 * Get the current clock time as microseconds.
2261 *
2262 * @returns The timer clock as microseconds.
2263 * @param pTimer Timer handle as returned by one of the create functions.
2264 */
2265VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
2266{
2267 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
2268}
2269
2270
2271/**
2272 * Get the current clock time as milliseconds.
2273 *
2274 * @returns The timer clock as milliseconds.
2275 * @param pTimer Timer handle as returned by one of the create functions.
2276 */
2277VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
2278{
2279 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
2280}
2281
2282
2283/**
2284 * Converts the specified timer clock time to nanoseconds.
2285 *
2286 * @returns nanoseconds.
2287 * @param pTimer Timer handle as returned by one of the create functions.
2288 * @param u64Ticks The clock ticks.
2289 * @remark There could be rounding errors here. We just do a simple integer divide
2290 * without any adjustments.
2291 */
2292VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
2293{
2294 switch (pTimer->enmClock)
2295 {
2296 case TMCLOCK_VIRTUAL:
2297 case TMCLOCK_VIRTUAL_SYNC:
2298 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2299 return u64Ticks;
2300
2301 case TMCLOCK_REAL:
2302 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2303 return u64Ticks * 1000000;
2304
2305 default:
2306 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2307 return 0;
2308 }
2309}
2310
2311
2312/**
2313 * Converts the specified timer clock time to microseconds.
2314 *
2315 * @returns microseconds.
2316 * @param pTimer Timer handle as returned by one of the create functions.
2317 * @param u64Ticks The clock ticks.
2318 * @remark There could be rounding errors here. We just do a simple integer divide
2319 * without any adjustments.
2320 */
2321VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
2322{
2323 switch (pTimer->enmClock)
2324 {
2325 case TMCLOCK_VIRTUAL:
2326 case TMCLOCK_VIRTUAL_SYNC:
2327 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2328 return u64Ticks / 1000;
2329
2330 case TMCLOCK_REAL:
2331 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2332 return u64Ticks * 1000;
2333
2334 default:
2335 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2336 return 0;
2337 }
2338}
2339
2340
2341/**
2342 * Converts the specified timer clock time to milliseconds.
2343 *
2344 * @returns milliseconds.
2345 * @param pTimer Timer handle as returned by one of the create functions.
2346 * @param u64Ticks The clock ticks.
2347 * @remark There could be rounding errors here. We just do a simple integer divide
2348 * without any adjustments.
2349 */
2350VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
2351{
2352 switch (pTimer->enmClock)
2353 {
2354 case TMCLOCK_VIRTUAL:
2355 case TMCLOCK_VIRTUAL_SYNC:
2356 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2357 return u64Ticks / 1000000;
2358
2359 case TMCLOCK_REAL:
2360 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2361 return u64Ticks;
2362
2363 default:
2364 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2365 return 0;
2366 }
2367}
2368
2369
2370/**
2371 * Converts the specified nanosecond timestamp to timer clock ticks.
2372 *
2373 * @returns timer clock ticks.
2374 * @param pTimer Timer handle as returned by one of the create functions.
2375 * @param cNanoSecs The nanosecond value to convert.
2376 * @remark There could be rounding and overflow errors here.
2377 */
2378VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t cNanoSecs)
2379{
2380 switch (pTimer->enmClock)
2381 {
2382 case TMCLOCK_VIRTUAL:
2383 case TMCLOCK_VIRTUAL_SYNC:
2384 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2385 return cNanoSecs;
2386
2387 case TMCLOCK_REAL:
2388 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2389 return cNanoSecs / 1000000;
2390
2391 default:
2392 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2393 return 0;
2394 }
2395}
2396
2397
2398/**
2399 * Converts the specified microsecond timestamp to timer clock ticks.
2400 *
2401 * @returns timer clock ticks.
2402 * @param pTimer Timer handle as returned by one of the create functions.
2403 * @param cMicroSecs The microsecond value to convert.
2404 * @remark There could be rounding and overflow errors here.
2405 */
2406VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t cMicroSecs)
2407{
2408 switch (pTimer->enmClock)
2409 {
2410 case TMCLOCK_VIRTUAL:
2411 case TMCLOCK_VIRTUAL_SYNC:
2412 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2413 return cMicroSecs * 1000;
2414
2415 case TMCLOCK_REAL:
2416 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2417 return cMicroSecs / 1000;
2418
2419 default:
2420 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2421 return 0;
2422 }
2423}
2424
2425
2426/**
2427 * Converts the specified millisecond timestamp to timer clock ticks.
2428 *
2429 * @returns timer clock ticks.
2430 * @param pTimer Timer handle as returned by one of the create functions.
2431 * @param cMilliSecs The millisecond value to convert.
2432 * @remark There could be rounding and overflow errors here.
2433 */
2434VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t cMilliSecs)
2435{
2436 switch (pTimer->enmClock)
2437 {
2438 case TMCLOCK_VIRTUAL:
2439 case TMCLOCK_VIRTUAL_SYNC:
2440 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2441 return cMilliSecs * 1000000;
2442
2443 case TMCLOCK_REAL:
2444 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2445 return cMilliSecs;
2446
2447 default:
2448 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2449 return 0;
2450 }
2451}
2452
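/*
 * Note that these conversions truncate, as the remarks above say.  A worked
 * example for a TMCLOCK_REAL timer (1000 Hz, i.e. one tick per millisecond):
 *
 *      uint64_t const cTicks = TMTimerFromMicro(pTimer, 1500); // -> 1 tick
 *      uint64_t const cUs    = TMTimerToMicro(pTimer, cTicks); // -> 1000, not 1500
 */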
2453
2454/**
2455 * Convert state to string.
2456 *
2457 * @returns Read-only state name.
2458 * @param enmState State.
2459 */
2460const char *tmTimerState(TMTIMERSTATE enmState)
2461{
2462 switch (enmState)
2463 {
2464#define CASE(num, state) \
2465 case TMTIMERSTATE_##state: \
2466 AssertCompile(TMTIMERSTATE_##state == (num)); \
2467 return #num "-" #state
2468 CASE( 1,STOPPED);
2469 CASE( 2,ACTIVE);
2470 CASE( 3,EXPIRED_GET_UNLINK);
2471 CASE( 4,EXPIRED_DELIVER);
2472 CASE( 5,PENDING_STOP);
2473 CASE( 6,PENDING_STOP_SCHEDULE);
2474 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2475 CASE( 8,PENDING_SCHEDULE);
2476 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2477 CASE(10,PENDING_RESCHEDULE);
2478 CASE(11,DESTROY);
2479 CASE(12,FREE);
2480 default:
2481 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2482 return "Invalid state!";
2483#undef CASE
2484 }
2485}
2486
2487
2488/**
2489 * Gets the highest frequency hint for all the important timers.
2490 *
2491 * @returns The highest frequency. 0 if no timers care.
2492 * @param pVM The cross context VM structure.
2493 */
2494static uint32_t tmGetFrequencyHint(PVM pVM)
2495{
2496 /*
2497 * Query the value, recalculate it if necessary.
2498 *
2499 * The "right" highest frequency value isn't so important that we'll block
2500 * waiting on the timer semaphore.
2501 */
2502 uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
2503 if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
2504 {
2505 if (RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
2506 {
2507 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);
2508
2509 /*
2510 * Loop over the timers associated with each clock.
2511 */
2512 uMaxHzHint = 0;
2513 for (int i = 0; i < TMCLOCK_MAX; i++)
2514 {
2515 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2516 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
2517 {
2518 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2519 if (uHzHint > uMaxHzHint)
2520 {
2521 switch (pCur->enmState)
2522 {
2523 case TMTIMERSTATE_ACTIVE:
2524 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2525 case TMTIMERSTATE_EXPIRED_DELIVER:
2526 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2527 case TMTIMERSTATE_PENDING_SCHEDULE:
2528 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2529 case TMTIMERSTATE_PENDING_RESCHEDULE:
2530 uMaxHzHint = uHzHint;
2531 break;
2532
2533 case TMTIMERSTATE_STOPPED:
2534 case TMTIMERSTATE_PENDING_STOP:
2535 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2536 case TMTIMERSTATE_DESTROY:
2537 case TMTIMERSTATE_FREE:
2538 break;
2539 /* no default, want gcc warnings when adding more states. */
2540 }
2541 }
2542 }
2543 }
2544 ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
2545 Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
2546 TM_UNLOCK_TIMERS(pVM);
2547 }
2548 }
2549 return uMaxHzHint;
2550}
2551
2552
2553/**
2554 * Calculates a host timer frequency that would be suitable for the current
2555 * timer load.
2556 *
2557 * This will take the highest timer frequency, adjust for catch-up and warp
2558 * driver, and finally add a little fudge factor. The caller (VMM) will use
2559 * the result to adjust the per-cpu preemption timer.
2560 *
2561 * @returns The highest frequency. 0 if no important timers around.
2562 * @param pVM The cross context VM structure.
2563 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2564 */
2565VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
2566{
2567 uint32_t uHz = tmGetFrequencyHint(pVM);
2568
2569 /* Catch-up: we have to be more aggressive than the % indicates at the
2570 beginning of the effort. */
2571 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2572 {
2573 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2574 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2575 {
2576 if (u32Pct <= 100)
2577 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2578 else if (u32Pct <= 200)
2579 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2580 else if (u32Pct <= 400)
2581 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2582 uHz *= u32Pct + 100;
2583 uHz /= 100;
2584 }
2585 }
2586
2587 /* Warp drive. */
2588 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2589 {
2590 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2591 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2592 {
2593 uHz *= u32Pct;
2594 uHz /= 100;
2595 }
2596 }
2597
2598 /* Fudge factor. */
2599 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2600 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2601 else
2602 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2603 uHz /= 100;
2604
2605 /* Make sure it isn't too high. */
2606 if (uHz > pVM->tm.s.cHostHzMax)
2607 uHz = pVM->tm.s.cHostHzMax;
2608
2609 return uHz;
2610}
2611
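/*
 * Worked example for the catch-up adjustment above, using hypothetical
 * tuning values: with a 1000 Hz hint, a 50% catch-up percentage and
 * cPctHostHzFudgeFactorCatchUp100 = 300, the percentage is first scaled to
 * u32Pct = 50 * 300 / 100 = 150, giving uHz = 1000 * (150 + 100) / 100 =
 * 2500 Hz before the warp drive, the per-CPU fudge factor and the final
 * cHostHzMax clamp are applied.
 */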
2612
2613/**
2614 * Whether the guest virtual clock is ticking.
2615 *
2616 * @returns true if ticking, false otherwise.
2617 * @param pVM The cross context VM structure.
2618 */
2619VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2620{
2621 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2622}
2623