VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 87748

Last change on this file since 87748 was 87748, checked in by vboxsync, 4 years ago

TM: Speed up TMNotifyEndOfExecution by using the TSC instead of RTTimeNanoTS() to measure time spent in guest context. Should give a 5-10% speedup. bugref:9941

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 94.5 KB
Line 
1/* $Id: TMAll.cpp 87748 2021-02-13 03:03:20Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#ifdef DEBUG_bird
24# define DBGFTRACE_DISABLED /* annoying */
25#endif
26#include <VBox/vmm/tm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/dbgftrace.h>
29#ifdef IN_RING3
30#endif
31#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
32#include "TMInternal.h"
33#include <VBox/vmm/vmcc.h>
34
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <VBox/sup.h>
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-math.h>
43#ifdef IN_RING3
44# include <iprt/thread.h>
45#endif
46
47#include "TMInline.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
#ifdef VBOX_STRICT
/** @def TMTIMER_GET_CRITSECT
 * Helper for safely resolving the critical section for a timer belonging to a
 * device instance.
 *
 * In ring-3 the timer's pCritSect member can be used directly; in ring-0 it
 * must be translated to the current context via tmRZTimerGetCritSect().
 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
# ifdef IN_RING3
#  define TMTIMER_GET_CRITSECT(pTimer)  ((pTimer)->pCritSect)
# else
#  define TMTIMER_GET_CRITSECT(pTimer)  tmRZTimerGetCritSect(pTimer)
# endif
#endif
64
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer.
 *
 * The assertion also passes when the VM state is CREATING, RESETTING or
 * RESETTING_LS (see the enmVMState disjuncts below). Compiles to a no-op in
 * non-strict builds. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pTimer); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner(pCritSect) \
                          || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif
87
/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
 * Checks for lock order trouble between the timer critsect and the critical
 * section critsect. The virtual sync critsect must always be entered before
 * the one associated with the timer (see TMR3TimerQueuesDo). It is OK if there
 * isn't any critical section associated with the timer or if the calling thread
 * doesn't own it, ASSUMING of course that the thread using this macro is going
 * to enter the virtual sync critical section anyway.
 *
 * Also waived during VM creation/reset (enmVMState checks below).
 *
 * @remarks This is a slightly relaxed timer locking attitude compared to
 *          TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
 *          should know what it's doing if it's stopping or starting a timer
 *          without taking the device lock.
 */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pTimer); \
            AssertMsg(   pCritSect \
                      && (   !PDMCritSectIsOwner(pCritSect) \
                          || PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock) \
                          || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
#endif
121
122
#if defined(VBOX_STRICT) && defined(IN_RING0)
/**
 * Helper for TMTIMER_GET_CRITSECT
 *
 * Resolves a timer's ring-3 critical section pointer into a ring-0 usable
 * pointer. For device timers it tries the device instance's own critsect
 * first, then a critsect embedded in the shared instance data; anything else
 * falls back to MMHyperR3ToCC().
 *
 * @todo This needs a redo!
 */
DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PTMTIMER pTimer)
{
    if (pTimer->enmType == TMTIMERTYPE_DEV)
    {
        /* Temporarily set EFL.AC while dereferencing a ring-3 pointer from
           ring-0 (presumably to placate SMAP - see the todos). */
        RTCCUINTREG fSavedFlags = ASMAddFlags(X86_EFL_AC); /** @todo fix ring-3 pointer use */
        PPDMDEVINSR0 pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
        ASMSetFlags(fSavedFlags);
        struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
        /* Case 1: the timer uses the device instance's dedicated critsect. */
        if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
            return pDevInsR0->pCritSectRoR0;
        /* Case 2: the critsect lives inside the shared instance data; map by offset. */
        uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
        if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
            return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
    }
    /* Fallback: translate the ring-3 address via the hyper heap mapping. */
    return (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), pTimer->pCritSect);
}
#endif /* VBOX_STRICT && IN_RING0 */
145
146
/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Sample the host TSC now; TMNotifyEndOfExecution uses it to compute the
       nanoseconds spent executing guest code. */
    pVCpu->tm.s.uTscStartExecuting = SUPReadTsc();
    pVCpu->tm.s.fExecuting = true;
#endif
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}
167
168
/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /*
     * Calculate the elapsed tick count and convert it to nanoseconds.
     */
    /** @todo get TSC from caller (HMR0A.asm) */
    uint64_t cTicks = SUPReadTsc() - pVCpu->tm.s.uTscStartExecuting;
# ifdef IN_RING3
    uint64_t const uCpuHz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
# else
    /* In ring-0 we know exactly which host CPU we ran on; use its frequency. */
    uint64_t const uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
# endif
    /* Guard against bogus deltas (TSC jumps, stale start values). */
    AssertStmt(cTicks <= uCpuHz << 2, cTicks = uCpuHz << 2); /* max 4 sec */

    /* cNs = cTicks * 10^9 / uCpuHz.  The helper takes a 32-bit divisor, so
       pre-shift both operands when uCpuHz doesn't fit in 32 bits (< 64 GHz). */
    uint64_t cNsExecutingDelta;
    if (uCpuHz < _4G)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks, RT_NS_1SEC, uCpuHz);
    else if (uCpuHz < 16*_1G64)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 2, RT_NS_1SEC, uCpuHz >> 2);
    else
    {
        Assert(uCpuHz < 64 * _1G64);
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 4, RT_NS_1SEC, uCpuHz >> 4);
    }

    /*
     * Update the data.
     *
     * uTimesGen is bumped to an odd value while the fields are being modified
     * and back to even when done (seqlock-style; presumably so lock-free
     * readers elsewhere can detect in-flight updates - reader side not
     * visible here).
     */
    uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    /** @todo try relax ordering here */
    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.fExecuting = false;
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

    /*
     * Update stats.
     */
# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
# endif

    /* The timer triggers occasional updating of the others and total stats: */
    if (RT_LIKELY(!pVCpu->tm.s.fUpdateStats))
    { /*likely*/ }
    else
    {
        pVCpu->tm.s.fUpdateStats = false;

        /* Total wall-clock time for this vCPU and the derived "other" time
           (total minus executing minus halted). */
        uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
        uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
        int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
        if (cNsOtherNewDelta > 0)
            STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif

        pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
        pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
    }

#endif
}
256
257
/**
 * Notification that the cpu is entering the halt state
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Record when the halt began so TMNotifyEndOfHalt can account the time. */
    pVCpu->tm.s.nsStartHalting = RTTimeNanoTS();
    pVCpu->tm.s.fHalting = true;
#endif

    /* When the TSC is tied to execution, halting counts as execution too
       unless fTSCNotTiedToHalt is set. */
    if (   pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}
281
282
283/**
284 * Notification that the cpu is leaving the halt state
285 *
286 * This call must always be paired with a TMNotifyStartOfHalt call.
287 *
288 * The function may, depending on the configuration, suspend the TSC and future
289 * clocks that only ticks when we're halted.
290 *
291 * @param pVCpu The cross context virtual CPU structure.
292 */
293VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
294{
295 PVM pVM = pVCpu->CTX_SUFF(pVM);
296
297 if ( pVM->tm.s.fTSCTiedToExecution
298 && !pVM->tm.s.fTSCNotTiedToHalt)
299 tmCpuTickPause(pVCpu);
300
301#ifndef VBOX_WITHOUT_NS_ACCOUNTING
302 uint64_t const u64NsTs = RTTimeNanoTS();
303 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.nsStartTotal;
304 uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.nsStartHalting;
305 uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
306 uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;
307
308 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
309 pVCpu->tm.s.fHalting = false;
310 pVCpu->tm.s.fUpdateStats = false;
311 pVCpu->tm.s.cNsHalted = cNsHaltedNew;
312 pVCpu->tm.s.cPeriodsHalted++;
313 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
314
315# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
316 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
317 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
318 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
319 if (cNsOtherNewDelta > 0)
320 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
321# endif
322 pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
323 pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
324#endif
325}
326
327
328/**
329 * Raise the timer force action flag and notify the dedicated timer EMT.
330 *
331 * @param pVM The cross context VM structure.
332 */
333DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
334{
335 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
336 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
337 {
338 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
339 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
340#ifdef IN_RING3
341 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
342#endif
343 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
344 }
345}
346
347
348/**
349 * Schedule the queue which was changed.
350 */
351DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
352{
353 PVMCC pVM = pTimer->CTX_SUFF(pVM);
354 if ( VM_IS_EMT(pVM)
355 && RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
356 {
357 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
358 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
359 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
360#ifdef VBOX_STRICT
361 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
362#endif
363 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
364 TM_UNLOCK_TIMERS(pVM);
365 }
366 else
367 {
368 TMTIMERSTATE enmState = pTimer->enmState;
369 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
370 tmScheduleNotify(pVM);
371 }
372}
373
374
375/**
376 * Try change the state to enmStateNew from enmStateOld
377 * and link the timer into the scheduling queue.
378 *
379 * @returns Success indicator.
380 * @param pTimer Timer in question.
381 * @param enmStateNew The new timer state.
382 * @param enmStateOld The old timer state.
383 */
384DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
385{
386 /*
387 * Attempt state change.
388 */
389 bool fRc;
390 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
391 return fRc;
392}
393
394
395/**
396 * Links the timer onto the scheduling queue.
397 *
398 * @param pQueue The timer queue the timer belongs to.
399 * @param pTimer The timer.
400 *
401 * @todo FIXME: Look into potential race with the thread running the queues
402 * and stuff.
403 */
404DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
405{
406 Assert(!pTimer->offScheduleNext);
407 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
408 int32_t offHead;
409 do
410 {
411 offHead = pQueue->offSchedule;
412 if (offHead)
413 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
414 else
415 pTimer->offScheduleNext = 0;
416 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
417}
418
419
420/**
421 * Try change the state to enmStateNew from enmStateOld
422 * and link the timer into the scheduling queue.
423 *
424 * @returns Success indicator.
425 * @param pTimer Timer in question.
426 * @param enmStateNew The new timer state.
427 * @param enmStateOld The old timer state.
428 */
429DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
430{
431 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
432 {
433 tmTimerLinkSchedule(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
434 return true;
435 }
436 return false;
437}
438
439
/**
 * Links a timer into the active list of a timer queue.
 *
 * The active list is kept sorted by expiration time (ascending), and the
 * queue's u64Expire mirrors the head timer's expiration time.
 *
 * @param   pQueue      The queue.
 * @param   pTimer      The timer.
 * @param   u64Expire   The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offNext);
    Assert(!pTimer->offPrev);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */

    PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
    if (pCur)
    {
        /* Walk the list for the first entry expiring after us. */
        for (;; pCur = TMTIMER_GET_NEXT(pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                /* Insert in front of pCur. */
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                TMTIMER_SET_NEXT(pTimer, pCur);
                TMTIMER_SET_PREV(pTimer, pPrev);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pTimer);
                else
                {
                    /* New head: also publish the new earliest expiration time. */
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive head", R3STRING(pTimer->pszDesc));
                }
                TMTIMER_SET_PREV(pCur, pTimer);
                return;
            }
            if (!pCur->offNext)
            {
                /* Reached the tail: append. */
                TMTIMER_SET_NEXT(pCur, pTimer);
                TMTIMER_SET_PREV(pTimer, pCur);
                DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive tail", R3STRING(pTimer->pszDesc));
                return;
            }
        }
    }
    else
    {
        /* Empty list: this timer becomes head and earliest expiration. */
        TMTIMER_SET_HEAD(pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive empty", R3STRING(pTimer->pszDesc));
    }
}
492
493
494
/**
 * Schedules the given timer on the given queue.
 *
 * Drives the timer through its pending state transitions using atomic
 * compare-and-swap (tmTimerTry); a failed transition means someone changed
 * the state concurrently, in which case the state is re-read and the switch
 * retried a couple of times.
 *
 * @param   pQueue  The timer queue.
 * @param   pTimer  The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);

    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLinkSchedule(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
584
585
/**
 * Schedules the specified timer queue.
 *
 * Atomically detaches the queue's pending-schedule list (singly linked via
 * self-relative byte offsets) and processes each timer on it with
 * tmTimerQueueScheduleOne.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pQueue  The queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);
    NOREF(pVM);

    /*
     * Dequeue the scheduling list and iterate it.
     */
    int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0); /* claim the whole list in one go */
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
    if (!offNext)
        return;
    /* Entries link via byte offsets, not pointers (context independent). */
    PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
    while (pNext)
    {
        /*
         * Unlink the head timer and find the next one.
         */
        PTMTIMER pTimer = pNext;
        pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
        pTimer->offScheduleNext = 0;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
        tmTimerQueueScheduleOne(pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    } /* foreach timer in current schedule batch. */
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}
626
627
#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * Verifies the doubly-linked active lists (prev/next consistency, clock and
 * state validity) and, in ring-3, walks the big created-timers list to check
 * that each timer is (or is not) on its active list as its state demands.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszWhere    Caller location clue.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);

    /*
     * Check the linking of the active lists.
     */
    bool fHaveVirtualSyncLock = false;
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
        {
            /* The virtual-sync queue needs its own lock; skip it (best
               effort) when the lock cannot be taken without blocking. */
            if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) != VINF_SUCCESS)
                continue;
            fHaveVirtualSyncLock = true;
        }
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    /* NOTE(review): enmState is deliberately(?) re-read in the
                       second operand, tolerating a concurrent transition out
                       of ACTIVE - confirm intent before "simplifying". */
                    AssertMsg(  !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                /* Timers in these states must be findable on their active list. */
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                    Assert(pCur->offPrev || pCur == pCurAct);
                    while (pCurAct && pCurAct != pCur)
                        pCurAct = TMTIMER_GET_NEXT(pCurAct);
                    Assert(pCurAct == pCur);
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
                /* Timers in these states must NOT be linked on the active list. */
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    Assert(!pCur->offNext);
                    Assert(!pCur->offPrev);
                    for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                         pCurAct;
                         pCurAct = TMTIMER_GET_NEXT(pCurAct))
                    {
                        Assert(pCurAct != pCur);
                        Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                        Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                    }
                }
                break;

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */

    if (fHaveVirtualSyncLock)
        PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}
#endif /* VBOX_STRICT */
744
745#ifdef VBOX_HIGH_RES_TIMERS_HACK
746
747/**
748 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
749 * EMT is polling.
750 *
751 * @returns See tmTimerPollInternal.
752 * @param pVM The cross context VM structure.
753 * @param u64Now Current virtual clock timestamp.
754 * @param u64Delta The delta to the next even in ticks of the
755 * virtual clock.
756 * @param pu64Delta Where to return the delta.
757 */
758DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
759{
760 Assert(!(u64Delta & RT_BIT_64(63)));
761
762 if (!pVM->tm.s.fVirtualWarpDrive)
763 {
764 *pu64Delta = u64Delta;
765 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
766 }
767
768 /*
769 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
770 */
771 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
772 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
773
774 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
775 u64GipTime -= u64Start; /* the start is GIP time. */
776 if (u64GipTime >= u64Delta)
777 {
778 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
779 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
780 }
781 else
782 {
783 u64Delta -= u64GipTime;
784 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
785 u64Delta += u64GipTime;
786 }
787 *pu64Delta = u64Delta;
788 u64GipTime += u64Start;
789 return u64GipTime;
790}
791
792
793/**
794 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
795 * than the one dedicated to timer work.
796 *
797 * @returns See tmTimerPollInternal.
798 * @param pVM The cross context VM structure.
799 * @param u64Now Current virtual clock timestamp.
800 * @param pu64Delta Where to return the delta.
801 */
802DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
803{
804 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
805 *pu64Delta = s_u64OtherRet;
806 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
807}
808
809
810/**
811 * Worker for tmTimerPollInternal.
812 *
813 * @returns See tmTimerPollInternal.
814 * @param pVM The cross context VM structure.
815 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
816 * @param pVCpuDst The cross context virtual CPU structure of the dedicated
817 * timer EMT.
818 * @param u64Now Current virtual clock timestamp.
819 * @param pu64Delta Where to return the delta.
820 * @param pCounter The statistics counter to update.
821 */
822DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
823 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
824{
825 STAM_COUNTER_INC(pCounter); NOREF(pCounter);
826 if (pVCpuDst != pVCpu)
827 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
828 *pu64Delta = 0;
829 return 0;
830}
831
832/**
833 * Common worker for TMTimerPollGIP and TMTimerPoll.
834 *
835 * This function is called before FFs are checked in the inner execution EM loops.
836 *
837 * @returns The GIP timestamp of the next event.
838 * 0 if the next event has already expired.
839 *
840 * @param pVM The cross context VM structure.
841 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
842 * @param pu64Delta Where to store the delta.
843 *
844 * @thread The emulation thread.
845 *
846 * @remarks GIP uses ns ticks.
847 */
848DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
849{
850 PVMCPU pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
851 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
852 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
853
854 /*
855 * Return straight away if the timer FF is already set ...
856 */
857 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
858 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
859
860 /*
861 * ... or if timers are being run.
862 */
863 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
864 {
865 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
866 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
867 }
868
869 /*
870 * Check for TMCLOCK_VIRTUAL expiration.
871 */
872 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
873 const int64_t i64Delta1 = u64Expire1 - u64Now;
874 if (i64Delta1 <= 0)
875 {
876 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
877 {
878 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
879 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
880 }
881 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
882 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
883 }
884
885 /*
886 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
887 * This isn't quite as straight forward if in a catch-up, not only do
888 * we have to adjust the 'now' but when have to adjust the delta as well.
889 */
890
891 /*
892 * Optimistic lockless approach.
893 */
894 uint64_t u64VirtualSyncNow;
895 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
896 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
897 {
898 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
899 {
900 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
901 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
902 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
903 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
904 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
905 {
906 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
907 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
908 if (i64Delta2 > 0)
909 {
910 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
911 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
912
913 if (pVCpu == pVCpuDst)
914 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
915 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
916 }
917
918 if ( !pVM->tm.s.fRunningQueues
919 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
920 {
921 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
922 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
923 }
924
925 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
926 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
927 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
928 }
929 }
930 }
931 else
932 {
933 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
934 LogFlow(("TMTimerPoll: stopped\n"));
935 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
936 }
937
938 /*
939 * Complicated lockless approach.
940 */
941 uint64_t off;
942 uint32_t u32Pct = 0;
943 bool fCatchUp;
944 int cOuterTries = 42;
945 for (;; cOuterTries--)
946 {
947 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
948 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
949 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
950 if (fCatchUp)
951 {
952 /* No changes allowed, try get a consistent set of parameters. */
953 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
954 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
955 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
956 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
957 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
958 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
959 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
960 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
961 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
962 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
963 || cOuterTries <= 0)
964 {
965 uint64_t u64Delta = u64Now - u64Prev;
966 if (RT_LIKELY(!(u64Delta >> 32)))
967 {
968 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
969 if (off > u64Sub + offGivenUp)
970 off -= u64Sub;
971 else /* we've completely caught up. */
972 off = offGivenUp;
973 }
974 else
975 /* More than 4 seconds since last time (or negative), ignore it. */
976 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
977
978 /* Check that we're still running and in catch up. */
979 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
980 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
981 break;
982 }
983 }
984 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
985 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
986 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
987 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
988 break; /* Got an consistent offset */
989
990 /* Repeat the initial checks before iterating. */
991 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
992 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
993 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
994 {
995 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
996 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
997 }
998 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
999 {
1000 LogFlow(("TMTimerPoll: stopped\n"));
1001 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1002 }
1003 if (cOuterTries <= 0)
1004 break; /* that's enough */
1005 }
1006 if (cOuterTries <= 0)
1007 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
1008 u64VirtualSyncNow = u64Now - off;
1009
1010 /* Calc delta and see if we've got a virtual sync hit. */
1011 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
1012 if (i64Delta2 <= 0)
1013 {
1014 if ( !pVM->tm.s.fRunningQueues
1015 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
1016 {
1017 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
1018 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
1019 }
1020 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
1021 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
1022 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1023 }
1024
1025 /*
1026 * Return the time left to the next event.
1027 */
1028 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
1029 if (pVCpu == pVCpuDst)
1030 {
1031 if (fCatchUp)
1032 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
1033 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
1034 }
1035 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1036}
1037
1038
1039/**
1040 * Set FF if we've passed the next virtual event.
1041 *
1042 * This function is called before FFs are checked in the inner execution EM loops.
1043 *
1044 * @returns true if timers are pending, false if not.
1045 *
1046 * @param pVM The cross context VM structure.
1047 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1048 * @thread The emulation thread.
1049 */
1050VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
1051{
1052 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1053 uint64_t off = 0;
1054 tmTimerPollInternal(pVM, pVCpu, &off);
1055 return off == 0;
1056}
1057
1058
1059/**
1060 * Set FF if we've passed the next virtual event.
1061 *
1062 * This function is called before FFs are checked in the inner execution EM loops.
1063 *
1064 * @param pVM The cross context VM structure.
1065 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1066 * @thread The emulation thread.
1067 */
1068VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
1069{
1070 uint64_t off;
1071 tmTimerPollInternal(pVM, pVCpu, &off);
1072}
1073
1074
1075/**
1076 * Set FF if we've passed the next virtual event.
1077 *
1078 * This function is called before FFs are checked in the inner execution EM loops.
1079 *
1080 * @returns The GIP timestamp of the next event.
1081 * 0 if the next event has already expired.
1082 * @param pVM The cross context VM structure.
1083 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1084 * @param pu64Delta Where to store the delta.
1085 * @thread The emulation thread.
1086 */
VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
{
    /* Thin forwarder to the common poll worker, exposing the timestamp and
       delta to the caller (used by ring-0 / high-res timer code). */
    return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
}
1091
1092#endif /* VBOX_HIGH_RES_TIMERS_HACK */
1093
1094/**
1095 * Gets the host context ring-3 pointer of the timer.
1096 *
1097 * @returns HC R3 pointer.
1098 * @param pTimer Timer handle as returned by one of the create functions.
1099 */
VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
{
    /* Convert the current-context timer pointer to its ring-3 mapping. */
    return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
}
1104
1105
1106/**
1107 * Gets the host context ring-0 pointer of the timer.
1108 *
1109 * @returns HC R0 pointer.
1110 * @param pTimer Timer handle as returned by one of the create functions.
1111 */
VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
{
    /* Convert the current-context timer pointer to its ring-0 mapping. */
    return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
}
1116
1117
1118/**
1119 * Gets the RC pointer of the timer.
1120 *
1121 * @returns RC pointer.
1122 * @param pTimer Timer handle as returned by one of the create functions.
1123 */
VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
{
    /* Convert the current-context timer pointer to its raw-mode mapping. */
    return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
}
1128
1129
1130/**
1131 * Locks the timer clock.
1132 *
1133 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
1134 * if the clock does not have a lock.
1135 * @param pTimer The timer which clock lock we wish to take.
1136 * @param rcBusy What to return in ring-0 and raw-mode context
1137 * if the lock is busy. Pass VINF_SUCCESS to
 *                      acquire the critical section thru a ring-3
 *                      call if necessary.
1140 *
1141 * @remarks Currently only supported on timers using the virtual sync clock.
1142 */
VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
{
    AssertPtr(pTimer);
    /* Only the virtual sync clock has a lock; other clocks are rejected. */
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
    return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
}
1149
1150
1151/**
1152 * Unlocks a timer clock locked by TMTimerLock.
1153 *
1154 * @param pTimer The timer which clock to unlock.
1155 */
VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    /* Must mirror TMTimerLock: only virtual sync timers have a clock lock. */
    AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
    PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}
1162
1163
1164/**
1165 * Checks if the current thread owns the timer clock lock.
1166 *
1167 * @returns @c true if its the owner, @c false if not.
1168 * @param pTimer The timer handle.
1169 */
VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    /* Non-virtual-sync timers have no clock lock, so nobody can own it. */
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
    return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}
1176
1177
1178/**
1179 * Optimized TMTimerSet code path for starting an inactive timer.
1180 *
1181 * @returns VBox status code.
1182 *
1183 * @param pVM The cross context VM structure.
1184 * @param pTimer The timer handle.
1185 * @param u64Expire The new expire time.
1186 */
static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    /* Caller has already switched the timer to ACTIVE (tmTimerTry) and owns
       the TM timer lock, which is released at the bottom of this function. */
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    TMCLOCK const enmClock = pTimer->enmClock;

    /*
     * Calculate and set the expiration time.
     */
    if (enmClock == TMCLOCK_VIRTUAL_SYNC)
    {
        /* Clamp to the current virtual sync time so the expire time is never
           in the past (asserts in strict builds). */
        uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
        AssertMsgStmt(u64Expire >= u64Last,
                      ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
                      u64Expire = u64Last);
    }
    ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));

    /*
     * Link the timer into the active list.
     */
    tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    TM_UNLOCK_TIMERS(pVM);
    return VINF_SUCCESS;
}
1217
1218
1219/**
1220 * TMTimerSet for the virtual sync timer queue.
1221 *
1222 * This employs a greatly simplified state machine by always acquiring the
1223 * queue lock and bypassing the scheduling list.
1224 *
1225 * @returns VBox status code
1226 * @param pVM The cross context VM structure.
1227 * @param pTimer The timer handle.
1228 * @param u64Expire The expiration time.
1229 */
static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    /* Take the virtual sync lock; this serializes us against the queue runner
       so we can manipulate the active list directly. */
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);

            /* The expire time must not be in the past relative to the
               virtual sync clock (asserts in strict builds). */
            AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
                      ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
            /* Re-arm: unlink, update expire time, re-insert at the right spot. */
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        /* The pending/transient states are never used for virtual sync timers
           since this path bypasses the scheduling list, so they are invalid. */
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}
1288
1289
1290/**
1291 * Arm a timer with a (new) expire time.
1292 *
1293 * @returns VBox status code.
1294 * @param pTimer Timer handle as returned by one of the create functions.
1295 * @param u64Expire New expire time.
1296 */
VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);
    STAM_COUNTER_INC(&pTimer->StatSetAbsolute);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
#if 1
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        ||  (   enmState1 == TMTIMERSTATE_STOPPED
             && pTimer->pCritSect))
    {
        /* Try take the TM lock and check the state again. */
        if (RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM)))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                /* Note: tmTimerSetOptimizedStart releases the TM lock. */
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                return VINF_SUCCESS;
            }
            TM_UNLOCK_TIMERS(pVM);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            /* Transient states: another thread is fiddling with the timer,
               back off and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}
1447
1448
1449/**
1450 * Return the current time for the specified clock, setting pu64Now if not NULL.
1451 *
1452 * @returns Current time.
1453 * @param pVM The cross context VM structure.
1454 * @param enmClock The clock to query.
1455 * @param pu64Now Optional pointer where to store the return time
1456 */
1457DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1458{
1459 uint64_t u64Now;
1460 switch (enmClock)
1461 {
1462 case TMCLOCK_VIRTUAL_SYNC:
1463 u64Now = TMVirtualSyncGet(pVM);
1464 break;
1465 case TMCLOCK_VIRTUAL:
1466 u64Now = TMVirtualGet(pVM);
1467 break;
1468 case TMCLOCK_REAL:
1469 u64Now = TMRealGet(pVM);
1470 break;
1471 default:
1472 AssertFatalMsgFailed(("%d\n", enmClock));
1473 }
1474
1475 if (pu64Now)
1476 *pu64Now = u64Now;
1477 return u64Now;
1478}
1479
1480
1481/**
1482 * Optimized TMTimerSetRelative code path.
1483 *
1484 * @returns VBox status code.
1485 *
1486 * @param pVM The cross context VM structure.
1487 * @param pTimer The timer handle.
1488 * @param cTicksToNext Clock ticks until the next time expiration.
1489 * @param pu64Now Where to return the current time stamp used.
1490 * Optional.
1491 */
static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    /* Caller has already switched the timer to ACTIVE (tmTimerTry) and owns
       the TM timer lock, which is released at the bottom of this function. */
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    TMCLOCK const enmClock = pTimer->enmClock;
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
    /* NOTE(review): plain store here, unlike the ASMAtomicWriteU64 used by
       tmTimerSetOptimizedStart - confirm this difference is intentional. */
    pTimer->u64Expire = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", R3STRING(pTimer->pszDesc));
    tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    TM_UNLOCK_TIMERS(pVM);
    return VINF_SUCCESS;
}
1516
1517
1518/**
1519 * TMTimerSetRelative for the virtual sync timer queue.
1520 *
1521 * This employs a greatly simplified state machine by always acquiring the
1522 * queue lock and bypassing the scheduling list.
1523 *
1524 * @returns VBox status code
1525 * @param pVM The cross context VM structure.
1526 * @param pTimer The timer to (re-)arm.
1527 * @param cTicksToNext Clock ticks until the next time expiration.
1528 * @param pu64Now Where to return the current time stamp used.
1529 * Optional.
1530 */
1531static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1532{
1533 STAM_PROFILE_START(pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1534 VM_ASSERT_EMT(pVM);
1535 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1536 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1537 AssertRCReturn(rc, rc);
1538
1539 /* Calculate the expiration tick. */
1540 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1541 if (pu64Now)
1542 *pu64Now = u64Expire;
1543 u64Expire += cTicksToNext;
1544
1545 /* Update the timer. */
1546 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1547 TMTIMERSTATE enmState = pTimer->enmState;
1548 switch (enmState)
1549 {
1550 case TMTIMERSTATE_EXPIRED_DELIVER:
1551 case TMTIMERSTATE_STOPPED:
1552 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1553 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1554 else
1555 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1556 pTimer->u64Expire = u64Expire;
1557 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1558 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1559 rc = VINF_SUCCESS;
1560 break;
1561
1562 case TMTIMERSTATE_ACTIVE:
1563 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1564 tmTimerQueueUnlinkActive(pQueue, pTimer);
1565 pTimer->u64Expire = u64Expire;
1566 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1567 rc = VINF_SUCCESS;
1568 break;
1569
1570 case TMTIMERSTATE_PENDING_RESCHEDULE:
1571 case TMTIMERSTATE_PENDING_STOP:
1572 case TMTIMERSTATE_PENDING_SCHEDULE:
1573 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1574 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1575 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1576 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1577 case TMTIMERSTATE_DESTROY:
1578 case TMTIMERSTATE_FREE:
1579 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1580 rc = VERR_TM_INVALID_STATE;
1581 break;
1582
1583 default:
1584 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1585 rc = VERR_TM_UNKNOWN_STATE;
1586 break;
1587 }
1588
1589 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1590 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1591 return rc;
1592}
1593
1594
1595/**
1596 * Arm a timer with a expire time relative to the current time.
1597 *
1598 * @returns VBox status code.
1599 * @param pTimer Timer handle as returned by one of the create functions.
1600 * @param cTicksToNext Clock ticks until the next time expiration.
1601 * @param pu64Now Where to return the current time stamp used.
1602 * Optional.
1603 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);
    STAM_COUNTER_INC(&pTimer->StatSetRelative);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:               STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:      STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling and we can also be 100% sure that we're not racing the
     * running of the timer queues. As an additional restraint we require the
     * timer to have a critical section associated with to be 100% there aren't
     * concurrent operations on the timer. (This latter isn't necessary any
     * longer as this isn't supported for any timers, critsect or not.)
     *
     * Note! Lock ordering doesn't apply when we only tries to
     * get the innermost locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(  (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                         || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            /* Note: tmTimerSetRelativeOptimizedStart releases the TM lock. */
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif

    /*
     * Unoptimized path.
     */
    int rc;
    TMCLOCK const enmClock = pTimer->enmClock;
    for (int cRetries = 1000; ; cRetries--)
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_STOPPED:
                if (enmClock == TMCLOCK_VIRTUAL_SYNC)
                {
                    /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
                     *        Figure a safe way of activating this timer while the queue is
                     *        being run.
                     *        (99.9% sure this that the assertion is caused by DevAPIC.cpp
                     *        re-starting the timer in response to a initial_count write.) */
                }
                RT_FALL_THRU();
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            /* Transient states: another thread is fiddling with the timer,
               back off and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                rc = VERR_TRY_AGAIN;
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_INVALID_STATE;
                break;

            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_UNKNOWN_STATE;
                break;
        }

        /* switch + loop is tedious to break out of. */
        if (rc == VINF_SUCCESS)
            break;

        if (rc != VERR_TRY_AGAIN)
        {
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }
        if (cRetries <= 0)
        {
            AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_TIMER_UNSTABLE_STATE;
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }

        /*
         * Retry to gain locks.
         */
        if (!fOwnTMLock)
            fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));

    } /* for (;;) */

    /*
     * Clean up and return.
     */
    if (fOwnTMLock)
        TM_UNLOCK_TIMERS(pVM);

    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    return rc;
}
1815
1816
1817/**
1818 * Drops a hint about the frequency of the timer.
1819 *
1820 * This is used by TM and the VMM to calculate how often guest execution needs
1821 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1822 *
1823 * @returns VBox status code.
1824 * @param pTimer Timer handle as returned by one of the create
1825 * functions.
1826 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1827 *
1828 * @remarks We're using an integer hertz value here since anything above 1 HZ
1829 * is not going to be any trouble satisfying scheduling wise. The
1830 * range where it makes sense is >= 100 HZ.
1831 */
1832VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
1833{
1834 TMTIMER_ASSERT_CRITSECT(pTimer);
1835
1836 uint32_t const uHzOldHint = pTimer->uHzHint;
1837 pTimer->uHzHint = uHzHint;
1838
1839 PVM pVM = pTimer->CTX_SUFF(pVM);
1840 uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
1841 if ( uHzHint > uMaxHzHint
1842 || uHzOldHint >= uMaxHzHint)
1843 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1844
1845 return VINF_SUCCESS;
1846}
1847
1848
/**
 * TMTimerStop for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @retval  VINF_SUCCESS if stopped (or already stopped).
 * @retval  VERR_TM_INVALID_STATE or VERR_TM_UNKNOWN_STATE on a bad timer
 *          state, and whatever PDMCritSectEnter may fail with.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 */
static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    /* Reset the HZ hint. */
    if (pTimer->uHzHint)
    {
        /* This timer may be the one defining the aggregated maximum, so ask
           for a lazy recalculation (performed by tmGetFrequencyHint). */
        if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
        pTimer->uHzHint = 0;
    }

    /* Update the timer state. */
    PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_ACTIVE:
            /* Armed and linked on the active list: unlink, then stop. */
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_EXPIRED_DELIVER:
            /* Expired with the callback still pending; no unlinking needed
               in this state, just flip it to stopped. */
            TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_STOPPED:
            /* Stopping an already stopped timer is a harmless no-op. */
            rc = VINF_SUCCESS;
            break;

        /* The PENDING_*/EXPIRED_GET_UNLINK scheduling states are bypassed for
           virtual sync timers (see function docs), so seeing one here - or a
           DESTROY/FREE state - indicates a bug in the caller or in TM. */
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}
1918
1919
1920/**
1921 * Stop the timer.
1922 * Use TMR3TimerArm() to "un-stop" the timer.
1923 *
1924 * @returns VBox status code.
1925 * @param pTimer Timer handle as returned by one of the create functions.
1926 */
1927VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
1928{
1929 PVMCC pVM = pTimer->CTX_SUFF(pVM);
1930 STAM_COUNTER_INC(&pTimer->StatStop);
1931
1932 /* Treat virtual sync timers specially. */
1933 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
1934 return tmTimerVirtualSyncStop(pVM, pTimer);
1935
1936 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1937 TMTIMER_ASSERT_CRITSECT(pTimer);
1938
1939 /*
1940 * Reset the HZ hint.
1941 */
1942 if (pTimer->uHzHint)
1943 {
1944 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1945 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1946 pTimer->uHzHint = 0;
1947 }
1948
1949 /** @todo see if this function needs optimizing. */
1950 int cRetries = 1000;
1951 do
1952 {
1953 /*
1954 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1955 */
1956 TMTIMERSTATE enmState = pTimer->enmState;
1957 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1958 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
1959 switch (enmState)
1960 {
1961 case TMTIMERSTATE_EXPIRED_DELIVER:
1962 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
1963 return VERR_INVALID_PARAMETER;
1964
1965 case TMTIMERSTATE_STOPPED:
1966 case TMTIMERSTATE_PENDING_STOP:
1967 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1968 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1969 return VINF_SUCCESS;
1970
1971 case TMTIMERSTATE_PENDING_SCHEDULE:
1972 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
1973 {
1974 tmSchedule(pTimer);
1975 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1976 return VINF_SUCCESS;
1977 }
1978 break;
1979
1980 case TMTIMERSTATE_PENDING_RESCHEDULE:
1981 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1982 {
1983 tmSchedule(pTimer);
1984 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1985 return VINF_SUCCESS;
1986 }
1987 break;
1988
1989 case TMTIMERSTATE_ACTIVE:
1990 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1991 {
1992 tmSchedule(pTimer);
1993 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1994 return VINF_SUCCESS;
1995 }
1996 break;
1997
1998 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1999 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2000 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2001#ifdef IN_RING3
2002 if (!RTThreadYield())
2003 RTThreadSleep(1);
2004#else
2005/** @todo call host and yield cpu after a while. */
2006#endif
2007 break;
2008
2009 /*
2010 * Invalid states.
2011 */
2012 case TMTIMERSTATE_DESTROY:
2013 case TMTIMERSTATE_FREE:
2014 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2015 return VERR_TM_INVALID_STATE;
2016 default:
2017 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2018 return VERR_TM_UNKNOWN_STATE;
2019 }
2020 } while (cRetries-- > 0);
2021
2022 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
2023 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2024 return VERR_TM_TIMER_UNSTABLE_STATE;
2025}
2026
2027
2028/**
2029 * Get the current clock time.
2030 * Handy for calculating the new expire time.
2031 *
2032 * @returns Current clock time.
2033 * @param pTimer Timer handle as returned by one of the create functions.
2034 */
2035VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
2036{
2037 PVMCC pVM = pTimer->CTX_SUFF(pVM);
2038 STAM_COUNTER_INC(&pTimer->StatGet);
2039
2040 uint64_t u64;
2041 switch (pTimer->enmClock)
2042 {
2043 case TMCLOCK_VIRTUAL:
2044 u64 = TMVirtualGet(pVM);
2045 break;
2046 case TMCLOCK_VIRTUAL_SYNC:
2047 u64 = TMVirtualSyncGet(pVM);
2048 break;
2049 case TMCLOCK_REAL:
2050 u64 = TMRealGet(pVM);
2051 break;
2052 default:
2053 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2054 return UINT64_MAX;
2055 }
2056 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2057 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2058 return u64;
2059}
2060
2061
2062/**
2063 * Get the frequency of the timer clock.
2064 *
2065 * @returns Clock frequency (as Hz of course).
2066 * @param pTimer Timer handle as returned by one of the create functions.
2067 */
2068VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
2069{
2070 switch (pTimer->enmClock)
2071 {
2072 case TMCLOCK_VIRTUAL:
2073 case TMCLOCK_VIRTUAL_SYNC:
2074 return TMCLOCK_FREQ_VIRTUAL;
2075
2076 case TMCLOCK_REAL:
2077 return TMCLOCK_FREQ_REAL;
2078
2079 default:
2080 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2081 return 0;
2082 }
2083}
2084
2085
/**
 * Get the expire time of the timer.
 * Only valid for active timers.
 *
 * @returns Expire time of the timer; ~0 (UINT64_MAX) when the timer is not
 *          armed, is in an invalid state, or never settles.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
{
    TMTIMER_ASSERT_CRITSECT(pTimer);
    /* Another thread may be mid-transition; retry until the state settles. */
    int cRetries = 1000;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /* Not armed: there is no meaningful expire time. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;

            /* Armed (or about to be): u64Expire is stable in these states. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return pTimer->u64Expire;

            /* The expire time is being written by another thread right now;
               back off briefly and retry. */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
          pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return ~(uint64_t)0;
}
2146
2147
2148/**
2149 * Checks if a timer is active or not.
2150 *
2151 * @returns True if active.
2152 * @returns False if not active.
2153 * @param pTimer Timer handle as returned by one of the create functions.
2154 */
2155VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
2156{
2157 TMTIMERSTATE enmState = pTimer->enmState;
2158 switch (enmState)
2159 {
2160 case TMTIMERSTATE_STOPPED:
2161 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2162 case TMTIMERSTATE_EXPIRED_DELIVER:
2163 case TMTIMERSTATE_PENDING_STOP:
2164 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2165 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2166 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2167 return false;
2168
2169 case TMTIMERSTATE_ACTIVE:
2170 case TMTIMERSTATE_PENDING_RESCHEDULE:
2171 case TMTIMERSTATE_PENDING_SCHEDULE:
2172 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2173 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2174 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2175 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2176 return true;
2177
2178 /*
2179 * Invalid states.
2180 */
2181 case TMTIMERSTATE_DESTROY:
2182 case TMTIMERSTATE_FREE:
2183 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
2184 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2185 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2186 return false;
2187 default:
2188 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2189 return false;
2190 }
2191}
2192
2193
2194/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2195
2196
2197/**
2198 * Arm a timer with a (new) expire time relative to current time.
2199 *
2200 * @returns VBox status code.
2201 * @param pTimer Timer handle as returned by one of the create functions.
2202 * @param cMilliesToNext Number of milliseconds to the next tick.
2203 */
2204VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
2205{
2206 switch (pTimer->enmClock)
2207 {
2208 case TMCLOCK_VIRTUAL:
2209 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2210 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2211
2212 case TMCLOCK_VIRTUAL_SYNC:
2213 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2214 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2215
2216 case TMCLOCK_REAL:
2217 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2218 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
2219
2220 default:
2221 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2222 return VERR_TM_TIMER_BAD_CLOCK;
2223 }
2224}
2225
2226
2227/**
2228 * Arm a timer with a (new) expire time relative to current time.
2229 *
2230 * @returns VBox status code.
2231 * @param pTimer Timer handle as returned by one of the create functions.
2232 * @param cMicrosToNext Number of microseconds to the next tick.
2233 */
2234VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
2235{
2236 switch (pTimer->enmClock)
2237 {
2238 case TMCLOCK_VIRTUAL:
2239 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2240 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2241
2242 case TMCLOCK_VIRTUAL_SYNC:
2243 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2244 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2245
2246 case TMCLOCK_REAL:
2247 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2248 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
2249
2250 default:
2251 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2252 return VERR_TM_TIMER_BAD_CLOCK;
2253 }
2254}
2255
2256
2257/**
2258 * Arm a timer with a (new) expire time relative to current time.
2259 *
2260 * @returns VBox status code.
2261 * @param pTimer Timer handle as returned by one of the create functions.
2262 * @param cNanosToNext Number of nanoseconds to the next tick.
2263 */
2264VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
2265{
2266 switch (pTimer->enmClock)
2267 {
2268 case TMCLOCK_VIRTUAL:
2269 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2270 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2271
2272 case TMCLOCK_VIRTUAL_SYNC:
2273 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2274 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2275
2276 case TMCLOCK_REAL:
2277 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2278 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
2279
2280 default:
2281 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2282 return VERR_TM_TIMER_BAD_CLOCK;
2283 }
2284}
2285
2286
2287/**
2288 * Get the current clock time as nanoseconds.
2289 *
2290 * @returns The timer clock as nanoseconds.
2291 * @param pTimer Timer handle as returned by one of the create functions.
2292 */
2293VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
2294{
2295 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
2296}
2297
2298
2299/**
2300 * Get the current clock time as microseconds.
2301 *
2302 * @returns The timer clock as microseconds.
2303 * @param pTimer Timer handle as returned by one of the create functions.
2304 */
2305VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
2306{
2307 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
2308}
2309
2310
2311/**
2312 * Get the current clock time as milliseconds.
2313 *
2314 * @returns The timer clock as milliseconds.
2315 * @param pTimer Timer handle as returned by one of the create functions.
2316 */
2317VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
2318{
2319 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
2320}
2321
2322
2323/**
2324 * Converts the specified timer clock time to nanoseconds.
2325 *
2326 * @returns nanoseconds.
2327 * @param pTimer Timer handle as returned by one of the create functions.
2328 * @param u64Ticks The clock ticks.
2329 * @remark There could be rounding errors here. We just do a simple integer divide
2330 * without any adjustments.
2331 */
2332VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
2333{
2334 switch (pTimer->enmClock)
2335 {
2336 case TMCLOCK_VIRTUAL:
2337 case TMCLOCK_VIRTUAL_SYNC:
2338 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2339 return u64Ticks;
2340
2341 case TMCLOCK_REAL:
2342 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2343 return u64Ticks * 1000000;
2344
2345 default:
2346 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2347 return 0;
2348 }
2349}
2350
2351
2352/**
2353 * Converts the specified timer clock time to microseconds.
2354 *
2355 * @returns microseconds.
2356 * @param pTimer Timer handle as returned by one of the create functions.
2357 * @param u64Ticks The clock ticks.
2358 * @remark There could be rounding errors here. We just do a simple integer divide
2359 * without any adjustments.
2360 */
2361VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
2362{
2363 switch (pTimer->enmClock)
2364 {
2365 case TMCLOCK_VIRTUAL:
2366 case TMCLOCK_VIRTUAL_SYNC:
2367 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2368 return u64Ticks / 1000;
2369
2370 case TMCLOCK_REAL:
2371 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2372 return u64Ticks * 1000;
2373
2374 default:
2375 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2376 return 0;
2377 }
2378}
2379
2380
2381/**
2382 * Converts the specified timer clock time to milliseconds.
2383 *
2384 * @returns milliseconds.
2385 * @param pTimer Timer handle as returned by one of the create functions.
2386 * @param u64Ticks The clock ticks.
2387 * @remark There could be rounding errors here. We just do a simple integer divide
2388 * without any adjustments.
2389 */
2390VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
2391{
2392 switch (pTimer->enmClock)
2393 {
2394 case TMCLOCK_VIRTUAL:
2395 case TMCLOCK_VIRTUAL_SYNC:
2396 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2397 return u64Ticks / 1000000;
2398
2399 case TMCLOCK_REAL:
2400 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2401 return u64Ticks;
2402
2403 default:
2404 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2405 return 0;
2406 }
2407}
2408
2409
2410/**
2411 * Converts the specified nanosecond timestamp to timer clock ticks.
2412 *
2413 * @returns timer clock ticks.
2414 * @param pTimer Timer handle as returned by one of the create functions.
2415 * @param cNanoSecs The nanosecond value ticks to convert.
2416 * @remark There could be rounding and overflow errors here.
2417 */
2418VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t cNanoSecs)
2419{
2420 switch (pTimer->enmClock)
2421 {
2422 case TMCLOCK_VIRTUAL:
2423 case TMCLOCK_VIRTUAL_SYNC:
2424 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2425 return cNanoSecs;
2426
2427 case TMCLOCK_REAL:
2428 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2429 return cNanoSecs / 1000000;
2430
2431 default:
2432 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2433 return 0;
2434 }
2435}
2436
2437
2438/**
2439 * Converts the specified microsecond timestamp to timer clock ticks.
2440 *
2441 * @returns timer clock ticks.
2442 * @param pTimer Timer handle as returned by one of the create functions.
2443 * @param cMicroSecs The microsecond value ticks to convert.
2444 * @remark There could be rounding and overflow errors here.
2445 */
2446VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t cMicroSecs)
2447{
2448 switch (pTimer->enmClock)
2449 {
2450 case TMCLOCK_VIRTUAL:
2451 case TMCLOCK_VIRTUAL_SYNC:
2452 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2453 return cMicroSecs * 1000;
2454
2455 case TMCLOCK_REAL:
2456 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2457 return cMicroSecs / 1000;
2458
2459 default:
2460 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2461 return 0;
2462 }
2463}
2464
2465
2466/**
2467 * Converts the specified millisecond timestamp to timer clock ticks.
2468 *
2469 * @returns timer clock ticks.
2470 * @param pTimer Timer handle as returned by one of the create functions.
2471 * @param cMilliSecs The millisecond value ticks to convert.
2472 * @remark There could be rounding and overflow errors here.
2473 */
2474VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t cMilliSecs)
2475{
2476 switch (pTimer->enmClock)
2477 {
2478 case TMCLOCK_VIRTUAL:
2479 case TMCLOCK_VIRTUAL_SYNC:
2480 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2481 return cMilliSecs * 1000000;
2482
2483 case TMCLOCK_REAL:
2484 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2485 return cMilliSecs;
2486
2487 default:
2488 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2489 return 0;
2490 }
2491}
2492
2493
2494/**
2495 * Convert state to string.
2496 *
2497 * @returns Readonly status name.
2498 * @param enmState State.
2499 */
2500const char *tmTimerState(TMTIMERSTATE enmState)
2501{
2502 switch (enmState)
2503 {
2504#define CASE(num, state) \
2505 case TMTIMERSTATE_##state: \
2506 AssertCompile(TMTIMERSTATE_##state == (num)); \
2507 return #num "-" #state
2508 CASE( 1,STOPPED);
2509 CASE( 2,ACTIVE);
2510 CASE( 3,EXPIRED_GET_UNLINK);
2511 CASE( 4,EXPIRED_DELIVER);
2512 CASE( 5,PENDING_STOP);
2513 CASE( 6,PENDING_STOP_SCHEDULE);
2514 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2515 CASE( 8,PENDING_SCHEDULE);
2516 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2517 CASE(10,PENDING_RESCHEDULE);
2518 CASE(11,DESTROY);
2519 CASE(12,FREE);
2520 default:
2521 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2522 return "Invalid state!";
2523#undef CASE
2524 }
2525}
2526
2527
/**
 * Gets the highest frequency hint for all the important timers.
 *
 * @returns The highest frequency.  0 if no timers care.
 * @param   pVM         The cross context VM structure.
 */
static uint32_t tmGetFrequencyHint(PVM pVM)
{
    /*
     * Query the value, recalculate it if necessary.
     *
     * The "right" highest frequency value isn't so important that we'll block
     * waiting on the timer semaphore.
     */
    uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
    if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
    {
        /* Only recalculate if we can grab the timer lock without waiting;
           otherwise the slightly stale cached value is returned. */
        if (RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
        {
            /* Clear the flag first so a concurrent hint change during the scan
               re-arms the recalculation rather than being lost. */
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);

            /*
             * Loop over the timers associated with each clock.
             */
            uMaxHzHint = 0;
            for (int i = 0; i < TMCLOCK_MAX; i++)
            {
                PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
                for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
                {
                    uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
                    if (uHzHint > uMaxHzHint)
                    {
                        /* Only timers that are (or are becoming) armed count
                           towards the maximum; stopped/dying ones are skipped. */
                        switch (pCur->enmState)
                        {
                            case TMTIMERSTATE_ACTIVE:
                            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                            case TMTIMERSTATE_EXPIRED_DELIVER:
                            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_SCHEDULE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE:
                                uMaxHzHint = uHzHint;
                                break;

                            case TMTIMERSTATE_STOPPED:
                            case TMTIMERSTATE_PENDING_STOP:
                            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                            case TMTIMERSTATE_DESTROY:
                            case TMTIMERSTATE_FREE:
                                break;
                            /* no default, want gcc warnings when adding more states. */
                        }
                    }
                }
            }
            /* Publish the freshly calculated maximum for other readers. */
            ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
            Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
            TM_UNLOCK_TIMERS(pVM);
        }
    }
    return uMaxHzHint;
}
2591
2592
/**
 * Calculates a host timer frequency that would be suitable for the current
 * timer load.
 *
 * This will take the highest timer frequency, adjust for catch-up and warp
 * driver, and finally add a little fudge factor.  The caller (VMM) will use
 * the result to adjust the per-cpu preemption timer.
 *
 * @returns The highest frequency.  0 if no important timers around.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
{
    uint32_t uHz = tmGetFrequencyHint(pVM);

    /* Catch up, we have to be more aggressive than the % indicates at the
       beginning of the effort. */
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
    {
        uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
        /* Re-check the flag after fetching the percentage so a catch-up that
           ended in between doesn't inflate the result with a stale value. */
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            /* Scale the percentage by a fudge factor that shrinks as the
               catch-up percentage grows (tiered at 100/200/400%). */
            if (u32Pct <= 100)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
            else if (u32Pct <= 200)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
            else if (u32Pct <= 400)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
            uHz *= u32Pct + 100;
            uHz /= 100;
        }
    }

    /* Warp drive. */
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
    {
        uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
        /* Same double-check as above: only apply if warp is still active. */
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
        {
            uHz *= u32Pct;
            uHz /= 100;
        }
    }

    /* Fudge factor.  The CPU servicing the timers gets a different (larger
       or smaller, per configuration) factor than the other CPUs. */
    if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
        uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
    else
        uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
    uHz /= 100;

    /* Make sure it isn't too high. */
    if (uHz > pVM->tm.s.cHostHzMax)
        uHz = pVM->tm.s.cHostHzMax;

    return uHz;
}
2651
2652
2653/**
2654 * Whether the guest virtual clock is ticking.
2655 *
2656 * @returns true if ticking, false otherwise.
2657 * @param pVM The cross context VM structure.
2658 */
2659VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2660{
2661 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2662}
2663
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette