VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/timer-r0drv-linux.c

Last change on this file was 108895, checked in by vboxsync, 4 weeks ago

iprt: Linux: Add initial support for kernel 6.15 (fix typo), bugref:10891.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 59.8 KB
/* $Id: timer-r0drv-linux.c 108895 2025-04-08 15:41:17Z vboxsync $ */
/** @file
 * IPRT - Timers, Ring-0 Driver, Linux.
 */

/*
 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-linux-kernel.h"
#include "internal/iprt.h"

#include <iprt/timer.h>
#include <iprt/time.h>
#include <iprt/mp.h>
#include <iprt/cpuset.h>
#include <iprt/spinlock.h>
#include <iprt/err.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/alloc.h>

#include "internal/magics.h"

/** @def RTTIMER_LINUX_WITH_HRTIMER
 * Whether to use high resolution timers. */
#if !defined(RTTIMER_LINUX_WITH_HRTIMER) \
    && defined(IPRT_LINUX_HAS_HRTIMER)
# define RTTIMER_LINUX_WITH_HRTIMER
#endif

#if RTLNX_VER_MAX(2,6,31)
# define mod_timer_pinned           mod_timer
# define HRTIMER_MODE_ABS_PINNED    HRTIMER_MODE_ABS
#endif


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * Timer state machine.
 *
 * This is used to try to handle the issues with MP events and
 * timers that run on all CPUs. It's relatively nasty :-/
 */
typedef enum RTTIMERLNXSTATE
{
    /** Stopped. */
    RTTIMERLNXSTATE_STOPPED = 0,
    /** Transient state; next ACTIVE. */
    RTTIMERLNXSTATE_STARTING,
    /** Transient state; next ACTIVE. (not really necessary) */
    RTTIMERLNXSTATE_MP_STARTING,
    /** Active. */
    RTTIMERLNXSTATE_ACTIVE,
    /** Active and in callback; next ACTIVE, STOPPED or CB_DESTROYING. */
    RTTIMERLNXSTATE_CALLBACK,
    /** Stopped while in the callback; next STOPPED. */
    RTTIMERLNXSTATE_CB_STOPPING,
    /** Restarted while in the callback; next ACTIVE, STOPPED or CB_DESTROYING. */
    RTTIMERLNXSTATE_CB_RESTARTING,
    /** The callback shall destroy the timer; next STOPPED. */
    RTTIMERLNXSTATE_CB_DESTROYING,
    /** Transient state; next STOPPED. */
    RTTIMERLNXSTATE_STOPPING,
    /** Transient state; next STOPPED. */
    RTTIMERLNXSTATE_MP_STOPPING,
    /** The usual 32-bit hack. */
    RTTIMERLNXSTATE_32BIT_HACK = 0x7fffffff
} RTTIMERLNXSTATE;
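
/*
 * Summary of the intended transitions (note added for readability; distilled
 * from the per-state docs above, not an exhaustive graph):
 *
 *      STOPPED  -> STARTING / MP_STARTING -> ACTIVE -> CALLBACK
 *      CALLBACK -> ACTIVE                  (periodic timer re-armed)
 *      CALLBACK -> STOPPED                 (one-shot completed)
 *      CALLBACK -> CB_STOPPING / CB_RESTARTING / CB_DESTROYING
 *      ACTIVE   -> STOPPING / MP_STOPPING -> STOPPED
 */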


/**
 * A Linux sub-timer.
 */
typedef struct RTTIMERLNXSUBTIMER
{
    /** Timer specific data. */
    union
    {
#if defined(RTTIMER_LINUX_WITH_HRTIMER)
        /** High resolution timer. */
        struct
        {
            /** The linux timer structure. */
            struct hrtimer          LnxTimer;
        } Hr;
#endif
        /** Standard timer. */
        struct
        {
            /** The linux timer structure. */
            struct timer_list       LnxTimer;
            /** The start of the current run (ns).
             * This is used to calculate when the timer ought to fire the next time. */
            uint64_t                u64NextTS;
            /** When the timer was started. */
            uint64_t                nsStartTS;
            /** The u64NextTS in jiffies. */
            unsigned long           ulNextJiffies;
            /** Set when starting or changing the timer so that u64StartTs
             *  and u64NextTS gets reinitialized (eliminating some jitter). */
            bool volatile           fFirstAfterChg;
        } Std;
    } u;
    /** The current tick number. */
    uint64_t                iTick;
    /** Restart the single shot timer at this specific time.
     * Used when a single shot timer is restarted from the callback. */
    uint64_t volatile       uNsRestartAt;
    /** Pointer to the parent timer. */
    PRTTIMER                pParent;
    /** The current sub-timer state. */
    RTTIMERLNXSTATE volatile enmState;
} RTTIMERLNXSUBTIMER;
/** Pointer to a linux sub-timer. */
typedef RTTIMERLNXSUBTIMER *PRTTIMERLNXSUBTIMER;


/**
 * The internal representation of a Linux timer handle.
 */
typedef struct RTTIMER
{
    /** Magic.
     * This is RTTIMER_MAGIC, but changes to something else before the timer
     * is destroyed to indicate clearly that the thread should exit. */
    uint32_t volatile       u32Magic;
    /** Spinlock synchronizing the fSuspended and MP event handling.
     * This is NIL_RTSPINLOCK if cCpus == 1. */
    RTSPINLOCK              hSpinlock;
    /** Flag indicating that the timer is suspended. */
    bool volatile           fSuspended;
    /** Whether the timer must run on one specific CPU or not. */
    bool                    fSpecificCpu;
#ifdef CONFIG_SMP
    /** Whether the timer must run on all CPUs or not. */
    bool                    fAllCpus;
#endif /* else: All -> specific on non-SMP kernels */
    /** Whether it is a high resolution timer or a standard one. */
    bool                    fHighRes;
    /** The id of the CPU it must run on if fSpecificCpu is set. */
    RTCPUID                 idCpu;
    /** The number of CPUs this timer should run on. */
    RTCPUID                 cCpus;
    /** Callback. */
    PFNRTTIMER              pfnTimer;
    /** User argument. */
    void                   *pvUser;
    /** The timer interval. 0 if one-shot. */
    uint64_t volatile       u64NanoInterval;
    /** This is set to the number of jiffies between ticks if the interval is
     * an exact number of jiffies. (Standard timers only.) */
    unsigned long volatile  cJiffies;
    /** The change interval spinlock for standard timers only. */
    spinlock_t              ChgIntLock;
    /** Workqueue item for delayed destruction. */
    RTR0LNXWORKQUEUEITEM    DtorWorkqueueItem;
    /** Sub-timers.
     * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
     * an entry for all possible cpus. In that case the index will be the same as
     * for the RTCpuSet. */
    RTTIMERLNXSUBTIMER      aSubTimers[1];
} RTTIMER;
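
/*
 * Note (added): RTTIMER uses the classic C89 variable-size-struct idiom; the
 * allocation is over-sized so that aSubTimers[] holds cCpus entries.  This is
 * what RTTimerCreateEx below does, and what rtTimerLnxDestroyIt mirrors on
 * the free side:
 *
 *     rc = RTMemAllocEx(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cCpus]), 0,
 *                       RTMEMALLOCEX_FLAGS_ZEROED | RTMEMALLOCEX_FLAGS_ANY_CTX_FREE,
 *                       (void **)&pTimer);
 */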


/**
 * A rtTimerLnxStartAllOnCpu, rtTimerLinuxMpStartOnCpu and
 * rtTimerLnxStartOnSpecificCpu argument package.
 */
typedef struct RTTIMERLINUXSTARTONCPUARGS
{
    /** The current time (RTTimeSystemNanoTS). */
    uint64_t                u64Now;
    /** When to start firing (delta). */
    uint64_t                u64First;
} RTTIMERLINUXSTARTONCPUARGS;
/** Pointer to a RTTIMERLINUXSTARTONCPUARGS argument package. */
typedef RTTIMERLINUXSTARTONCPUARGS *PRTTIMERLINUXSTARTONCPUARGS;


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
#ifdef CONFIG_SMP
static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
#endif

#if 0
#define DEBUG_HACKING
#include <iprt/string.h>
#include <iprt/asm-amd64-x86.h>
static void myLogBackdoorPrintf(const char *pszFormat, ...)
{
    char szTmp[256];
    va_list args;
    size_t cb;

    cb = RTStrPrintf(szTmp, sizeof(szTmp) - 10, "%d: ", RTMpCpuId());
    va_start(args, pszFormat);
    cb += RTStrPrintfV(&szTmp[cb], sizeof(szTmp) - cb, pszFormat, args);
    va_end(args);

    ASMOutStrU8(0x504, (uint8_t *)&szTmp[0], cb);
}
# define RTAssertMsg1Weak(pszExpr, uLine, pszFile, pszFunction) \
    myLogBackdoorPrintf("\n!!Guest Assertion failed!!\n%s(%u) %s\n%s\n", pszFile, uLine, pszFunction, (pszExpr))
# define RTAssertMsg2Weak myLogBackdoorPrintf
# define RTTIMERLNX_LOG(a) myLogBackdoorPrintf a
#else
# define RTTIMERLNX_LOG(a) do { } while (0)
#endif

/**
 * Sets the state.
 */
DECLINLINE(void) rtTimerLnxSetState(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState)
{
#ifdef DEBUG_HACKING
    RTTIMERLNX_LOG(("set %d -> %d\n", *penmState, enmNewState));
#endif
    ASMAtomicWriteU32((uint32_t volatile *)penmState, enmNewState);
}


/**
 * Sets the state if it has a certain value.
 *
 * @return true if xchg was done.
 * @return false if xchg wasn't done.
 */
#ifdef DEBUG_HACKING
# define rtTimerLnxCmpXchgState(penmState, enmNewState, enmCurState) rtTimerLnxCmpXchgStateDebug(penmState, enmNewState, enmCurState, __LINE__)
static bool rtTimerLnxCmpXchgStateDebug(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState,
                                        RTTIMERLNXSTATE enmCurState, uint32_t uLine)
{
    RTTIMERLNXSTATE enmOldState = enmCurState;
    bool fRc = ASMAtomicCmpXchgExU32((uint32_t volatile *)penmState, enmNewState, enmCurState, (uint32_t *)&enmOldState);
    RTTIMERLNX_LOG(("cxg %d -> %d - %d at %u\n", enmOldState, enmNewState, fRc, uLine));
    return fRc;
}
#else
DECLINLINE(bool) rtTimerLnxCmpXchgState(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState,
                                        RTTIMERLNXSTATE enmCurState)
{
    return ASMAtomicCmpXchgU32((uint32_t volatile *)penmState, enmNewState, enmCurState);
}
#endif


/**
 * Gets the state.
 */
DECLINLINE(RTTIMERLNXSTATE) rtTimerLnxGetState(RTTIMERLNXSTATE volatile *penmState)
{
    return (RTTIMERLNXSTATE)ASMAtomicUoReadU32((uint32_t volatile *)penmState);
}

#ifdef RTTIMER_LINUX_WITH_HRTIMER

/**
 * Converts a nano second time stamp to ktime_t.
 *
 * ASSUMES RTTimeSystemNanoTS() is implemented using ktime_get_ts().
 *
 * @returns ktime_t.
 * @param   cNanoSecs   Nanoseconds.
 */
DECLINLINE(ktime_t) rtTimerLnxNanoToKt(uint64_t cNanoSecs)
{
    /* With some luck the compiler optimizes the division out of this... (Bet it doesn't.) */
    return ktime_set(cNanoSecs / 1000000000, cNanoSecs % 1000000000);
}

/**
 * Converts ktime_t to a nano second time stamp.
 *
 * ASSUMES RTTimeSystemNanoTS() is implemented using ktime_get_ts().
 *
 * @returns nano second time stamp.
 * @param   Kt          ktime_t.
 */
DECLINLINE(uint64_t) rtTimerLnxKtToNano(ktime_t Kt)
{
    return ktime_to_ns(Kt);
}

#endif /* RTTIMER_LINUX_WITH_HRTIMER */

/**
 * Converts a nano second interval to jiffies.
 *
 * @returns Jiffies.
 * @param   cNanoSecs   Nanoseconds.
 */
DECLINLINE(unsigned long) rtTimerLnxNanoToJiffies(uint64_t cNanoSecs)
{
    /* this can be made even better... */
    if (cNanoSecs > (uint64_t)TICK_NSEC * MAX_JIFFY_OFFSET)
        return MAX_JIFFY_OFFSET;
# if ARCH_BITS == 32
    if (RT_LIKELY(cNanoSecs <= UINT32_MAX))
        return ((uint32_t)cNanoSecs + (TICK_NSEC-1)) / TICK_NSEC;
# endif
    return (cNanoSecs + (TICK_NSEC-1)) / TICK_NSEC;
}
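
/*
 * Worked example (added; assumes HZ=250, i.e. TICK_NSEC = 4000000): a 10ms
 * interval is 2.5 ticks, and the round-up above yields
 * (10000000 + 3999999) / 4000000 = 3 jiffies.  The conversion thus never
 * undershoots the requested interval, at the cost of up to one extra tick.
 */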


/**
 * Starts a sub-timer (RTTimerStart).
 *
 * @param   pSubTimer   The sub-timer to start.
 * @param   u64Now      The current timestamp (RTTimeSystemNanoTS()).
 * @param   u64First    The interval from u64Now to the first time the timer should fire.
 * @param   fPinned     true = timer pinned to a specific CPU,
 *                      false = timer can migrate between CPUs.
 * @param   fHighRes    Whether the user requested a high resolution timer or not.
 */
static void rtTimerLnxStartSubTimer(PRTTIMERLNXSUBTIMER pSubTimer, uint64_t u64Now, uint64_t u64First,
                                    bool fPinned, bool fHighRes)
{
    /*
     * Calc when it should start firing.
     */
    uint64_t u64NextTS = u64Now + u64First;
    if (!fHighRes)
    {
        pSubTimer->u.Std.u64NextTS = u64NextTS;
        pSubTimer->u.Std.nsStartTS = u64NextTS;
    }
    RTTIMERLNX_LOG(("startsubtimer %p\n", pSubTimer->pParent));

    pSubTimer->iTick = 0;

#ifdef RTTIMER_LINUX_WITH_HRTIMER
    if (fHighRes)
        hrtimer_start(&pSubTimer->u.Hr.LnxTimer, rtTimerLnxNanoToKt(u64NextTS),
                      fPinned ? HRTIMER_MODE_ABS_PINNED : HRTIMER_MODE_ABS);
    else
#endif
    {
        unsigned long cJiffies = !u64First ? 0 : rtTimerLnxNanoToJiffies(u64First);
        pSubTimer->u.Std.ulNextJiffies  = jiffies + cJiffies;
        pSubTimer->u.Std.fFirstAfterChg = true;
#ifdef CONFIG_SMP
        if (fPinned)
        {
# if RTLNX_VER_MIN(4,8,0)
            mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
# else
            mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
# endif
        }
        else
#endif
            mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
    }

    /* Be a bit careful here since we could be racing the callback. */
    if (!rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_STARTING))
        rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_MP_STARTING);
}


/**
 * Stops a sub-timer (RTTimerStop and rtTimerLinuxMpEvent()).
 *
 * The caller has already changed the state, so we will not be in a callback
 * situation wrt the calling thread.
 *
 * @param   pSubTimer   The sub-timer.
 * @param   fHighRes    Whether the user requested a high resolution timer or not.
 */
static void rtTimerLnxStopSubTimer(PRTTIMERLNXSUBTIMER pSubTimer, bool fHighRes)
{
    RTTIMERLNX_LOG(("stopsubtimer %p %d\n", pSubTimer->pParent, fHighRes));
#ifdef RTTIMER_LINUX_WITH_HRTIMER
    if (fHighRes)
    {
        /* There is no equivalent to del_timer in the hrtimer API,
           hrtimer_cancel() == del_timer_sync(). Just like the WARN_ON in
           del_timer_sync() asserts, waiting for a timer callback to complete
           is deadlock prone, so don't do it. */
        int rc = hrtimer_try_to_cancel(&pSubTimer->u.Hr.LnxTimer);
        if (rc < 0)
        {
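            /* rc < 0 means the callback is executing right now, so a blocking
               cancel could deadlock (see the note above).  Push the expiry out
               to the end of time so a re-arm cannot fire, then retry the
               non-blocking cancel.  (Explanatory comment added.) */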
            hrtimer_start(&pSubTimer->u.Hr.LnxTimer, ktime_set(KTIME_SEC_MAX, 0), HRTIMER_MODE_ABS);
            hrtimer_try_to_cancel(&pSubTimer->u.Hr.LnxTimer);
        }
    }
    else
#endif
#if RTLNX_VER_MIN(6,15,0)
        timer_delete(&pSubTimer->u.Std.LnxTimer);
#else
        del_timer(&pSubTimer->u.Std.LnxTimer);
#endif

    rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED);
}


/**
 * Used by RTTimerDestroy and rtTimerLnxCallbackDestroy to do the actual work.
 *
 * @param   pTimer  The timer in question.
 */
static void rtTimerLnxDestroyIt(PRTTIMER pTimer)
{
    RTSPINLOCK hSpinlock = pTimer->hSpinlock;
    RTCPUID    iCpu;
    Assert(pTimer->fSuspended);
    RTTIMERLNX_LOG(("destroyit %p\n", pTimer));

    /*
     * Remove the MP notifications first because it'll reduce the risk of
     * us overtaking any MP event that might theoretically be racing us here.
     */
#ifdef CONFIG_SMP
    if (   pTimer->cCpus > 1
        && hSpinlock != NIL_RTSPINLOCK)
    {
        int rc = RTMpNotificationDeregister(rtTimerLinuxMpEvent, pTimer);
        AssertRC(rc);
    }
#endif /* CONFIG_SMP */

    /*
     * Invalidate the handle.
     */
    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);

    /*
     * Make sure all timers have stopped executing since we're stopping them in
     * an asynchronous manner up in rtTimerLnxStopSubTimer.
     */
    iCpu = pTimer->cCpus;
    while (iCpu-- > 0)
    {
#ifdef RTTIMER_LINUX_WITH_HRTIMER
        if (pTimer->fHighRes)
            hrtimer_cancel(&pTimer->aSubTimers[iCpu].u.Hr.LnxTimer);
        else
#endif
#if RTLNX_VER_MIN(6,15,0)
            timer_delete_sync(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
#else
            del_timer_sync(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
#endif
    }

    /*
     * Finally, free the resources.
     */
    RTMemFreeEx(pTimer, RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[pTimer->cCpus]));
    if (hSpinlock != NIL_RTSPINLOCK)
        RTSpinlockDestroy(hSpinlock);
}


/**
 * Workqueue callback (no DECLCALLBACK!) for deferred destruction.
 *
 * @param   pWork   Pointer to the DtorWorkqueueItem member of our timer
 *                  structure.
 */
static void rtTimerLnxDestroyDeferred(RTR0LNXWORKQUEUEITEM *pWork)
{
    PRTTIMER pTimer = RT_FROM_MEMBER(pWork, RTTIMER, DtorWorkqueueItem);
    rtTimerLnxDestroyIt(pTimer);
}


/**
 * Called when the timer was destroyed by the callback function.
 *
 * @param   pTimer      The timer.
 * @param   pSubTimer   The sub-timer which we're handling, the state of this
 *                      will be RTTIMERLNXSTATE_CB_DESTROYING.
 */
static void rtTimerLnxCallbackDestroy(PRTTIMER pTimer, PRTTIMERLNXSUBTIMER pSubTimer)
{
    /*
     * If it's an omni timer, the last dude does the destroying.
     */
    if (pTimer->cCpus > 1)
    {
        uint32_t iCpu = pTimer->cCpus;
        RTSpinlockAcquire(pTimer->hSpinlock);

        Assert(pSubTimer->enmState == RTTIMERLNXSTATE_CB_DESTROYING);
        rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED);

        while (iCpu-- > 0)
            if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) != RTTIMERLNXSTATE_STOPPED)
            {
                RTSpinlockRelease(pTimer->hSpinlock);
                return;
            }

        RTSpinlockRelease(pTimer->hSpinlock);
    }

    /*
     * Destroying a timer from the callback is unsafe since the callout code
     * might be touching the timer structure upon return (hrtimer does!). So,
     * we have to defer the actual destruction to the IPRT workqueue.
     */
    rtR0LnxWorkqueuePush(&pTimer->DtorWorkqueueItem, rtTimerLnxDestroyDeferred);
}


#ifdef CONFIG_SMP
/**
 * Deal with a sub-timer that has migrated.
 *
 * @param   pTimer      The timer.
 * @param   pSubTimer   The sub-timer.
 */
static void rtTimerLnxCallbackHandleMigration(PRTTIMER pTimer, PRTTIMERLNXSUBTIMER pSubTimer)
{
    RTTIMERLNXSTATE enmState;
    if (pTimer->cCpus > 1)
        RTSpinlockAcquire(pTimer->hSpinlock);

    do
    {
        enmState = rtTimerLnxGetState(&pSubTimer->enmState);
        switch (enmState)
        {
            case RTTIMERLNXSTATE_STOPPING:
            case RTTIMERLNXSTATE_MP_STOPPING:
                enmState = RTTIMERLNXSTATE_STOPPED;
                RT_FALL_THRU();
            case RTTIMERLNXSTATE_STOPPED:
                break;

            default:
                AssertMsgFailed(("%d\n", enmState));
                RT_FALL_THRU();
            case RTTIMERLNXSTATE_STARTING:
            case RTTIMERLNXSTATE_MP_STARTING:
            case RTTIMERLNXSTATE_ACTIVE:
            case RTTIMERLNXSTATE_CALLBACK:
            case RTTIMERLNXSTATE_CB_STOPPING:
            case RTTIMERLNXSTATE_CB_RESTARTING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, enmState))
                    enmState = RTTIMERLNXSTATE_STOPPED;
                break;

            case RTTIMERLNXSTATE_CB_DESTROYING:
            {
                if (pTimer->cCpus > 1)
                    RTSpinlockRelease(pTimer->hSpinlock);

                rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
                return;
            }
        }
    } while (enmState != RTTIMERLNXSTATE_STOPPED);

    if (pTimer->cCpus > 1)
        RTSpinlockRelease(pTimer->hSpinlock);
}
#endif /* CONFIG_SMP */


/**
 * The slow path of rtTimerLnxChangeToCallbackState.
 *
 * @returns true if changed successfully, false if not.
 * @param   pSubTimer   The sub-timer.
 */
static bool rtTimerLnxChangeToCallbackStateSlow(PRTTIMERLNXSUBTIMER pSubTimer)
{
    for (;;)
    {
        RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
        switch (enmState)
        {
            case RTTIMERLNXSTATE_ACTIVE:
            case RTTIMERLNXSTATE_STARTING:
            case RTTIMERLNXSTATE_MP_STARTING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CALLBACK, enmState))
                    return true;
                break;

            case RTTIMERLNXSTATE_CALLBACK:
            case RTTIMERLNXSTATE_CB_STOPPING:
            case RTTIMERLNXSTATE_CB_RESTARTING:
            case RTTIMERLNXSTATE_CB_DESTROYING:
                AssertMsgFailed(("%d\n", enmState)); RT_FALL_THRU();
            default:
                return false;
        }
        ASMNopPause();
    }
}


/**
 * Tries to change the sub-timer state to 'callback'.
 *
 * @returns true if changed successfully, false if not.
 * @param   pSubTimer   The sub-timer.
 */
DECLINLINE(bool) rtTimerLnxChangeToCallbackState(PRTTIMERLNXSUBTIMER pSubTimer)
{
    if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CALLBACK, RTTIMERLNXSTATE_ACTIVE)))
        return true;
    return rtTimerLnxChangeToCallbackStateSlow(pSubTimer);
}
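
/*
 * Note (added): this is the usual optimistic-cmpxchg split.  The inline
 * function handles the common ACTIVE -> CALLBACK transition with a single
 * compare-and-exchange; the slow path above spins (ASMNopPause) through the
 * transient STARTING/MP_STARTING states and refuses the transition when a
 * stop or destroy request is pending, in which case the callback bails out.
 */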


#ifdef RTTIMER_LINUX_WITH_HRTIMER
/**
 * Timer callback function for high resolution timers.
 *
 * @returns HRTIMER_NORESTART or HRTIMER_RESTART depending on whether it's a
 *          one-shot or interval timer.
 * @param   pHrTimer    Pointer to the sub-timer structure.
 */
static enum hrtimer_restart rtTimerLinuxHrCallback(struct hrtimer *pHrTimer)
{
    PRTTIMERLNXSUBTIMER pSubTimer = RT_FROM_MEMBER(pHrTimer, RTTIMERLNXSUBTIMER, u.Hr.LnxTimer);
    PRTTIMER            pTimer    = pSubTimer->pParent;

    RTTIMERLNX_LOG(("hrcallback %p\n", pTimer));
    if (RT_UNLIKELY(!rtTimerLnxChangeToCallbackState(pSubTimer)))
        return HRTIMER_NORESTART;

#ifdef CONFIG_SMP
    /*
     * Check for unwanted migration.
     */
    if (pTimer->fAllCpus || pTimer->fSpecificCpu)
    {
        RTCPUID idCpu = RTMpCpuId();
        if (RT_UNLIKELY(  pTimer->fAllCpus
                        ? (RTCPUID)(pSubTimer - &pTimer->aSubTimers[0]) != idCpu
                        : pTimer->idCpu != idCpu))
        {
            rtTimerLnxCallbackHandleMigration(pTimer, pSubTimer);
            return HRTIMER_NORESTART;
        }
    }
#endif

    if (pTimer->u64NanoInterval)
    {
        /*
         * Periodic timer, run it and update the native timer afterwards so
         * we can handle RTTimerStop and RTTimerChangeInterval from the
         * callback as well as a racing control thread.
         */
        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        hrtimer_add_expires_ns(&pSubTimer->u.Hr.LnxTimer, ASMAtomicReadU64(&pTimer->u64NanoInterval));
        if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CALLBACK)))
            return HRTIMER_RESTART;
    }
    else
    {
        /*
         * One shot timer (no omni), stop it before dispatching it.
         * Allow RTTimerStart as well as RTTimerDestroy to be called from
         * the callback.
         */
        ASMAtomicWriteBool(&pTimer->fSuspended, true);
        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CALLBACK)))
            return HRTIMER_NORESTART;
    }

    /*
     * Some state change occurred while we were in the callback routine.
     */
    for (;;)
    {
        RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
        switch (enmState)
        {
            case RTTIMERLNXSTATE_CB_DESTROYING:
                rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
                return HRTIMER_NORESTART;

            case RTTIMERLNXSTATE_CB_STOPPING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CB_STOPPING))
                    return HRTIMER_NORESTART;
                break;

            case RTTIMERLNXSTATE_CB_RESTARTING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CB_RESTARTING))
                {
                    pSubTimer->iTick = 0;
                    hrtimer_set_expires(&pSubTimer->u.Hr.LnxTimer, rtTimerLnxNanoToKt(pSubTimer->uNsRestartAt));
                    return HRTIMER_RESTART;
                }
                break;

            default:
                AssertMsgFailed(("%d\n", enmState));
                return HRTIMER_NORESTART;
        }
        ASMNopPause();
    }
}
#endif /* RTTIMER_LINUX_WITH_HRTIMER */


#if RTLNX_VER_MIN(4,15,0)
/**
 * Timer callback function for standard timers.
 *
 * @param   pLnxTimer   Pointer to the Linux timer structure.
 */
static void rtTimerLinuxStdCallback(struct timer_list *pLnxTimer)
{
    PRTTIMERLNXSUBTIMER pSubTimer = from_timer(pSubTimer, pLnxTimer, u.Std.LnxTimer);
#else
/**
 * Timer callback function for standard timers.
 *
 * @param   ulUser      Address of the sub-timer structure.
 */
static void rtTimerLinuxStdCallback(unsigned long ulUser)
{
    PRTTIMERLNXSUBTIMER pSubTimer = (PRTTIMERLNXSUBTIMER)ulUser;
#endif
    PRTTIMER pTimer = pSubTimer->pParent;

    RTTIMERLNX_LOG(("stdcallback %p\n", pTimer));
    if (RT_UNLIKELY(!rtTimerLnxChangeToCallbackState(pSubTimer)))
        return;

#ifdef CONFIG_SMP
    /*
     * Check for unwanted migration.
     */
    if (pTimer->fAllCpus || pTimer->fSpecificCpu)
    {
        RTCPUID idCpu = RTMpCpuId();
        if (RT_UNLIKELY(  pTimer->fAllCpus
                        ? (RTCPUID)(pSubTimer - &pTimer->aSubTimers[0]) != idCpu
                        : pTimer->idCpu != idCpu))
        {
            rtTimerLnxCallbackHandleMigration(pTimer, pSubTimer);
            return;
        }
    }
#endif

    if (pTimer->u64NanoInterval)
    {
        /*
         * Interval timer, calculate the next timeout.
         *
         * The first time around, we'll re-adjust the u.Std.u64NextTS to
         * try to prevent some jittering if we were started at a bad time.
         */
        const uint64_t iTick = ++pSubTimer->iTick;
        unsigned long  uCurJiffies = jiffies;
        unsigned long  ulNextJiffies;
        uint64_t       u64NanoInterval;
        unsigned long  cJiffies;
        unsigned long  flFlags;

        spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
        u64NanoInterval = pTimer->u64NanoInterval;
        cJiffies        = pTimer->cJiffies;
        if (RT_UNLIKELY(pSubTimer->u.Std.fFirstAfterChg))
        {
            pSubTimer->u.Std.fFirstAfterChg = false;
            pSubTimer->u.Std.u64NextTS      = RTTimeSystemNanoTS();
            pSubTimer->u.Std.nsStartTS      = pSubTimer->u.Std.u64NextTS - u64NanoInterval * (iTick - 1);
            pSubTimer->u.Std.ulNextJiffies  = uCurJiffies = jiffies;
        }
        spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);
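
        /*
         * Catch-up policy below (summary comment added): advance in whole
         * jiffies when the interval is an exact jiffy multiple, otherwise in
         * nanoseconds.  Either way, if we have fallen behind by more than
         * about a quarter of a tick, re-anchor roughly half an interval ahead
         * instead of firing a burst of back-to-back catch-up ticks.
         */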

        pSubTimer->u.Std.u64NextTS += u64NanoInterval;
        if (cJiffies)
        {
            ulNextJiffies = pSubTimer->u.Std.ulNextJiffies + cJiffies;
            pSubTimer->u.Std.ulNextJiffies = ulNextJiffies;
            if (time_after_eq(ulNextJiffies, uCurJiffies))
            { /* likely */ }
            else
            {
                unsigned long cJiffiesBehind = uCurJiffies - ulNextJiffies;
                ulNextJiffies = uCurJiffies + cJiffies / 2;
                if (cJiffiesBehind >= HZ / 4) /* Consider if we're lagging too far behind.  Screw the u64NextTS member. */
                    pSubTimer->u.Std.ulNextJiffies = ulNextJiffies;
                /*else: Don't update u.Std.ulNextJiffies so we can continue catching up in the next tick. */
            }
        }
        else
        {
            const uint64_t u64NanoTS = RTTimeSystemNanoTS();
            const int64_t  cNsBehind = u64NanoTS - pSubTimer->u.Std.u64NextTS;
            if (cNsBehind <= 0)
                ulNextJiffies = uCurJiffies + rtTimerLnxNanoToJiffies(pSubTimer->u.Std.u64NextTS - u64NanoTS);
            else if (u64NanoInterval >= RT_NS_1SEC_64 * 2 / HZ)
            {
                ulNextJiffies = uCurJiffies + rtTimerLnxNanoToJiffies(u64NanoInterval / 2);
                if (cNsBehind >= RT_NS_1SEC_64 / HZ / 4) /* Consider if we're lagging too far behind. */
                    pSubTimer->u.Std.u64NextTS = u64NanoTS + u64NanoInterval / 2;
            }
            else
            {
                ulNextJiffies = uCurJiffies + 1;
                if (cNsBehind >= RT_NS_1SEC_64 / HZ / 4) /* Consider if we're lagging too far behind. */
                    pSubTimer->u.Std.u64NextTS = u64NanoTS + RT_NS_1SEC_64 / HZ;
            }
            pSubTimer->u.Std.ulNextJiffies = ulNextJiffies;
        }

        /*
         * Run the timer and re-arm it unless the state changed.
         *
         * We must re-arm it afterwards as we're not in a position to undo this
         * operation if for instance someone stopped or destroyed us while we
         * were in the callback.  (Linux takes care of any races here.)
         */
        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
        if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CALLBACK)))
        {
#ifdef CONFIG_SMP
            if (pTimer->fSpecificCpu || pTimer->fAllCpus)
            {
# if RTLNX_VER_MIN(4,8,0)
                mod_timer(&pSubTimer->u.Std.LnxTimer, ulNextJiffies);
# else
                mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, ulNextJiffies);
# endif
            }
            else
#endif
                mod_timer(&pSubTimer->u.Std.LnxTimer, ulNextJiffies);
            return;
        }
    }
    else
    {
        /*
         * One shot timer, stop it before dispatching it.
         * Allow RTTimerStart as well as RTTimerDestroy to be called from
         * the callback.
         */
        ASMAtomicWriteBool(&pTimer->fSuspended, true);
        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CALLBACK)))
            return;
    }

    /*
     * Some state change occurred while we were in the callback routine.
     */
    for (;;)
    {
        RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
        switch (enmState)
        {
            case RTTIMERLNXSTATE_CB_DESTROYING:
                rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
                return;

            case RTTIMERLNXSTATE_CB_STOPPING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CB_STOPPING))
                    return;
                break;

            case RTTIMERLNXSTATE_CB_RESTARTING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CB_RESTARTING))
                {
                    uint64_t        u64NanoTS;
                    uint64_t        u64NextTS;
                    unsigned long   flFlags;

                    spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
                    u64NextTS = pSubTimer->uNsRestartAt;
                    u64NanoTS = RTTimeSystemNanoTS();
                    pSubTimer->iTick                = 0;
                    pSubTimer->u.Std.u64NextTS      = u64NextTS;
                    pSubTimer->u.Std.fFirstAfterChg = true;
                    pSubTimer->u.Std.ulNextJiffies  = u64NextTS > u64NanoTS
                                                    ? jiffies + rtTimerLnxNanoToJiffies(u64NextTS - u64NanoTS)
                                                    : jiffies;
                    spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);

#ifdef CONFIG_SMP
                    if (pTimer->fSpecificCpu || pTimer->fAllCpus)
                    {
# if RTLNX_VER_MIN(4,8,0)
                        mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
# else
                        mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
# endif
                    }
                    else
#endif
                        mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
                    return;
                }
                break;

            default:
                AssertMsgFailed(("%d\n", enmState));
                return;
        }
        ASMNopPause();
    }
}


#ifdef CONFIG_SMP

/**
 * Per-cpu callback function (RTMpOnAll/RTMpOnSpecific).
 *
 * @param   idCpu       The current CPU.
 * @param   pvUser1     Pointer to the timer.
 * @param   pvUser2     Pointer to the argument structure.
 */
static DECLCALLBACK(void) rtTimerLnxStartAllOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
    PRTTIMER pTimer = (PRTTIMER)pvUser1;
    Assert(idCpu < pTimer->cCpus);
    rtTimerLnxStartSubTimer(&pTimer->aSubTimers[idCpu], pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
}


/**
 * Worker for RTTimerStart() that takes care of the ugly bits.
 *
 * @returns RTTimerStart() return value.
 * @param   pTimer      The timer.
 * @param   pArgs       The argument structure.
 */
static int rtTimerLnxOmniStart(PRTTIMER pTimer, PRTTIMERLINUXSTARTONCPUARGS pArgs)
{
    RTCPUID  iCpu;
    RTCPUSET OnlineSet;
    RTCPUSET OnlineSet2;
    int      rc2;

    /*
     * Prepare all the sub-timers for the startup and then flag the timer
     * as a whole as non-suspended.  Make sure we get them all set up before
     * clearing fSuspended, as the MP handler will be waiting on this should
     * something happen while we're looping.
     */
    RTSpinlockAcquire(pTimer->hSpinlock);

    /* Just make it an omni timer restriction that no stop/start races are allowed. */
    for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
        if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) != RTTIMERLNXSTATE_STOPPED)
        {
            RTSpinlockRelease(pTimer->hSpinlock);
            return VERR_TIMER_BUSY;
        }

    do
    {
        RTMpGetOnlineSet(&OnlineSet);
        for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
        {
            Assert(pTimer->aSubTimers[iCpu].enmState != RTTIMERLNXSTATE_MP_STOPPING);
            rtTimerLnxSetState(&pTimer->aSubTimers[iCpu].enmState,
                               RTCpuSetIsMember(&OnlineSet, iCpu)
                               ? RTTIMERLNXSTATE_STARTING
                               : RTTIMERLNXSTATE_STOPPED);
        }
    } while (!RTCpuSetIsEqual(&OnlineSet, RTMpGetOnlineSet(&OnlineSet2)));

    ASMAtomicWriteBool(&pTimer->fSuspended, false);

    RTSpinlockRelease(pTimer->hSpinlock);

    /*
     * Start them (can't find any exported function that allows me to
     * do this without the cross calls).
     */
    pArgs->u64Now = RTTimeSystemNanoTS();
    rc2 = RTMpOnAll(rtTimerLnxStartAllOnCpu, pTimer, pArgs);
    AssertRC(rc2); /* screw this if it fails. */

    /*
     * Reset the sub-timers who didn't start up (ALL CPUs case).
     */
    RTSpinlockAcquire(pTimer->hSpinlock);

    for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
        if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_STARTING))
        {
            /** @todo very odd case for a rainy day. Cpus that temporarily went offline while
             * we were between calls need to be nudged as the MP handler will ignore events for
             * them because of the STARTING state. This is an extremely unlikely case - not that
             * that means anything in my experience... ;-) */
            RTTIMERLNX_LOG(("what!? iCpu=%u -> didn't start\n", iCpu));
        }

    RTSpinlockRelease(pTimer->hSpinlock);

    return VINF_SUCCESS;
}


/**
 * Worker for RTTimerStop() that takes care of the ugly SMP bits.
 *
 * @returns true if there were any active callbacks, false if not.
 * @param   pTimer          The timer (valid).
 * @param   fForDestroy     Whether this is for RTTimerDestroy or not.
 */
static bool rtTimerLnxOmniStop(PRTTIMER pTimer, bool fForDestroy)
{
    bool            fActiveCallbacks = false;
    RTCPUID         iCpu;
    RTTIMERLNXSTATE enmState;

    /*
     * Mark the timer as suspended and flag all timers as stopping, except
     * for those being stopped by an MP event.
     */
    RTSpinlockAcquire(pTimer->hSpinlock);

    ASMAtomicWriteBool(&pTimer->fSuspended, true);
    for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
    {
        for (;;)
        {
            enmState = rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState);
            if (   enmState == RTTIMERLNXSTATE_STOPPED
                || enmState == RTTIMERLNXSTATE_MP_STOPPING)
                break;
            if (   enmState == RTTIMERLNXSTATE_CALLBACK
                || enmState == RTTIMERLNXSTATE_CB_STOPPING
                || enmState == RTTIMERLNXSTATE_CB_RESTARTING)
            {
                Assert(enmState != RTTIMERLNXSTATE_CB_STOPPING || fForDestroy);
                if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState,
                                           !fForDestroy ? RTTIMERLNXSTATE_CB_STOPPING : RTTIMERLNXSTATE_CB_DESTROYING,
                                           enmState))
                {
                    fActiveCallbacks = true;
                    break;
                }
            }
            else
            {
                Assert(enmState == RTTIMERLNXSTATE_ACTIVE);
                if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_STOPPING, enmState))
                    break;
            }
            ASMNopPause();
        }
    }

    RTSpinlockRelease(pTimer->hSpinlock);

    /*
     * Do the actual stopping. Fortunately, this doesn't require any IPIs.
     * Unfortunately it cannot be done synchronously.
     */
    for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
        if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) == RTTIMERLNXSTATE_STOPPING)
            rtTimerLnxStopSubTimer(&pTimer->aSubTimers[iCpu], pTimer->fHighRes);

    return fActiveCallbacks;
}


/**
 * Per-cpu callback function (RTMpOnSpecific) used by rtTimerLinuxMpEvent()
 * to start a sub-timer on a cpu that has just come online.
 *
 * @param   idCpu       The current CPU.
 * @param   pvUser1     Pointer to the timer.
 * @param   pvUser2     Pointer to the argument structure.
 */
static DECLCALLBACK(void) rtTimerLinuxMpStartOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
    PRTTIMER pTimer = (PRTTIMER)pvUser1;
    RTSPINLOCK hSpinlock;
    Assert(idCpu < pTimer->cCpus);

    /*
     * We have to be kind of careful here as we might be racing RTTimerStop
     * (and/or RTTimerDestroy), thus the paranoia.
     */
    hSpinlock = pTimer->hSpinlock;
    if (   hSpinlock != NIL_RTSPINLOCK
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTSpinlockAcquire(hSpinlock);

        if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
            && pTimer->u32Magic == RTTIMER_MAGIC)
        {
            /* We're sane and the timer is not suspended yet. */
            PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
            if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
                rtTimerLnxStartSubTimer(pSubTimer, pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
        }

        RTSpinlockRelease(hSpinlock);
    }
}


/**
 * MP event notification callback.
 *
 * @param   enmEvent    The event.
 * @param   idCpu       The cpu it applies to.
 * @param   pvUser      The timer.
 */
static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
{
    PRTTIMER            pTimer    = (PRTTIMER)pvUser;
    PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
    RTSPINLOCK          hSpinlock;

    Assert(idCpu < pTimer->cCpus);

    /*
     * Some initial paranoia.
     */
    if (pTimer->u32Magic != RTTIMER_MAGIC)
        return;
    hSpinlock = pTimer->hSpinlock;
    if (hSpinlock == NIL_RTSPINLOCK)
        return;

    RTSpinlockAcquire(hSpinlock);

    /* Is it active? */
    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        switch (enmEvent)
        {
            /*
             * Try to do it without leaving the spin lock, but if we have to,
             * retake it when we're on the right cpu.
             */
            case RTMPEVENT_ONLINE:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
                {
                    RTTIMERLINUXSTARTONCPUARGS Args;
                    Args.u64Now = RTTimeSystemNanoTS();
                    Args.u64First = 0;

                    if (RTMpCpuId() == idCpu)
                        rtTimerLnxStartSubTimer(pSubTimer, Args.u64Now, Args.u64First, true /*fPinned*/, pTimer->fHighRes);
                    else
                    {
                        rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED); /* we'll recheck it. */
                        RTSpinlockRelease(hSpinlock);

                        RTMpOnSpecific(idCpu, rtTimerLinuxMpStartOnCpu, pTimer, &Args);
                        return; /* we've left the spinlock */
                    }
                }
                break;

            /*
             * The CPU is (going) offline, make sure the sub-timer is stopped.
             *
             * Linux will migrate it to a different CPU, but we don't want this. The
             * timer function is checking for this.
             */
            case RTMPEVENT_OFFLINE:
            {
                RTTIMERLNXSTATE enmState;
                while (   (enmState = rtTimerLnxGetState(&pSubTimer->enmState)) == RTTIMERLNXSTATE_ACTIVE
                       || enmState == RTTIMERLNXSTATE_CALLBACK
                       || enmState == RTTIMERLNXSTATE_CB_RESTARTING)
                {
                    if (enmState == RTTIMERLNXSTATE_ACTIVE)
                    {
                        if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STOPPING, RTTIMERLNXSTATE_ACTIVE))
                        {
                            RTSpinlockRelease(hSpinlock);

                            rtTimerLnxStopSubTimer(pSubTimer, pTimer->fHighRes);
                            return; /* we've left the spinlock */
                        }
                    }
                    else if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CB_STOPPING, enmState))
                        break;

                    /* State not stable, try again. */
                    ASMNopPause();
                }
                break;
            }
        }
    }

    RTSpinlockRelease(hSpinlock);
}

#endif /* CONFIG_SMP */


/**
 * Callback function used by RTTimerStart via RTMpOnSpecific to start a timer
 * running on a specific CPU.
 *
 * @param   idCpu       The current CPU.
 * @param   pvUser1     Pointer to the timer.
 * @param   pvUser2     Pointer to the argument structure.
 */
static DECLCALLBACK(void) rtTimerLnxStartOnSpecificCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
    PRTTIMER pTimer = (PRTTIMER)pvUser1;
    RT_NOREF_PV(idCpu);
    rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
}


RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMERLINUXSTARTONCPUARGS Args;
    int rc2;
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        return VERR_TIMER_ACTIVE;
    RTTIMERLNX_LOG(("start %p cCpus=%d\n", pTimer, pTimer->cCpus));

    Args.u64First = u64First;
#ifdef CONFIG_SMP
    /*
     * Omni timer?
     */
    if (pTimer->fAllCpus)
    {
        rc2 = rtTimerLnxOmniStart(pTimer, &Args);
        IPRT_LINUX_RESTORE_EFL_AC();
        return rc2;
    }
#endif

    /*
     * Simple timer - pretty straightforward if it wasn't for restarting.
     */
    Args.u64Now = RTTimeSystemNanoTS();
    ASMAtomicWriteU64(&pTimer->aSubTimers[0].uNsRestartAt, Args.u64Now + u64First);
    for (;;)
    {
        RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[0].enmState);
        switch (enmState)
        {
            case RTTIMERLNXSTATE_STOPPED:
                if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STARTING, RTTIMERLNXSTATE_STOPPED))
                {
                    ASMAtomicWriteBool(&pTimer->fSuspended, false);
                    if (!pTimer->fSpecificCpu)
                        rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], Args.u64Now, Args.u64First,
                                                false /*fPinned*/, pTimer->fHighRes);
                    else
                    {
                        rc2 = RTMpOnSpecific(pTimer->idCpu, rtTimerLnxStartOnSpecificCpu, pTimer, &Args);
                        if (RT_FAILURE(rc2))
                        {
                            /* Suspend it, the cpu id is probably invalid or offline. */
                            ASMAtomicWriteBool(&pTimer->fSuspended, true);
                            rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPED);
                            return rc2;
                        }
                    }
                    IPRT_LINUX_RESTORE_EFL_AC();
                    return VINF_SUCCESS;
                }
                break;

            case RTTIMERLNXSTATE_CALLBACK:
            case RTTIMERLNXSTATE_CB_STOPPING:
                if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_CB_RESTARTING, enmState))
                {
                    ASMAtomicWriteBool(&pTimer->fSuspended, false);
                    IPRT_LINUX_RESTORE_EFL_AC();
                    return VINF_SUCCESS;
                }
                break;

            default:
                AssertMsgFailed(("%d\n", enmState));
                IPRT_LINUX_RESTORE_EFL_AC();
                return VERR_INTERNAL_ERROR_4;
        }
        ASMNopPause();
    }
}
RT_EXPORT_SYMBOL(RTTimerStart);


/**
 * Common worker for RTTimerStop and RTTimerDestroy.
 *
 * @returns true if there were any active callbacks, false if not.
 * @param   pTimer          The timer to stop.
 * @param   fForDestroy     Whether it's RTTimerDestroy calling or not.
 */
static bool rtTimerLnxStop(PRTTIMER pTimer, bool fForDestroy)
{
    RTTIMERLNX_LOG(("lnxstop %p %d\n", pTimer, fForDestroy));
#ifdef CONFIG_SMP
    /*
     * Omni timer?
     */
    if (pTimer->fAllCpus)
        return rtTimerLnxOmniStop(pTimer, fForDestroy);
#endif

    /*
     * Simple timer.
     */
    ASMAtomicWriteBool(&pTimer->fSuspended, true);
    for (;;)
    {
        RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[0].enmState);
        switch (enmState)
        {
            case RTTIMERLNXSTATE_ACTIVE:
                if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPING, RTTIMERLNXSTATE_ACTIVE))
                {
                    rtTimerLnxStopSubTimer(&pTimer->aSubTimers[0], pTimer->fHighRes);
                    return false;
                }
                break;

            case RTTIMERLNXSTATE_CALLBACK:
            case RTTIMERLNXSTATE_CB_RESTARTING:
            case RTTIMERLNXSTATE_CB_STOPPING:
                Assert(enmState != RTTIMERLNXSTATE_CB_STOPPING || fForDestroy);
                if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState,
                                           !fForDestroy ? RTTIMERLNXSTATE_CB_STOPPING : RTTIMERLNXSTATE_CB_DESTROYING,
                                           enmState))
                    return true;
                break;

            case RTTIMERLNXSTATE_STOPPED:
                return false;

            case RTTIMERLNXSTATE_CB_DESTROYING:
                AssertMsgFailed(("enmState=%d pTimer=%p\n", enmState, pTimer));
                return true;

            default:
            case RTTIMERLNXSTATE_STARTING:
            case RTTIMERLNXSTATE_MP_STARTING:
            case RTTIMERLNXSTATE_STOPPING:
            case RTTIMERLNXSTATE_MP_STOPPING:
                AssertMsgFailed(("enmState=%d pTimer=%p\n", enmState, pTimer));
                return false;
        }

        /* State not stable, try again. */
        ASMNopPause();
    }
}


RTDECL(int) RTTimerStop(PRTTIMER pTimer)
{
    /*
     * Validate.
     */
    IPRT_LINUX_SAVE_EFL_AC();
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
    RTTIMERLNX_LOG(("stop %p\n", pTimer));

    if (ASMAtomicUoReadBool(&pTimer->fSuspended))
        return VERR_TIMER_SUSPENDED;

    rtTimerLnxStop(pTimer, false /*fForDestroy*/);

    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTTimerStop);


RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
{
    unsigned long cJiffies;
    unsigned long flFlags;
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(u64NanoInterval, VERR_INVALID_PARAMETER);
    AssertReturn(u64NanoInterval < UINT64_MAX / 8, VERR_INVALID_PARAMETER);
    AssertReturn(pTimer->u64NanoInterval, VERR_INVALID_STATE);
    RTTIMERLNX_LOG(("change %p %llu\n", pTimer, u64NanoInterval));

#ifdef RTTIMER_LINUX_WITH_HRTIMER
    /*
     * For the high resolution timers it is easy since we don't care so much
     * about when it is applied to the sub-timers.
     */
    if (pTimer->fHighRes)
    {
        ASMAtomicWriteU64(&pTimer->u64NanoInterval, u64NanoInterval);
        IPRT_LINUX_RESTORE_EFL_AC();
        return VINF_SUCCESS;
    }
#endif

    /*
     * Standard timers have a bit more complicated way of calculating
     * their interval and such. So, forget omni timers for now.
     */
    if (pTimer->cCpus > 1)
        return VERR_NOT_SUPPORTED;

    cJiffies = u64NanoInterval / (RT_NS_1SEC / HZ);
    if (cJiffies * (RT_NS_1SEC / HZ) != u64NanoInterval)
        cJiffies = 0;

    spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
    pTimer->aSubTimers[0].u.Std.fFirstAfterChg = true;
    pTimer->cJiffies = cJiffies;
    ASMAtomicWriteU64(&pTimer->u64NanoInterval, u64NanoInterval);
    spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTTimerChangeInterval);


RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
{
    bool fCanDestroy;
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Validate. It's ok to pass NULL pointer.
     */
    if (pTimer == /*NIL_RTTIMER*/ NULL)
        return VINF_SUCCESS;
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
    RTTIMERLNX_LOG(("destroy %p\n", pTimer));
/** @todo We should invalidate the magic here! */

    /*
     * Stop the timer if it's still active, then destroy it if we can.
     */
    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        fCanDestroy = rtTimerLnxStop(pTimer, true /*fForDestroy*/);
    else
    {
        uint32_t iCpu = pTimer->cCpus;
        if (pTimer->cCpus > 1)
            RTSpinlockAcquire(pTimer->hSpinlock);

        fCanDestroy = true;
        while (iCpu-- > 0)
        {
            for (;;)
            {
                RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState);
                switch (enmState)
                {
                    case RTTIMERLNXSTATE_CALLBACK:
                    case RTTIMERLNXSTATE_CB_RESTARTING:
                    case RTTIMERLNXSTATE_CB_STOPPING:
                        if (!rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_CB_DESTROYING, enmState))
                            continue;
                        fCanDestroy = false;
                        break;

                    case RTTIMERLNXSTATE_CB_DESTROYING:
                        AssertMsgFailed(("%d\n", enmState));
                        fCanDestroy = false;
                        break;
                    default:
                        break;
                }
                break;
            }
        }

        if (pTimer->cCpus > 1)
            RTSpinlockRelease(pTimer->hSpinlock);
    }

    if (fCanDestroy)
    {
        /* For paranoid reasons, defer actually destroying the timer when
           in atomic or interrupt context. */
#if RTLNX_VER_MIN(2,5,32)
        if (in_atomic() || in_interrupt())
#else
        if (in_interrupt())
#endif
            rtR0LnxWorkqueuePush(&pTimer->DtorWorkqueueItem, rtTimerLnxDestroyDeferred);
        else
            rtTimerLnxDestroyIt(pTimer);
    }

    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTTimerDestroy);

RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    PRTTIMER pTimer;
    RTCPUID  iCpu;
    unsigned cCpus;
    int      rc;
    IPRT_LINUX_SAVE_EFL_AC();

    rtR0LnxWorkqueueFlush();                /* for 2.4 */
    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_INVALID_PARAMETER;
    }
    if (    (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        &&  (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
        &&  !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_CPU_NOT_FOUND;
    }

    /*
     * Allocate the timer handle.
     */
    cCpus = 1;
#ifdef CONFIG_SMP
    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
    {
        cCpus = RTMpGetMaxCpuId() + 1;
        Assert(cCpus <= RTCPUSET_MAX_CPUS); /* On linux we have a 1:1 relationship between cpuid and set index. */
        AssertReturnStmt(u64NanoInterval, IPRT_LINUX_RESTORE_EFL_AC(), VERR_NOT_IMPLEMENTED); /* We don't implement single shot on all cpus, sorry. */
    }
#endif

    rc = RTMemAllocEx(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cCpus]), 0,
                      RTMEMALLOCEX_FLAGS_ZEROED | RTMEMALLOCEX_FLAGS_ANY_CTX_FREE, (void **)&pTimer);
    if (RT_FAILURE(rc))
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return rc;
    }

    /*
     * Initialize it.
     */
    pTimer->u32Magic     = RTTIMER_MAGIC;
    pTimer->hSpinlock    = NIL_RTSPINLOCK;
    pTimer->fSuspended   = true;
    pTimer->fHighRes     = !!(fFlags & RTTIMER_FLAGS_HIGH_RES);
#ifdef CONFIG_SMP
    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
    pTimer->fAllCpus     = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    pTimer->idCpu        = pTimer->fSpecificCpu
                         ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)
                         : NIL_RTCPUID;
#else
    pTimer->fSpecificCpu = !!(fFlags & RTTIMER_FLAGS_CPU_SPECIFIC);
    pTimer->idCpu        = RTMpCpuId();
#endif
    pTimer->cCpus        = cCpus;
    pTimer->pfnTimer     = pfnTimer;
    pTimer->pvUser       = pvUser;
    pTimer->u64NanoInterval = u64NanoInterval;
    pTimer->cJiffies     = u64NanoInterval / (RT_NS_1SEC / HZ);
    if (pTimer->cJiffies * (RT_NS_1SEC / HZ) != u64NanoInterval)
        pTimer->cJiffies = 0;
    spin_lock_init(&pTimer->ChgIntLock);

    for (iCpu = 0; iCpu < cCpus; iCpu++)
    {
#ifdef RTTIMER_LINUX_WITH_HRTIMER
        if (pTimer->fHighRes)
        {
# if RTLNX_VER_MIN(6,15,0)
            hrtimer_setup(&pTimer->aSubTimers[iCpu].u.Hr.LnxTimer,
                          rtTimerLinuxHrCallback, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
# else
            hrtimer_init(&pTimer->aSubTimers[iCpu].u.Hr.LnxTimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
            pTimer->aSubTimers[iCpu].u.Hr.LnxTimer.function = rtTimerLinuxHrCallback;
# endif
        }
        else
#endif
        {
#if RTLNX_VER_MIN(4,15,0)
            timer_setup(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer, rtTimerLinuxStdCallback, TIMER_PINNED);
#elif RTLNX_VER_MIN(4,8,0)
            init_timer_pinned(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
#else
            init_timer(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
#endif
#if RTLNX_VER_MAX(4,15,0)
            pTimer->aSubTimers[iCpu].u.Std.LnxTimer.data     = (unsigned long)&pTimer->aSubTimers[iCpu];
            pTimer->aSubTimers[iCpu].u.Std.LnxTimer.function = rtTimerLinuxStdCallback;
#endif
            pTimer->aSubTimers[iCpu].u.Std.LnxTimer.expires  = jiffies;
            pTimer->aSubTimers[iCpu].u.Std.u64NextTS         = 0;
        }
        pTimer->aSubTimers[iCpu].iTick    = 0;
        pTimer->aSubTimers[iCpu].pParent  = pTimer;
        pTimer->aSubTimers[iCpu].enmState = RTTIMERLNXSTATE_STOPPED;
    }

#ifdef CONFIG_SMP
    /*
     * If this is running on ALL cpus, we'll have to register a callback
     * for MP events (so timers can be started/stopped on cpus going
     * online/offline). We also create the spinlock for synchronizing
     * stop/start/mp-event.
     */
    if (cCpus > 1)
    {
        int rc = RTSpinlockCreate(&pTimer->hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTTimerLnx");
        if (RT_SUCCESS(rc))
            rc = RTMpNotificationRegister(rtTimerLinuxMpEvent, pTimer);
        else
            pTimer->hSpinlock = NIL_RTSPINLOCK;
        if (RT_FAILURE(rc))
        {
            RTTimerDestroy(pTimer);
            IPRT_LINUX_RESTORE_EFL_AC();
            return rc;
        }
    }
#endif /* CONFIG_SMP */

    RTTIMERLNX_LOG(("create %p hires=%d fFlags=%#x cCpus=%u\n", pTimer, pTimer->fHighRes, fFlags, cCpus));
    *ppTimer = pTimer;
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTTimerCreateEx);
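
/*
 * Usage sketch (added for illustration; not part of IPRT itself -- the names
 * myTimerTick and pMyTimer are hypothetical).  Creating and running a
 * periodic 10ms high resolution timer from ring-0 code:
 *
 *     static DECLCALLBACK(void) myTimerTick(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
 *     {
 *         // Timer context is interrupt-like: no blocking or sleeping here.
 *     }
 *
 *     PRTTIMER pMyTimer;
 *     int rc = RTTimerCreateEx(&pMyTimer, UINT64_C(10000000),   // 10ms in ns
 *                              RTTIMER_FLAGS_HIGH_RES, myTimerTick, NULL);
 *     if (RT_SUCCESS(rc))
 *         rc = RTTimerStart(pMyTimer, 0);   // u64First=0: fire as soon as possible
 *     ...
 *     RTTimerStop(pMyTimer);
 *     RTTimerDestroy(pMyTimer);
 */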


RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
{
#if 0 /** @todo Not sure if this is what we want or not... Add new API for
       *        querying the resolution of the high res timers? */
    struct timespec Ts;
    int rc;
    IPRT_LINUX_SAVE_EFL_AC();
    rc = hrtimer_get_res(CLOCK_MONOTONIC, &Ts);
    IPRT_LINUX_RESTORE_EFL_AC();
    if (!rc)
    {
        Assert(!Ts.tv_sec);
        return Ts.tv_nsec;
    }
#endif
    /* */
#if RTLNX_VER_MAX(4,9,0) || RTLNX_VER_MIN(4,13,0)
    /* On 4.9, 4.10 and 4.12 we've observed tstRTR0Timer failures of the omni timer tests
       where we get about half of the ticks we want.  The failing test is using this value
       as interval.  So, this is a very very crude hack to try to make omni timers work
       correctly without actually knowing what's going wrong... */
    return RT_NS_1SEC * 2 / HZ; /* ns */
#else
    return RT_NS_1SEC / HZ; /* ns */
#endif
}
RT_EXPORT_SYMBOL(RTTimerGetSystemGranularity);
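
/*
 * Worked example (added; assumes HZ=250): RT_NS_1SEC / HZ = 4000000, so this
 * reports a 4ms granularity, or 8ms (RT_NS_1SEC * 2 / HZ) on the kernel
 * version ranges covered by the workaround above.
 */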


RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
{
    RT_NOREF_PV(u32Request); RT_NOREF_PV(*pu32Granted);
    return VERR_NOT_SUPPORTED;
}
RT_EXPORT_SYMBOL(RTTimerRequestSystemGranularity);


RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
{
    RT_NOREF_PV(u32Granted);
    return VERR_NOT_SUPPORTED;
}
RT_EXPORT_SYMBOL(RTTimerReleaseSystemGranularity);


RTDECL(bool) RTTimerCanDoHighResolution(void)
{
#ifdef RTTIMER_LINUX_WITH_HRTIMER
    return true;
#else
    return false;
#endif
}
RT_EXPORT_SYMBOL(RTTimerCanDoHighResolution);