source: vbox/trunk/src/VBox/Runtime/r0drv/linux/timer-r0drv-linux.c@96763

Last change on this file since 96763 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

1/* $Id: timer-r0drv-linux.c 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * IPRT - Timers, Ring-0 Driver, Linux.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#include "the-linux-kernel.h"
42#include "internal/iprt.h"
43
44#include <iprt/timer.h>
45#include <iprt/time.h>
46#include <iprt/mp.h>
47#include <iprt/cpuset.h>
48#include <iprt/spinlock.h>
49#include <iprt/err.h>
50#include <iprt/asm.h>
51#include <iprt/assert.h>
52#include <iprt/alloc.h>
53
54#include "internal/magics.h"
55
56/** @def RTTIMER_LINUX_WITH_HRTIMER
57 * Whether to use high resolution timers. */
58#if !defined(RTTIMER_LINUX_WITH_HRTIMER) \
59 && defined(IPRT_LINUX_HAS_HRTIMER)
60# define RTTIMER_LINUX_WITH_HRTIMER
61#endif
62
63#if RTLNX_VER_MAX(2,6,31)
64# define mod_timer_pinned mod_timer
65# define HRTIMER_MODE_ABS_PINNED HRTIMER_MODE_ABS
66#endif
67
68
69/*********************************************************************************************************************************
70* Structures and Typedefs *
71*********************************************************************************************************************************/
72/**
73 * Timer state machine.
74 *
75 * This is used to try to handle the issues with MP events and
76 * timers that run on all CPUs. It's relatively nasty :-/
77 */
78typedef enum RTTIMERLNXSTATE
79{
80 /** Stopped. */
81 RTTIMERLNXSTATE_STOPPED = 0,
82 /** Transient state; next ACTIVE. */
83 RTTIMERLNXSTATE_STARTING,
84 /** Transient state; next ACTIVE. (not really necessary) */
85 RTTIMERLNXSTATE_MP_STARTING,
86 /** Active. */
87 RTTIMERLNXSTATE_ACTIVE,
88 /** Active and in callback; next ACTIVE, STOPPED or CB_DESTROYING. */
89 RTTIMERLNXSTATE_CALLBACK,
90 /** Stopped while in the callback; next STOPPED. */
91 RTTIMERLNXSTATE_CB_STOPPING,
92 /** Restarted while in the callback; next ACTIVE, STOPPED or CB_DESTROYING. */
93 RTTIMERLNXSTATE_CB_RESTARTING,
94 /** The callback shall destroy the timer; next STOPPED. */
95 RTTIMERLNXSTATE_CB_DESTROYING,
96 /** Transient state; next STOPPED. */
97 RTTIMERLNXSTATE_STOPPING,
98 /** Transient state; next STOPPED. */
99 RTTIMERLNXSTATE_MP_STOPPING,
100 /** The usual 32-bit hack. */
101 RTTIMERLNXSTATE_32BIT_HACK = 0x7fffffff
102} RTTIMERLNXSTATE;
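/* Summary of the usual transitions, derived from the code below (a reading
   aid, not part of the original source):
     start:    STOPPED -> STARTING -> ACTIVE
     tick:     ACTIVE -> CALLBACK -> ACTIVE              (interval timer)
               ACTIVE -> CALLBACK -> STOPPED             (one-shot timer)
     stop:     ACTIVE -> STOPPING -> STOPPED             (from another thread)
               CALLBACK -> CB_STOPPING -> STOPPED        (from the callback)
     restart:  CALLBACK -> CB_RESTARTING -> ACTIVE       (from the callback)
     destroy:  CALLBACK -> CB_DESTROYING -> STOPPED      (from the callback)
     MP event: STOPPED -> MP_STARTING -> ACTIVE and
               ACTIVE -> MP_STOPPING -> STOPPED */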
103
104
105/**
106 * A Linux sub-timer.
107 */
108typedef struct RTTIMERLNXSUBTIMER
109{
110 /** Timer specific data. */
111 union
112 {
113#if defined(RTTIMER_LINUX_WITH_HRTIMER)
114 /** High resolution timer. */
115 struct
116 {
117 /** The linux timer structure. */
118 struct hrtimer LnxTimer;
119 } Hr;
120#endif
121 /** Standard timer. */
122 struct
123 {
124 /** The linux timer structure. */
125 struct timer_list LnxTimer;
126 /** The start of the current run (ns).
127 * This is used to calculate when the timer ought to fire the next time. */
128 uint64_t u64NextTS;
129 /** When the timer was started. */
130 uint64_t nsStartTS;
131 /** The u64NextTS in jiffies. */
132 unsigned long ulNextJiffies;
133 /** Set when starting or changing the timer so that nsStartTS
134 * and u64NextTS get reinitialized (eliminating some jitter). */
135 bool volatile fFirstAfterChg;
136 } Std;
137 } u;
138 /** The current tick number. */
139 uint64_t iTick;
140 /** Restart the single shot timer at this specific time.
141 * Used when a single shot timer is restarted from the callback. */
142 uint64_t volatile uNsRestartAt;
143 /** Pointer to the parent timer. */
144 PRTTIMER pParent;
145 /** The current sub-timer state. */
146 RTTIMERLNXSTATE volatile enmState;
147} RTTIMERLNXSUBTIMER;
148/** Pointer to a linux sub-timer. */
149typedef RTTIMERLNXSUBTIMER *PRTTIMERLNXSUBTIMER;
150
151
152/**
153 * The internal representation of a Linux timer handle.
154 */
155typedef struct RTTIMER
156{
157 /** Magic.
158 * This is RTTIMER_MAGIC, but changes to something else before the timer
159 * is destroyed to indicate clearly that the timer is going away. */
160 uint32_t volatile u32Magic;
161 /** Spinlock synchronizing the fSuspended and MP event handling.
162 * This is NIL_RTSPINLOCK if cCpus == 1. */
163 RTSPINLOCK hSpinlock;
164 /** Flag indicating that the timer is suspended. */
165 bool volatile fSuspended;
166 /** Whether the timer must run on one specific CPU or not. */
167 bool fSpecificCpu;
168#ifdef CONFIG_SMP
169 /** Whether the timer must run on all CPUs or not. */
170 bool fAllCpus;
171#endif /* else: All -> specific on non-SMP kernels */
172 /** Whether it is a high resolution timer or a standard one. */
173 bool fHighRes;
174 /** The id of the CPU it must run on if fSpecificCpu is set. */
175 RTCPUID idCpu;
176 /** The number of CPUs this timer should run on. */
177 RTCPUID cCpus;
178 /** Callback. */
179 PFNRTTIMER pfnTimer;
180 /** User argument. */
181 void *pvUser;
182 /** The timer interval. 0 if one-shot. */
183 uint64_t volatile u64NanoInterval;
184 /** This is set to the number of jiffies between ticks if the interval is
185 * an exact number of jiffies. (Standard timers only.) */
186 unsigned long volatile cJiffies;
187 /** The change interval spinlock for standard timers only. */
188 spinlock_t ChgIntLock;
189 /** Workqueue item for delayed destruction. */
190 RTR0LNXWORKQUEUEITEM DtorWorkqueueItem;
191 /** Sub-timers.
192 * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
193 * an entry for all possible cpus. In that case the index will be the same as
194 * for the RTCpuSet. */
195 RTTIMERLNXSUBTIMER aSubTimers[1];
196} RTTIMER;
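/* Illustrative sketch (not from the original source): aSubTimers[1] is a
   variable-length trailer, so the timer header and one sub-timer per
   possible CPU live in a single allocation. This mirrors what
   RTTimerCreateEx and rtTimerLnxDestroyIt below actually do. */
#if 0
PRTTIMER pTimer;
int rc = RTMemAllocEx(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cCpus]), 0,
                      RTMEMALLOCEX_FLAGS_ZEROED | RTMEMALLOCEX_FLAGS_ANY_CTX_FREE,
                      (void **)&pTimer);
/* ... and the matching free: */
RTMemFreeEx(pTimer, RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[pTimer->cCpus]));
#endif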
197
198
199/**
200 * An argument package for the various start-on-CPU worker callbacks.
201 */
202typedef struct RTTIMERLINUXSTARTONCPUARGS
203{
204 /** The current time (RTTimeSystemNanoTS). */
205 uint64_t u64Now;
206 /** When to start firing (delta). */
207 uint64_t u64First;
208} RTTIMERLINUXSTARTONCPUARGS;
209/** Pointer to a start-on-CPU argument package. */
210typedef RTTIMERLINUXSTARTONCPUARGS *PRTTIMERLINUXSTARTONCPUARGS;
211
212
213/*********************************************************************************************************************************
214* Internal Functions *
215*********************************************************************************************************************************/
216#ifdef CONFIG_SMP
217static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
218#endif
219
220#if 0
221#define DEBUG_HACKING
222#include <iprt/string.h>
223#include <iprt/asm-amd64-x86.h>
224static void myLogBackdoorPrintf(const char *pszFormat, ...)
225{
226 char szTmp[256];
227 va_list args;
228 size_t cb;
229
230 cb = RTStrPrintf(szTmp, sizeof(szTmp) - 10, "%d: ", RTMpCpuId());
231 va_start(args, pszFormat);
232 cb += RTStrPrintfV(&szTmp[cb], sizeof(szTmp) - cb, pszFormat, args);
233 va_end(args);
234
235 ASMOutStrU8(0x504, (uint8_t *)&szTmp[0], cb);
236}
237# define RTAssertMsg1Weak(pszExpr, uLine, pszFile, pszFunction) \
238 myLogBackdoorPrintf("\n!!Guest Assertion failed!!\n%s(%d) %s\n%s\n", pszFile, uLine, pszFunction, (pszExpr))
239# define RTAssertMsg2Weak myLogBackdoorPrintf
240# define RTTIMERLNX_LOG(a) myLogBackdoorPrintf a
241#else
242# define RTTIMERLNX_LOG(a) do { } while (0)
243#endif
244
245/**
246 * Sets the state.
247 */
248DECLINLINE(void) rtTimerLnxSetState(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState)
249{
250#ifdef DEBUG_HACKING
251 RTTIMERLNX_LOG(("set %d -> %d\n", *penmState, enmNewState));
252#endif
253 ASMAtomicWriteU32((uint32_t volatile *)penmState, enmNewState);
254}
255
256
257/**
258 * Sets the state if it has a certain value.
259 *
260 * @return true if xchg was done.
261 * @return false if xchg wasn't done.
262 */
263#ifdef DEBUG_HACKING
264#define rtTimerLnxCmpXchgState(penmState, enmNewState, enmCurState) rtTimerLnxCmpXchgStateDebug(penmState, enmNewState, enmCurState, __LINE__)
265static bool rtTimerLnxCmpXchgStateDebug(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState,
266 RTTIMERLNXSTATE enmCurState, uint32_t uLine)
267{
268 RTTIMERLNXSTATE enmOldState = enmCurState;
269 bool fRc = ASMAtomicCmpXchgExU32((uint32_t volatile *)penmState, enmNewState, enmCurState, (uint32_t *)&enmOldState);
270 RTTIMERLNX_LOG(("cxg %d -> %d - %d at %u\n", enmOldState, enmNewState, fRc, uLine));
271 return fRc;
272}
273#else
274DECLINLINE(bool) rtTimerLnxCmpXchgState(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState,
275 RTTIMERLNXSTATE enmCurState)
276{
277 return ASMAtomicCmpXchgU32((uint32_t volatile *)penmState, enmNewState, enmCurState);
278}
279#endif
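/* Usage sketch (not from the original source): a start path claims a
   stopped sub-timer with one compare-and-exchange, the same pattern
   RTTimerStart uses further down; a false return means the callback or
   another thread owns the state and the caller must re-read and retry. */
#if 0
if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STARTING, RTTIMERLNXSTATE_STOPPED))
{
    /* We own the STOPPED -> STARTING transition; safe to arm the Linux timer. */
}
else
{
    /* Lost the race: re-read via rtTimerLnxGetState() and decide again. */
}
#endif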
280
281
282/**
283 * Gets the state.
284 */
285DECLINLINE(RTTIMERLNXSTATE) rtTimerLnxGetState(RTTIMERLNXSTATE volatile *penmState)
286{
287 return (RTTIMERLNXSTATE)ASMAtomicUoReadU32((uint32_t volatile *)penmState);
288}
289
290#ifdef RTTIMER_LINUX_WITH_HRTIMER
291
292/**
293 * Converts a nano second time stamp to ktime_t.
294 *
295 * ASSUMES RTTimeSystemNanoTS() is implemented using ktime_get_ts().
296 *
297 * @returns ktime_t.
298 * @param cNanoSecs Nanoseconds.
299 */
300DECLINLINE(ktime_t) rtTimerLnxNanoToKt(uint64_t cNanoSecs)
301{
302 /* With some luck the compiler optimizes the division out of this... (Bet it doesn't.) */
303 return ktime_set(cNanoSecs / 1000000000, cNanoSecs % 1000000000);
304}
305
306/**
307 * Converts ktime_t to a nano second time stamp.
308 *
309 * ASSUMES RTTimeSystemNanoTS() is implemented using ktime_get_ts().
310 *
311 * @returns nano second time stamp.
312 * @param Kt ktime_t.
313 */
314DECLINLINE(uint64_t) rtTimerLnxKtToNano(ktime_t Kt)
315{
316 return ktime_to_ns(Kt);
317}
318
319#endif /* RTTIMER_LINUX_WITH_HRTIMER */
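/* Worked example (editorial): the conversion is plain integer division, so
   1500000000 ns becomes ktime_set(1, 500000000), and the round-trip
   rtTimerLnxKtToNano(rtTimerLnxNanoToKt(x)) == x for any value that fits. */
#if 0
ktime_t  Kt  = rtTimerLnxNanoToKt(UINT64_C(1500000000)); /* 1.5 seconds      */
uint64_t cNs = rtTimerLnxKtToNano(Kt);                   /* 1500000000 again */
#endif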
320
321/**
322 * Converts a nano second interval to jiffies.
323 *
324 * @returns Jiffies.
325 * @param cNanoSecs Nanoseconds.
326 */
327DECLINLINE(unsigned long) rtTimerLnxNanoToJiffies(uint64_t cNanoSecs)
328{
329 /* this can be made even better... */
330 if (cNanoSecs > (uint64_t)TICK_NSEC * MAX_JIFFY_OFFSET)
331 return MAX_JIFFY_OFFSET;
332# if ARCH_BITS == 32
333 if (RT_LIKELY(cNanoSecs <= UINT32_MAX))
334 return ((uint32_t)cNanoSecs + (TICK_NSEC-1)) / TICK_NSEC;
335# endif
336 return (cNanoSecs + (TICK_NSEC-1)) / TICK_NSEC;
337}
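/* Worked example (editorial, assuming HZ=250 so that TICK_NSEC is 4000000):
   a 10 ms (10000000 ns) interval rounds up to
   (10000000 + 3999999) / 4000000 = 3 jiffies, while anything above
   TICK_NSEC * MAX_JIFFY_OFFSET is clamped to MAX_JIFFY_OFFSET. */
#if 0
unsigned long cJiffies = rtTimerLnxNanoToJiffies(UINT64_C(10000000)); /* -> 3 with HZ=250 */
#endif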
338
339
340/**
341 * Starts a sub-timer (RTTimerStart).
342 *
343 * @param pSubTimer The sub-timer to start.
344 * @param u64Now The current timestamp (RTTimeSystemNanoTS()).
345 * @param u64First The interval from u64Now to the first time the timer should fire.
346 * @param fPinned true = timer pinned to a specific CPU,
347 * false = timer can migrate between CPUs
348 * @param fHighRes Whether the user requested a high resolution timer or not.
350 */
351static void rtTimerLnxStartSubTimer(PRTTIMERLNXSUBTIMER pSubTimer, uint64_t u64Now, uint64_t u64First,
352 bool fPinned, bool fHighRes)
353{
354 /*
355 * Calc when it should start firing.
356 */
357 uint64_t u64NextTS = u64Now + u64First;
358 if (!fHighRes)
359 {
360 pSubTimer->u.Std.u64NextTS = u64NextTS;
361 pSubTimer->u.Std.nsStartTS = u64NextTS;
362 }
363 RTTIMERLNX_LOG(("startsubtimer %p\n", pSubTimer->pParent));
364
365 pSubTimer->iTick = 0;
366
367#ifdef RTTIMER_LINUX_WITH_HRTIMER
368 if (fHighRes)
369 hrtimer_start(&pSubTimer->u.Hr.LnxTimer, rtTimerLnxNanoToKt(u64NextTS),
370 fPinned ? HRTIMER_MODE_ABS_PINNED : HRTIMER_MODE_ABS);
371 else
372#endif
373 {
374 unsigned long cJiffies = !u64First ? 0 : rtTimerLnxNanoToJiffies(u64First);
375 pSubTimer->u.Std.ulNextJiffies = jiffies + cJiffies;
376 pSubTimer->u.Std.fFirstAfterChg = true;
377#ifdef CONFIG_SMP
378 if (fPinned)
379 {
380# if RTLNX_VER_MIN(4,8,0)
381 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
382# else
383 mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
384# endif
385 }
386 else
387#endif
388 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
389 }
390
391 /* Be a bit careful here since we could be racing the callback. */
392 if (!rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_STARTING))
393 rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_MP_STARTING);
394}
395
396
397/**
398 * Stops a sub-timer (RTTimerStop and rtTimerLinuxMpEvent()).
399 *
400 * The caller has already changed the state, so we will not be in a callback
401 * situation with respect to the calling thread.
402 *
403 * @param pSubTimer The sub-timer.
404 * @param fHighRes Whether the user requested a high resolution timer or not.
405 */
406static void rtTimerLnxStopSubTimer(PRTTIMERLNXSUBTIMER pSubTimer, bool fHighRes)
407{
408 RTTIMERLNX_LOG(("stopsubtimer %p %d\n", pSubTimer->pParent, fHighRes));
409#ifdef RTTIMER_LINUX_WITH_HRTIMER
410 if (fHighRes)
411 {
412 /* There is no del_timer equivalent in the hrtimer API;
413 hrtimer_cancel() corresponds to del_timer_sync(). As the WARN_ON in
414 del_timer_sync() asserts, waiting for a timer callback to complete
415 is deadlock prone, so don't do it. */
416 int rc = hrtimer_try_to_cancel(&pSubTimer->u.Hr.LnxTimer);
417 if (rc < 0)
418 {
419 hrtimer_start(&pSubTimer->u.Hr.LnxTimer, ktime_set(KTIME_SEC_MAX, 0), HRTIMER_MODE_ABS);
420 hrtimer_try_to_cancel(&pSubTimer->u.Hr.LnxTimer);
421 }
422 }
423 else
424#endif
425 del_timer(&pSubTimer->u.Std.LnxTimer);
426
427 rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED);
428}
429
430
431/**
432 * Used by RTTimerDestroy and rtTimerLnxCallbackDestroy to do the actual work.
433 *
434 * @param pTimer The timer in question.
435 */
436static void rtTimerLnxDestroyIt(PRTTIMER pTimer)
437{
438 RTSPINLOCK hSpinlock = pTimer->hSpinlock;
439 RTCPUID iCpu;
440 Assert(pTimer->fSuspended);
441 RTTIMERLNX_LOG(("destroyit %p\n", pTimer));
442
443 /*
444 * Remove the MP notifications first because it'll reduce the risk of
445 * us overtaking any MP event that might theoretically be racing us here.
446 */
447#ifdef CONFIG_SMP
448 if ( pTimer->cCpus > 1
449 && hSpinlock != NIL_RTSPINLOCK)
450 {
451 int rc = RTMpNotificationDeregister(rtTimerLinuxMpEvent, pTimer);
452 AssertRC(rc);
453 }
454#endif /* CONFIG_SMP */
455
456 /*
457 * Invalidate the handle.
458 */
459 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
460
461 /*
462 * Make sure all timers have stopped executing since we're stopping them in
463 * an asynchronous manner up in rtTimerLnxStopSubTimer.
464 */
465 iCpu = pTimer->cCpus;
466 while (iCpu-- > 0)
467 {
468#ifdef RTTIMER_LINUX_WITH_HRTIMER
469 if (pTimer->fHighRes)
470 hrtimer_cancel(&pTimer->aSubTimers[iCpu].u.Hr.LnxTimer);
471 else
472#endif
473 del_timer_sync(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
474 }
475
476 /*
477 * Finally, free the resources.
478 */
479 RTMemFreeEx(pTimer, RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[pTimer->cCpus]));
480 if (hSpinlock != NIL_RTSPINLOCK)
481 RTSpinlockDestroy(hSpinlock);
482}
483
484
485/**
486 * Workqueue callback (no DECLCALLBACK!) for deferred destruction.
487 *
488 * @param pWork Pointer to the DtorWorkqueueItem member of our timer
489 * structure.
490 */
491static void rtTimerLnxDestroyDeferred(RTR0LNXWORKQUEUEITEM *pWork)
492{
493 PRTTIMER pTimer = RT_FROM_MEMBER(pWork, RTTIMER, DtorWorkqueueItem);
494 rtTimerLnxDestroyIt(pTimer);
495}
496
497
498/**
499 * Called when the timer was destroyed by the callback function.
500 *
501 * @param pTimer The timer.
502 * @param pSubTimer The sub-timer which we're handling, the state of this
503 * will be RTTIMERLNXSTATE_CB_DESTROYING.
504 */
505static void rtTimerLnxCallbackDestroy(PRTTIMER pTimer, PRTTIMERLNXSUBTIMER pSubTimer)
506{
507 /*
508 * If it's an omni timer, the last dude does the destroying.
509 */
510 if (pTimer->cCpus > 1)
511 {
512 uint32_t iCpu = pTimer->cCpus;
513 RTSpinlockAcquire(pTimer->hSpinlock);
514
515 Assert(pSubTimer->enmState == RTTIMERLNXSTATE_CB_DESTROYING);
516 rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED);
517
518 while (iCpu-- > 0)
519 if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) != RTTIMERLNXSTATE_STOPPED)
520 {
521 RTSpinlockRelease(pTimer->hSpinlock);
522 return;
523 }
524
525 RTSpinlockRelease(pTimer->hSpinlock);
526 }
527
528 /*
529 * Destroying a timer from the callback is unsafe since the callout code
530 * might be touching the timer structure upon return (hrtimer does!). So,
531 * we have to defer the actual destruction to the IPRT workqueue.
532 */
533 rtR0LnxWorkqueuePush(&pTimer->DtorWorkqueueItem, rtTimerLnxDestroyDeferred);
534}
535
536
537#ifdef CONFIG_SMP
538/**
539 * Deal with a sub-timer that has migrated.
540 *
541 * @param pTimer The timer.
542 * @param pSubTimer The sub-timer.
543 */
544static void rtTimerLnxCallbackHandleMigration(PRTTIMER pTimer, PRTTIMERLNXSUBTIMER pSubTimer)
545{
546 RTTIMERLNXSTATE enmState;
547 if (pTimer->cCpus > 1)
548 RTSpinlockAcquire(pTimer->hSpinlock);
549
550 do
551 {
552 enmState = rtTimerLnxGetState(&pSubTimer->enmState);
553 switch (enmState)
554 {
555 case RTTIMERLNXSTATE_STOPPING:
556 case RTTIMERLNXSTATE_MP_STOPPING:
557 enmState = RTTIMERLNXSTATE_STOPPED;
558 RT_FALL_THRU();
559 case RTTIMERLNXSTATE_STOPPED:
560 break;
561
562 default:
563 AssertMsgFailed(("%d\n", enmState));
564 RT_FALL_THRU();
565 case RTTIMERLNXSTATE_STARTING:
566 case RTTIMERLNXSTATE_MP_STARTING:
567 case RTTIMERLNXSTATE_ACTIVE:
568 case RTTIMERLNXSTATE_CALLBACK:
569 case RTTIMERLNXSTATE_CB_STOPPING:
570 case RTTIMERLNXSTATE_CB_RESTARTING:
571 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, enmState))
572 enmState = RTTIMERLNXSTATE_STOPPED;
573 break;
574
575 case RTTIMERLNXSTATE_CB_DESTROYING:
576 {
577 if (pTimer->cCpus > 1)
578 RTSpinlockRelease(pTimer->hSpinlock);
579
580 rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
581 return;
582 }
583 }
584 } while (enmState != RTTIMERLNXSTATE_STOPPED);
585
586 if (pTimer->cCpus > 1)
587 RTSpinlockRelease(pTimer->hSpinlock);
588}
589#endif /* CONFIG_SMP */
590
591
592/**
593 * The slow path of rtTimerLnxChangeToCallbackState.
594 *
595 * @returns true if changed successfully, false if not.
596 * @param pSubTimer The sub-timer.
597 */
598static bool rtTimerLnxChangeToCallbackStateSlow(PRTTIMERLNXSUBTIMER pSubTimer)
599{
600 for (;;)
601 {
602 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
603 switch (enmState)
604 {
605 case RTTIMERLNXSTATE_ACTIVE:
606 case RTTIMERLNXSTATE_STARTING:
607 case RTTIMERLNXSTATE_MP_STARTING:
608 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CALLBACK, enmState))
609 return true;
610 break;
611
612 case RTTIMERLNXSTATE_CALLBACK:
613 case RTTIMERLNXSTATE_CB_STOPPING:
614 case RTTIMERLNXSTATE_CB_RESTARTING:
615 case RTTIMERLNXSTATE_CB_DESTROYING:
616 AssertMsgFailed(("%d\n", enmState)); RT_FALL_THRU();
617 default:
618 return false;
619 }
620 ASMNopPause();
621 }
622}
623
624
625/**
626 * Tries to change the sub-timer state to 'callback'.
627 *
628 * @returns true if changed successfully, false if not.
629 * @param pSubTimer The sub-timer.
630 */
631DECLINLINE(bool) rtTimerLnxChangeToCallbackState(PRTTIMERLNXSUBTIMER pSubTimer)
632{
633 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CALLBACK, RTTIMERLNXSTATE_ACTIVE)))
634 return true;
635 return rtTimerLnxChangeToCallbackStateSlow(pSubTimer);
636}
637
638
639#ifdef RTTIMER_LINUX_WITH_HRTIMER
640/**
641 * Timer callback function for high resolution timers.
642 *
643 * @returns HRTIMER_NORESTART or HRTIMER_RESTART depending on whether it's a
644 * one-shot or interval timer.
645 * @param pHrTimer Pointer to the sub-timer structure.
646 */
647static enum hrtimer_restart rtTimerLinuxHrCallback(struct hrtimer *pHrTimer)
648{
649 PRTTIMERLNXSUBTIMER pSubTimer = RT_FROM_MEMBER(pHrTimer, RTTIMERLNXSUBTIMER, u.Hr.LnxTimer);
650 PRTTIMER pTimer = pSubTimer->pParent;
651
652
653 RTTIMERLNX_LOG(("hrcallback %p\n", pTimer));
654 if (RT_UNLIKELY(!rtTimerLnxChangeToCallbackState(pSubTimer)))
655 return HRTIMER_NORESTART;
656
657#ifdef CONFIG_SMP
658 /*
659 * Check for unwanted migration.
660 */
661 if (pTimer->fAllCpus || pTimer->fSpecificCpu)
662 {
663 RTCPUID idCpu = RTMpCpuId();
664 if (RT_UNLIKELY( pTimer->fAllCpus
665 ? (RTCPUID)(pSubTimer - &pTimer->aSubTimers[0]) != idCpu
666 : pTimer->idCpu != idCpu))
667 {
668 rtTimerLnxCallbackHandleMigration(pTimer, pSubTimer);
669 return HRTIMER_NORESTART;
670 }
671 }
672#endif
673
674 if (pTimer->u64NanoInterval)
675 {
676 /*
677 * Periodic timer, run it and update the native timer afterwards so
678 * we can handle RTTimerStop and RTTimerChangeInterval from the
679 * callback as well as a racing control thread.
680 */
681 pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
682 hrtimer_add_expires_ns(&pSubTimer->u.Hr.LnxTimer, ASMAtomicReadU64(&pTimer->u64NanoInterval));
683 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CALLBACK)))
684 return HRTIMER_RESTART;
685 }
686 else
687 {
688 /*
689 * One shot timer (no omni), stop it before dispatching it.
690 * Allow RTTimerStart as well as RTTimerDestroy to be called from
691 * the callback.
692 */
693 ASMAtomicWriteBool(&pTimer->fSuspended, true);
694 pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
695 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CALLBACK)))
696 return HRTIMER_NORESTART;
697 }
698
699 /*
700 * Some state change occurred while we were in the callback routine.
701 */
702 for (;;)
703 {
704 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
705 switch (enmState)
706 {
707 case RTTIMERLNXSTATE_CB_DESTROYING:
708 rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
709 return HRTIMER_NORESTART;
710
711 case RTTIMERLNXSTATE_CB_STOPPING:
712 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CB_STOPPING))
713 return HRTIMER_NORESTART;
714 break;
715
716 case RTTIMERLNXSTATE_CB_RESTARTING:
717 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CB_RESTARTING))
718 {
719 pSubTimer->iTick = 0;
720 hrtimer_set_expires(&pSubTimer->u.Hr.LnxTimer, rtTimerLnxNanoToKt(pSubTimer->uNsRestartAt));
721 return HRTIMER_RESTART;
722 }
723 break;
724
725 default:
726 AssertMsgFailed(("%d\n", enmState));
727 return HRTIMER_NORESTART;
728 }
729 ASMNopPause();
730 }
731}
732#endif /* RTTIMER_LINUX_WITH_HRTIMER */
733
734
735#if RTLNX_VER_MIN(4,15,0)
736/**
737 * Timer callback function for standard timers.
738 *
739 * @param pLnxTimer Pointer to the Linux timer structure.
740 */
741static void rtTimerLinuxStdCallback(struct timer_list *pLnxTimer)
742{
743 PRTTIMERLNXSUBTIMER pSubTimer = from_timer(pSubTimer, pLnxTimer, u.Std.LnxTimer);
744#else
745/**
746 * Timer callback function for standard timers.
747 *
748 * @param ulUser Address of the sub-timer structure.
749 */
750static void rtTimerLinuxStdCallback(unsigned long ulUser)
751{
752 PRTTIMERLNXSUBTIMER pSubTimer = (PRTTIMERLNXSUBTIMER)ulUser;
753#endif
754 PRTTIMER pTimer = pSubTimer->pParent;
755
756 RTTIMERLNX_LOG(("stdcallback %p\n", pTimer));
757 if (RT_UNLIKELY(!rtTimerLnxChangeToCallbackState(pSubTimer)))
758 return;
759
760#ifdef CONFIG_SMP
761 /*
762 * Check for unwanted migration.
763 */
764 if (pTimer->fAllCpus || pTimer->fSpecificCpu)
765 {
766 RTCPUID idCpu = RTMpCpuId();
767 if (RT_UNLIKELY( pTimer->fAllCpus
768 ? (RTCPUID)(pSubTimer - &pTimer->aSubTimers[0]) != idCpu
769 : pTimer->idCpu != idCpu))
770 {
771 rtTimerLnxCallbackHandleMigration(pTimer, pSubTimer);
772 return;
773 }
774 }
775#endif
776
777 if (pTimer->u64NanoInterval)
778 {
779 /*
780 * Interval timer, calculate the next timeout.
781 *
782 * The first time around, we'll re-adjust the u.Std.u64NextTS to
783 * try to prevent some jittering if we were started at a bad time.
784 */
785 const uint64_t iTick = ++pSubTimer->iTick;
786 unsigned long uCurJiffies = jiffies;
787 unsigned long ulNextJiffies;
788 uint64_t u64NanoInterval;
789 unsigned long cJiffies;
790 unsigned long flFlags;
791
792 spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
793 u64NanoInterval = pTimer->u64NanoInterval;
794 cJiffies = pTimer->cJiffies;
795 if (RT_UNLIKELY(pSubTimer->u.Std.fFirstAfterChg))
796 {
797 pSubTimer->u.Std.fFirstAfterChg = false;
798 pSubTimer->u.Std.u64NextTS = RTTimeSystemNanoTS();
799 pSubTimer->u.Std.nsStartTS = pSubTimer->u.Std.u64NextTS - u64NanoInterval * (iTick - 1);
800 pSubTimer->u.Std.ulNextJiffies = uCurJiffies = jiffies;
801 }
802 spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);
803
804 pSubTimer->u.Std.u64NextTS += u64NanoInterval;
805 if (cJiffies)
806 {
807 ulNextJiffies = pSubTimer->u.Std.ulNextJiffies + cJiffies;
808 pSubTimer->u.Std.ulNextJiffies = ulNextJiffies;
809 if (time_after_eq(ulNextJiffies, uCurJiffies))
810 { /* likely */ }
811 else
812 {
813 unsigned long cJiffiesBehind = uCurJiffies - ulNextJiffies;
814 ulNextJiffies = uCurJiffies + cJiffies / 2;
815 if (cJiffiesBehind >= HZ / 4) /* Too far behind; give up catching up. Screw the u64NextTS member. */
816 pSubTimer->u.Std.ulNextJiffies = ulNextJiffies;
817 /*else: Don't update u.Std.ulNextJiffies so we can continue catching up in the next tick. */
818 }
819 }
820 else
821 {
822 const uint64_t u64NanoTS = RTTimeSystemNanoTS();
823 const int64_t cNsBehind = u64NanoTS - pSubTimer->u.Std.u64NextTS;
824 if (cNsBehind <= 0)
825 ulNextJiffies = uCurJiffies + rtTimerLnxNanoToJiffies(pSubTimer->u.Std.u64NextTS - u64NanoTS);
826 else if (u64NanoInterval >= RT_NS_1SEC_64 * 2 / HZ)
827 {
828 ulNextJiffies = uCurJiffies + rtTimerLnxNanoToJiffies(u64NanoInterval / 2);
829 if (cNsBehind >= RT_NS_1SEC_64 / HZ / 4) /* Too far behind; give up catching up. */
830 pSubTimer->u.Std.u64NextTS = u64NanoTS + u64NanoInterval / 2;
831 }
832 else
833 {
834 ulNextJiffies = uCurJiffies + 1;
835 if (cNsBehind >= RT_NS_1SEC_64 / HZ / 4) /* Too far behind; give up catching up. */
836 pSubTimer->u.Std.u64NextTS = u64NanoTS + RT_NS_1SEC_64 / HZ;
837 }
838 pSubTimer->u.Std.ulNextJiffies = ulNextJiffies;
839 }
840
841 /*
842 * Run the timer and re-arm it unless the state changed.
843 *
844 * We must re-arm it afterwards as we're not in a position to undo this
845 * operation if for instance someone stopped or destroyed us while we
846 * were in the callback. (Linux takes care of any races here.)
847 */
848 pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
849 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CALLBACK)))
850 {
851#ifdef CONFIG_SMP
852 if (pTimer->fSpecificCpu || pTimer->fAllCpus)
853 {
854# if RTLNX_VER_MIN(4,8,0)
855 mod_timer(&pSubTimer->u.Std.LnxTimer, ulNextJiffies);
856# else
857 mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, ulNextJiffies);
858# endif
859 }
860 else
861#endif
862 mod_timer(&pSubTimer->u.Std.LnxTimer, ulNextJiffies);
863 return;
864 }
865 }
866 else
867 {
868 /*
869 * One shot timer, stop it before dispatching it.
870 * Allow RTTimerStart as well as RTTimerDestroy to be called from
871 * the callback.
872 */
873 ASMAtomicWriteBool(&pTimer->fSuspended, true);
874 pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
875 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CALLBACK)))
876 return;
877 }
878
879 /*
880 * Some state change occurred while we were in the callback routine.
881 */
882 for (;;)
883 {
884 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
885 switch (enmState)
886 {
887 case RTTIMERLNXSTATE_CB_DESTROYING:
888 rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
889 return;
890
891 case RTTIMERLNXSTATE_CB_STOPPING:
892 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CB_STOPPING))
893 return;
894 break;
895
896 case RTTIMERLNXSTATE_CB_RESTARTING:
897 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CB_RESTARTING))
898 {
899 uint64_t u64NanoTS;
900 uint64_t u64NextTS;
901 unsigned long flFlags;
902
903 spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
904 u64NextTS = pSubTimer->uNsRestartAt;
905 u64NanoTS = RTTimeSystemNanoTS();
906 pSubTimer->iTick = 0;
907 pSubTimer->u.Std.u64NextTS = u64NextTS;
908 pSubTimer->u.Std.fFirstAfterChg = true;
909 pSubTimer->u.Std.ulNextJiffies = u64NextTS > u64NanoTS
910 ? jiffies + rtTimerLnxNanoToJiffies(u64NextTS - u64NanoTS)
911 : jiffies;
912 spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);
913
914#ifdef CONFIG_SMP
915 if (pTimer->fSpecificCpu || pTimer->fAllCpus)
916 {
917# if RTLNX_VER_MIN(4,8,0)
918 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
919# else
920 mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
921# endif
922 }
923 else
924#endif
925 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
926 return;
927 }
928 break;
929
930 default:
931 AssertMsgFailed(("%d\n", enmState));
932 return;
933 }
934 ASMNopPause();
935 }
936}
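/* Worked example of the catch-up logic above (editorial, assuming HZ=250,
   i.e. 4 ms per jiffy, and an exact-jiffy 20 ms interval, cJiffies = 5):
   if a callback finds itself 25 jiffies (100 ms) late, that is below the
   HZ/4 = 62 jiffy give-up limit, so: */
#if 0
ulNextJiffies = uCurJiffies + cJiffies / 2; /* re-arm just 2 jiffies out...   */
/* ...while u.Std.ulNextJiffies is left behind on purpose, so the following
   ticks still see a backlog and also fire early, catching up tick by tick.
   Only once the lag reaches 62+ jiffies is u.Std.ulNextJiffies reset and
   the backlog abandoned. */
#endif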
937
938
939#ifdef CONFIG_SMP
940
941/**
942 * Per-cpu callback function (RTMpOnAll).
943 *
944 * @param idCpu The current CPU.
945 * @param pvUser1 Pointer to the timer.
946 * @param pvUser2 Pointer to the argument structure.
947 */
948static DECLCALLBACK(void) rtTimerLnxStartAllOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
949{
950 PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
951 PRTTIMER pTimer = (PRTTIMER)pvUser1;
952 Assert(idCpu < pTimer->cCpus);
953 rtTimerLnxStartSubTimer(&pTimer->aSubTimers[idCpu], pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
954}
955
956
957/**
958 * Worker for RTTimerStart() that takes care of the ugly bits.
959 *
960 * @returns RTTimerStart() return value.
961 * @param pTimer The timer.
962 * @param pArgs The argument structure.
963 */
964static int rtTimerLnxOmniStart(PRTTIMER pTimer, PRTTIMERLINUXSTARTONCPUARGS pArgs)
965{
966 RTCPUID iCpu;
967 RTCPUSET OnlineSet;
968 RTCPUSET OnlineSet2;
969 int rc2;
970
971 /*
972 * Prepare all the sub-timers for the startup and then flag the timer
973 * as a whole as non-suspended. Make sure we get them all set up before
974 * clearing fSuspended, as the MP handler will be relying on this
975 * should something happen while we're looping.
976 */
977 RTSpinlockAcquire(pTimer->hSpinlock);
978
979 /* Just make it an omni-timer restriction that no stop/start races are allowed. */
980 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
981 if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) != RTTIMERLNXSTATE_STOPPED)
982 {
983 RTSpinlockRelease(pTimer->hSpinlock);
984 return VERR_TIMER_BUSY;
985 }
986
987 do
988 {
989 RTMpGetOnlineSet(&OnlineSet);
990 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
991 {
992 Assert(pTimer->aSubTimers[iCpu].enmState != RTTIMERLNXSTATE_MP_STOPPING);
993 rtTimerLnxSetState(&pTimer->aSubTimers[iCpu].enmState,
994 RTCpuSetIsMember(&OnlineSet, iCpu)
995 ? RTTIMERLNXSTATE_STARTING
996 : RTTIMERLNXSTATE_STOPPED);
997 }
998 } while (!RTCpuSetIsEqual(&OnlineSet, RTMpGetOnlineSet(&OnlineSet2)));
999
1000 ASMAtomicWriteBool(&pTimer->fSuspended, false);
1001
1002 RTSpinlockRelease(pTimer->hSpinlock);
1003
1004 /*
1005 * Start them (can't find any exported function that allows me to
1006 * do this without the cross calls).
1007 */
1008 pArgs->u64Now = RTTimeSystemNanoTS();
1009 rc2 = RTMpOnAll(rtTimerLnxStartAllOnCpu, pTimer, pArgs);
1010 AssertRC(rc2); /* screw this if it fails. */
1011
1012 /*
1013 * Reset the sub-timers who didn't start up (ALL CPUs case).
1014 */
1015 RTSpinlockAcquire(pTimer->hSpinlock);
1016
1017 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
1018 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_STARTING))
1019 {
1020 /** @todo very odd case for a rainy day. CPUs that temporarily went offline while
1021 * we were between calls need to be nudged, as the MP handler will ignore events for
1022 * them because of the STARTING state. This is an extremely unlikely case - not that
1023 * that means anything in my experience... ;-) */
1024 RTTIMERLNX_LOG(("what!? iCpu=%u -> didn't start\n", iCpu));
1025 }
1026
1027 RTSpinlockRelease(pTimer->hSpinlock);
1028
1029 return VINF_SUCCESS;
1030}
1031
1032
1033/**
1034 * Worker for RTTimerStop() that takes care of the ugly SMP bits.
1035 *
1036 * @returns true if there were any active callbacks, false if not.
1037 * @param pTimer The timer (valid).
1038 * @param fForDestroy Whether this is for RTTimerDestroy or not.
1039 */
1040static bool rtTimerLnxOmniStop(PRTTIMER pTimer, bool fForDestroy)
1041{
1042 bool fActiveCallbacks = false;
1043 RTCPUID iCpu;
1044 RTTIMERLNXSTATE enmState;
1045
1046
1047 /*
1048 * Mark the timer as suspended and flag all timers as stopping, except
1049 * for those being stopped by an MP event.
1050 */
1051 RTSpinlockAcquire(pTimer->hSpinlock);
1052
1053 ASMAtomicWriteBool(&pTimer->fSuspended, true);
1054 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
1055 {
1056 for (;;)
1057 {
1058 enmState = rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState);
1059 if ( enmState == RTTIMERLNXSTATE_STOPPED
1060 || enmState == RTTIMERLNXSTATE_MP_STOPPING)
1061 break;
1062 if ( enmState == RTTIMERLNXSTATE_CALLBACK
1063 || enmState == RTTIMERLNXSTATE_CB_STOPPING
1064 || enmState == RTTIMERLNXSTATE_CB_RESTARTING)
1065 {
1066 Assert(enmState != RTTIMERLNXSTATE_CB_STOPPING || fForDestroy);
1067 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState,
1068 !fForDestroy ? RTTIMERLNXSTATE_CB_STOPPING : RTTIMERLNXSTATE_CB_DESTROYING,
1069 enmState))
1070 {
1071 fActiveCallbacks = true;
1072 break;
1073 }
1074 }
1075 else
1076 {
1077 Assert(enmState == RTTIMERLNXSTATE_ACTIVE);
1078 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_STOPPING, enmState))
1079 break;
1080 }
1081 ASMNopPause();
1082 }
1083 }
1084
1085 RTSpinlockRelease(pTimer->hSpinlock);
1086
1087 /*
1088 * Do the actual stopping. Fortunately, this doesn't require any IPIs.
1089 * Unfortunately it cannot be done synchronously.
1090 */
1091 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
1092 if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) == RTTIMERLNXSTATE_STOPPING)
1093 rtTimerLnxStopSubTimer(&pTimer->aSubTimers[iCpu], pTimer->fHighRes);
1094
1095 return fActiveCallbacks;
1096}
1097
1098
1099/**
1100 * Per-cpu callback function (RTMpOnSpecific) used by rtTimerLinuxMpEvent()
1101 * to start a sub-timer on a CPU that has just come online.
1102 *
1103 * @param idCpu The current CPU.
1104 * @param pvUser1 Pointer to the timer.
1105 * @param pvUser2 Pointer to the argument structure.
1106 */
1107static DECLCALLBACK(void) rtTimerLinuxMpStartOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1108{
1109 PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
1110 PRTTIMER pTimer = (PRTTIMER)pvUser1;
1111 RTSPINLOCK hSpinlock;
1112 Assert(idCpu < pTimer->cCpus);
1113
1114 /*
1115 * We have to be kind of careful here as we might be racing RTTimerStop
1116 * (and/or RTTimerDestroy), thus the paranoia.
1117 */
1118 hSpinlock = pTimer->hSpinlock;
1119 if ( hSpinlock != NIL_RTSPINLOCK
1120 && pTimer->u32Magic == RTTIMER_MAGIC)
1121 {
1122 RTSpinlockAcquire(hSpinlock);
1123
1124 if ( !ASMAtomicUoReadBool(&pTimer->fSuspended)
1125 && pTimer->u32Magic == RTTIMER_MAGIC)
1126 {
1127 /* We're sane and the timer is not suspended yet. */
1128 PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
1129 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
1130 rtTimerLnxStartSubTimer(pSubTimer, pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
1131 }
1132
1133 RTSpinlockRelease(hSpinlock);
1134 }
1135}
1136
1137
1138/**
1139 * MP event notification callback.
1140 *
1141 * @param enmEvent The event.
1142 * @param idCpu The cpu it applies to.
1143 * @param pvUser The timer.
1144 */
1145static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
1146{
1147 PRTTIMER pTimer = (PRTTIMER)pvUser;
1148 PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
1149 RTSPINLOCK hSpinlock;
1150
1151 Assert(idCpu < pTimer->cCpus);
1152
1153 /*
1154 * Some initial paranoia.
1155 */
1156 if (pTimer->u32Magic != RTTIMER_MAGIC)
1157 return;
1158 hSpinlock = pTimer->hSpinlock;
1159 if (hSpinlock == NIL_RTSPINLOCK)
1160 return;
1161
1162 RTSpinlockAcquire(hSpinlock);
1163
1164 /* Is it active? */
1165 if ( !ASMAtomicUoReadBool(&pTimer->fSuspended)
1166 && pTimer->u32Magic == RTTIMER_MAGIC)
1167 {
1168 switch (enmEvent)
1169 {
1170 /*
1171 * Try to do it without leaving the spinlock, but if we have to, retake
1172 * it when we're on the right CPU.
1173 */
1174 case RTMPEVENT_ONLINE:
1175 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
1176 {
1177 RTTIMERLINUXSTARTONCPUARGS Args;
1178 Args.u64Now = RTTimeSystemNanoTS();
1179 Args.u64First = 0;
1180
1181 if (RTMpCpuId() == idCpu)
1182 rtTimerLnxStartSubTimer(pSubTimer, Args.u64Now, Args.u64First, true /*fPinned*/, pTimer->fHighRes);
1183 else
1184 {
1185 rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED); /* we'll recheck it. */
1186 RTSpinlockRelease(hSpinlock);
1187
1188 RTMpOnSpecific(idCpu, rtTimerLinuxMpStartOnCpu, pTimer, &Args);
1189 return; /* we've left the spinlock */
1190 }
1191 }
1192 break;
1193
1194 /*
1195 * The CPU is (going) offline, make sure the sub-timer is stopped.
1196 *
1197 * Linux will migrate it to a different CPU, but we don't want this. The
1198 * timer function is checking for this.
1199 */
1200 case RTMPEVENT_OFFLINE:
1201 {
1202 RTTIMERLNXSTATE enmState;
1203 while ( (enmState = rtTimerLnxGetState(&pSubTimer->enmState)) == RTTIMERLNXSTATE_ACTIVE
1204 || enmState == RTTIMERLNXSTATE_CALLBACK
1205 || enmState == RTTIMERLNXSTATE_CB_RESTARTING)
1206 {
1207 if (enmState == RTTIMERLNXSTATE_ACTIVE)
1208 {
1209 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STOPPING, RTTIMERLNXSTATE_ACTIVE))
1210 {
1211 RTSpinlockRelease(hSpinlock);
1212
1213 rtTimerLnxStopSubTimer(pSubTimer, pTimer->fHighRes);
1214 return; /* we've left the spinlock */
1215 }
1216 }
1217 else if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CB_STOPPING, enmState))
1218 break;
1219
1220 /* State not stable, try again. */
1221 ASMNopPause();
1222 }
1223 break;
1224 }
1225 }
1226 }
1227
1228 RTSpinlockRelease(hSpinlock);
1229}
1230
1231#endif /* CONFIG_SMP */
1232
1233
1234/**
1235 * Callback function used by RTTimerStart via RTMpOnSpecific to start a timer
1236 * running on a specific CPU.
1237 *
1238 * @param idCpu The current CPU.
1239 * @param pvUser1 Pointer to the timer.
1240 * @param pvUser2 Pointer to the argument structure.
1241 */
1242static DECLCALLBACK(void) rtTimerLnxStartOnSpecificCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1243{
1244 PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
1245 PRTTIMER pTimer = (PRTTIMER)pvUser1;
1246 RT_NOREF_PV(idCpu);
1247 rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
1248}
1249
1250
1251RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
1252{
1253 RTTIMERLINUXSTARTONCPUARGS Args;
1254 int rc2;
1255 IPRT_LINUX_SAVE_EFL_AC();
1256
1257 /*
1258 * Validate.
1259 */
1260 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
1261 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
1262
1263 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
1264 return VERR_TIMER_ACTIVE;
1265 RTTIMERLNX_LOG(("start %p cCpus=%d\n", pTimer, pTimer->cCpus));
1266
1267 Args.u64First = u64First;
1268#ifdef CONFIG_SMP
1269 /*
1270 * Omni timer?
1271 */
1272 if (pTimer->fAllCpus)
1273 {
1274 rc2 = rtTimerLnxOmniStart(pTimer, &Args);
1275 IPRT_LINUX_RESTORE_EFL_AC();
1276 return rc2;
1277 }
1278#endif
1279
1280 /*
1281 * Simple timer - pretty straightforward if it weren't for restarting.
1282 */
1283 Args.u64Now = RTTimeSystemNanoTS();
1284 ASMAtomicWriteU64(&pTimer->aSubTimers[0].uNsRestartAt, Args.u64Now + u64First);
1285 for (;;)
1286 {
1287 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[0].enmState);
1288 switch (enmState)
1289 {
1290 case RTTIMERLNXSTATE_STOPPED:
1291 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STARTING, RTTIMERLNXSTATE_STOPPED))
1292 {
1293 ASMAtomicWriteBool(&pTimer->fSuspended, false);
1294 if (!pTimer->fSpecificCpu)
1295 rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], Args.u64Now, Args.u64First,
1296 false /*fPinned*/, pTimer->fHighRes);
1297 else
1298 {
1299 rc2 = RTMpOnSpecific(pTimer->idCpu, rtTimerLnxStartOnSpecificCpu, pTimer, &Args);
1300 if (RT_FAILURE(rc2))
1301 {
1302 /* Suspend it, the cpu id is probably invalid or offline. */
1303 ASMAtomicWriteBool(&pTimer->fSuspended, true);
1304 rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPED);
1305 IPRT_LINUX_RESTORE_EFL_AC(); return rc2;
1306 }
1307 }
1308 IPRT_LINUX_RESTORE_EFL_AC();
1309 return VINF_SUCCESS;
1310 }
1311 break;
1312
1313 case RTTIMERLNXSTATE_CALLBACK:
1314 case RTTIMERLNXSTATE_CB_STOPPING:
1315 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_CB_RESTARTING, enmState))
1316 {
1317 ASMAtomicWriteBool(&pTimer->fSuspended, false);
1318 IPRT_LINUX_RESTORE_EFL_AC();
1319 return VINF_SUCCESS;
1320 }
1321 break;
1322
1323 default:
1324 AssertMsgFailed(("%d\n", enmState));
1325 IPRT_LINUX_RESTORE_EFL_AC();
1326 return VERR_INTERNAL_ERROR_4;
1327 }
1328 ASMNopPause();
1329 }
1330}
1331RT_EXPORT_SYMBOL(RTTimerStart);
1332
1333
1334/**
1335 * Common worker for RTTimerStop and RTTimerDestroy.
1336 *
1337 * @returns true if there were any active callbacks, false if not.
1338 * @param pTimer The timer to stop.
1339 * @param fForDestroy Whether it's RTTimerDestroy calling or not.
1340 */
1341static bool rtTimerLnxStop(PRTTIMER pTimer, bool fForDestroy)
1342{
1343 RTTIMERLNX_LOG(("lnxstop %p %d\n", pTimer, fForDestroy));
1344#ifdef CONFIG_SMP
1345 /*
1346 * Omni timer?
1347 */
1348 if (pTimer->fAllCpus)
1349 return rtTimerLnxOmniStop(pTimer, fForDestroy);
1350#endif
1351
1352 /*
1353 * Simple timer.
1354 */
1355 ASMAtomicWriteBool(&pTimer->fSuspended, true);
1356 for (;;)
1357 {
1358 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[0].enmState);
1359 switch (enmState)
1360 {
1361 case RTTIMERLNXSTATE_ACTIVE:
1362 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPING, RTTIMERLNXSTATE_ACTIVE))
1363 {
1364 rtTimerLnxStopSubTimer(&pTimer->aSubTimers[0], pTimer->fHighRes);
1365 return false;
1366 }
1367 break;
1368
1369 case RTTIMERLNXSTATE_CALLBACK:
1370 case RTTIMERLNXSTATE_CB_RESTARTING:
1371 case RTTIMERLNXSTATE_CB_STOPPING:
1372 Assert(enmState != RTTIMERLNXSTATE_CB_STOPPING || fForDestroy);
1373 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState,
1374 !fForDestroy ? RTTIMERLNXSTATE_CB_STOPPING : RTTIMERLNXSTATE_CB_DESTROYING,
1375 enmState))
1376 return true;
1377 break;
1378
1379 case RTTIMERLNXSTATE_STOPPED:
1380 return false; /* Already stopped, no active callback. */
1381
1382 case RTTIMERLNXSTATE_CB_DESTROYING:
1383 AssertMsgFailed(("enmState=%d pTimer=%p\n", enmState, pTimer));
1384 return true;
1385
1386 default:
1387 case RTTIMERLNXSTATE_STARTING:
1388 case RTTIMERLNXSTATE_MP_STARTING:
1389 case RTTIMERLNXSTATE_STOPPING:
1390 case RTTIMERLNXSTATE_MP_STOPPING:
1391 AssertMsgFailed(("enmState=%d pTimer=%p\n", enmState, pTimer));
1392 return false;
1393 }
1394
1395 /* State not stable, try again. */
1396 ASMNopPause();
1397 }
1398}
1399
1400
1401RTDECL(int) RTTimerStop(PRTTIMER pTimer)
1402{
1403 /*
1404 * Validate.
1405 */
1406 IPRT_LINUX_SAVE_EFL_AC();
1407 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
1408 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
1409 RTTIMERLNX_LOG(("stop %p\n", pTimer));
1410
1411 if (ASMAtomicUoReadBool(&pTimer->fSuspended))
1412 return VERR_TIMER_SUSPENDED;
1413
1414 rtTimerLnxStop(pTimer, false /*fForDestroy*/);
1415
1416 IPRT_LINUX_RESTORE_EFL_AC();
1417 return VINF_SUCCESS;
1418}
1419RT_EXPORT_SYMBOL(RTTimerStop);
1420
1421
1422RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
1423{
1424 unsigned long cJiffies;
1425 unsigned long flFlags;
1426 IPRT_LINUX_SAVE_EFL_AC();
1427
1428 /*
1429 * Validate.
1430 */
1431 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
1432 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
1433 AssertReturn(u64NanoInterval, VERR_INVALID_PARAMETER);
1434 AssertReturn(u64NanoInterval < UINT64_MAX / 8, VERR_INVALID_PARAMETER);
1435 AssertReturn(pTimer->u64NanoInterval, VERR_INVALID_STATE);
1436 RTTIMERLNX_LOG(("change %p %llu\n", pTimer, u64NanoInterval));
1437
1438#ifdef RTTIMER_LINUX_WITH_HRTIMER
1439 /*
1440 * For the high resolution timers it is easy since we don't care so much
1441 * about when it is applied to the sub-timers.
1442 */
1443 if (pTimer->fHighRes)
1444 {
1445 ASMAtomicWriteU64(&pTimer->u64NanoInterval, u64NanoInterval);
1446 IPRT_LINUX_RESTORE_EFL_AC();
1447 return VINF_SUCCESS;
1448 }
1449#endif
1450
1451 /*
1452 * Standard timers have a bit more complicated way of calculating
1453 * their interval and such. So, forget omni timers for now.
1454 */
1455 if (pTimer->cCpus > 1)
1456 return VERR_NOT_SUPPORTED;
1457
1458 cJiffies = u64NanoInterval / (RT_NS_1SEC / HZ);
1459 if (cJiffies * (RT_NS_1SEC / HZ) != u64NanoInterval)
1460 cJiffies = 0;
1461
1462 spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
1463 pTimer->aSubTimers[0].u.Std.fFirstAfterChg = true;
1464 pTimer->cJiffies = cJiffies;
1465 ASMAtomicWriteU64(&pTimer->u64NanoInterval, u64NanoInterval);
1466 spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);
1467 IPRT_LINUX_RESTORE_EFL_AC();
1468 return VINF_SUCCESS;
1469}
1470RT_EXPORT_SYMBOL(RTTimerChangeInterval);
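/* Worked example (editorial, assuming HZ=1000 so a jiffy is
   RT_NS_1SEC / HZ = 1000000 ns): a 5000000 ns interval divides exactly, so
   cJiffies becomes 5 and the standard-timer callback advances in whole
   jiffies; 2500000 ns is not an exact multiple, so cJiffies stays 0 and the
   callback falls back to the nanosecond arithmetic instead. */
#if 0
RTTimerChangeInterval(pTimer, UINT64_C(5000000)); /* 5 ms: cJiffies = 5   */
RTTimerChangeInterval(pTimer, UINT64_C(2500000)); /* 2.5 ms: cJiffies = 0 */
#endif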
1471
1472
1473RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
1474{
1475 bool fCanDestroy;
1476 IPRT_LINUX_SAVE_EFL_AC();
1477
1478 /*
1479 * Validate. It's ok to pass NULL pointer.
1480 */
1481 if (pTimer == /*NIL_RTTIMER*/ NULL)
1482 return VINF_SUCCESS;
1483 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
1484 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
1485 RTTIMERLNX_LOG(("destroy %p\n", pTimer));
1486/** @todo We should invalidate the magic here! */
1487
1488 /*
1489 * Stop the timer if it's still active, then destroy it if we can.
1490 */
1491 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
1492 fCanDestroy = rtTimerLnxStop(pTimer, true /*fForDestroy*/);
1493 else
1494 {
1495 uint32_t iCpu = pTimer->cCpus;
1496 if (pTimer->cCpus > 1)
1497 RTSpinlockAcquire(pTimer->hSpinlock);
1498
1499 fCanDestroy = true;
1500 while (iCpu-- > 0)
1501 {
1502 for (;;)
1503 {
1504 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState);
1505 switch (enmState)
1506 {
1507 case RTTIMERLNXSTATE_CALLBACK:
1508 case RTTIMERLNXSTATE_CB_RESTARTING:
1509 case RTTIMERLNXSTATE_CB_STOPPING:
1510 if (!rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_CB_DESTROYING, enmState))
1511 continue;
1512 fCanDestroy = false;
1513 break;
1514
1515 case RTTIMERLNXSTATE_CB_DESTROYING:
1516 AssertMsgFailed(("%d\n", enmState));
1517 fCanDestroy = false;
1518 break;
1519 default:
1520 break;
1521 }
1522 break;
1523 }
1524 }
1525
1526 if (pTimer->cCpus > 1)
1527 RTSpinlockRelease(pTimer->hSpinlock);
1528 }
1529
1530 if (fCanDestroy)
1531 {
1532 /* For paranoid reasons, defer actually destroying the timer when
1533 in atomic or interrupt context. */
1534#if RTLNX_VER_MIN(2,5,32)
1535 if (in_atomic() || in_interrupt())
1536#else
1537 if (in_interrupt())
1538#endif
1539 rtR0LnxWorkqueuePush(&pTimer->DtorWorkqueueItem, rtTimerLnxDestroyDeferred);
1540 else
1541 rtTimerLnxDestroyIt(pTimer);
1542 }
1543
1544 IPRT_LINUX_RESTORE_EFL_AC();
1545 return VINF_SUCCESS;
1546}
1547RT_EXPORT_SYMBOL(RTTimerDestroy);
1548
1549
1550RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
1551{
1552 PRTTIMER pTimer;
1553 RTCPUID iCpu;
1554 unsigned cCpus;
1555 int rc;
1556 IPRT_LINUX_SAVE_EFL_AC();
1557
1558 rtR0LnxWorkqueueFlush(); /* for 2.4 */
1559 *ppTimer = NULL;
1560
1561 /*
1562 * Validate flags.
1563 */
1564 if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
1565 {
1566 IPRT_LINUX_RESTORE_EFL_AC();
1567 return VERR_INVALID_PARAMETER;
1568 }
1569 if ( (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
1570 && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
1571 && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
1572 {
1573 IPRT_LINUX_RESTORE_EFL_AC();
1574 return VERR_CPU_NOT_FOUND;
1575 }
1576
1577 /*
1578 * Allocate the timer handle.
1579 */
1580 cCpus = 1;
1581#ifdef CONFIG_SMP
1582 if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
1583 {
1584 cCpus = RTMpGetMaxCpuId() + 1;
1585 Assert(cCpus <= RTCPUSET_MAX_CPUS); /* On Linux we have a 1:1 relationship between CPU id and set index. */
1586 AssertReturnStmt(u64NanoInterval, IPRT_LINUX_RESTORE_EFL_AC(), VERR_NOT_IMPLEMENTED); /* We don't implement single shot on all cpus, sorry. */
1587 }
1588#endif
1589
1590 rc = RTMemAllocEx(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cCpus]), 0,
1591 RTMEMALLOCEX_FLAGS_ZEROED | RTMEMALLOCEX_FLAGS_ANY_CTX_FREE, (void **)&pTimer);
1592 if (RT_FAILURE(rc))
1593 {
1594 IPRT_LINUX_RESTORE_EFL_AC();
1595 return rc;
1596 }
1597
1598 /*
1599 * Initialize it.
1600 */
1601 pTimer->u32Magic = RTTIMER_MAGIC;
1602 pTimer->hSpinlock = NIL_RTSPINLOCK;
1603 pTimer->fSuspended = true;
1604 pTimer->fHighRes = !!(fFlags & RTTIMER_FLAGS_HIGH_RES);
1605#ifdef CONFIG_SMP
1606 pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
1607 pTimer->fAllCpus = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
1608 pTimer->idCpu = pTimer->fSpecificCpu
1609 ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)
1610 : NIL_RTCPUID;
1611#else
1612 pTimer->fSpecificCpu = !!(fFlags & RTTIMER_FLAGS_CPU_SPECIFIC);
1613 pTimer->idCpu = RTMpCpuId();
1614#endif
1615 pTimer->cCpus = cCpus;
1616 pTimer->pfnTimer = pfnTimer;
1617 pTimer->pvUser = pvUser;
1618 pTimer->u64NanoInterval = u64NanoInterval;
1619 pTimer->cJiffies = u64NanoInterval / (RT_NS_1SEC / HZ);
1620 if (pTimer->cJiffies * (RT_NS_1SEC / HZ) != u64NanoInterval)
1621 pTimer->cJiffies = 0;
1622 spin_lock_init(&pTimer->ChgIntLock);
1623
1624 for (iCpu = 0; iCpu < cCpus; iCpu++)
1625 {
1626#ifdef RTTIMER_LINUX_WITH_HRTIMER
1627 if (pTimer->fHighRes)
1628 {
1629 hrtimer_init(&pTimer->aSubTimers[iCpu].u.Hr.LnxTimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1630 pTimer->aSubTimers[iCpu].u.Hr.LnxTimer.function = rtTimerLinuxHrCallback;
1631 }
1632 else
1633#endif
1634 {
1635#if RTLNX_VER_MIN(4,15,0)
1636 timer_setup(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer, rtTimerLinuxStdCallback, TIMER_PINNED);
1637#elif RTLNX_VER_MIN(4,8,0)
1638 init_timer_pinned(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
1639#else
1640 init_timer(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
1641#endif
1642#if RTLNX_VER_MAX(4,15,0)
1643 pTimer->aSubTimers[iCpu].u.Std.LnxTimer.data = (unsigned long)&pTimer->aSubTimers[iCpu];
1644 pTimer->aSubTimers[iCpu].u.Std.LnxTimer.function = rtTimerLinuxStdCallback;
1645#endif
1646 pTimer->aSubTimers[iCpu].u.Std.LnxTimer.expires = jiffies;
1647 pTimer->aSubTimers[iCpu].u.Std.u64NextTS = 0;
1648 }
1649 pTimer->aSubTimers[iCpu].iTick = 0;
1650 pTimer->aSubTimers[iCpu].pParent = pTimer;
1651 pTimer->aSubTimers[iCpu].enmState = RTTIMERLNXSTATE_STOPPED;
1652 }
1653
1654#ifdef CONFIG_SMP
1655 /*
1656 * If this is running on ALL cpus, we'll have to register a callback
1657 * for MP events (so timers can be started/stopped on cpus going
1658 * online/offline). We also create the spinlock for synchronizing
1659 * stop/start/mp-event.
1660 */
1661 if (cCpus > 1)
1662 {
1663 int rc = RTSpinlockCreate(&pTimer->hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTTimerLnx");
1664 if (RT_SUCCESS(rc))
1665 rc = RTMpNotificationRegister(rtTimerLinuxMpEvent, pTimer);
1666 else
1667 pTimer->hSpinlock = NIL_RTSPINLOCK;
1668 if (RT_FAILURE(rc))
1669 {
1670 RTTimerDestroy(pTimer);
1671 IPRT_LINUX_RESTORE_EFL_AC();
1672 return rc;
1673 }
1674 }
1675#endif /* CONFIG_SMP */
1676
1677 RTTIMERLNX_LOG(("create %p hires=%d fFlags=%#x cCpus=%u\n", pTimer, pTimer->fHighRes, fFlags, cCpus));
1678 *ppTimer = pTimer;
1679 IPRT_LINUX_RESTORE_EFL_AC();
1680 return VINF_SUCCESS;
1681}
1682RT_EXPORT_SYMBOL(RTTimerCreateEx);
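/* Usage sketch (not from the original source): how a ring-0 client could
   drive the API exported above. The callback name and the 1 ms interval are
   illustrative assumptions; the IPRT calls are the ones implemented in this
   file. The callback runs in a timer (softirq-like) context and must not
   block. */
#if 0
static DECLCALLBACK(void) myTickCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
{
    RT_NOREF(pTimer, pvUser, iTick);
    /* Short, non-blocking work only. */
}

static int myTimerExample(void)
{
    PRTTIMER pTimer;
    int rc = RTTimerCreateEx(&pTimer, UINT64_C(1000000) /* 1 ms */,
                             RTTIMER_FLAGS_HIGH_RES, myTickCallback, NULL);
    if (RT_SUCCESS(rc))
    {
        rc = RTTimerStart(pTimer, 0 /* first tick as soon as possible */);
        /* ... do work while the timer ticks ... */
        RTTimerStop(pTimer);
        RTTimerDestroy(pTimer);
    }
    return rc;
}
#endif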
1683
1684
1685RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
1686{
1687#if 0 /** @todo Not sure if this is what we want or not... Add new API for
1688 * querying the resolution of the high res timers? */
1689 struct timespec Ts;
1690 int rc;
1691 IPRT_LINUX_SAVE_EFL_AC();
1692 rc = hrtimer_get_res(CLOCK_MONOTONIC, &Ts);
1693 IPRT_LINUX_RESTORE_EFL_AC();
1694 if (!rc)
1695 {
1696 Assert(!Ts.tv_sec);
1697 return Ts.tv_nsec;
1698 }
1699#endif
1700 /* */
1701#if RTLNX_VER_RANGE(4,9,0, 4,13,0)
1702 /* On 4.9, 4.10 and 4.12 we've observed tstRTR0Timer failures of the omni timer tests
1703 where we get about half of the ticks we want. The failing test is using this value
1704 as interval. So, this is a very, very crude hack to try to make omni timers work
1705 correctly without actually knowing what's going wrong... */
1706 return RT_NS_1SEC * 2 / HZ; /* ns */
1707#else
1708 return RT_NS_1SEC / HZ; /* ns */
1709#endif
1710}
1711RT_EXPORT_SYMBOL(RTTimerGetSystemGranularity);
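/* Worked example (editorial, assuming HZ=250): this reports
   RT_NS_1SEC * 2 / HZ = 8000000 ns on the 4.9-4.12 kernels covered by the
   crude hack above, and RT_NS_1SEC / HZ = 4000000 ns everywhere else. */
#if 0
uint32_t cNsGranularity = RTTimerGetSystemGranularity(); /* tick length in ns */
#endif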
1712
1713
1714RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
1715{
1716 RT_NOREF_PV(u32Request); RT_NOREF_PV(pu32Granted);
1717 return VERR_NOT_SUPPORTED;
1718}
1719RT_EXPORT_SYMBOL(RTTimerRequestSystemGranularity);
1720
1721
1722RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
1723{
1724 RT_NOREF_PV(u32Granted);
1725 return VERR_NOT_SUPPORTED;
1726}
1727RT_EXPORT_SYMBOL(RTTimerReleaseSystemGranularity);
1728
1729
1730RTDECL(bool) RTTimerCanDoHighResolution(void)
1731{
1732#ifdef RTTIMER_LINUX_WITH_HRTIMER
1733 return true;
1734#else
1735 return false;
1736#endif
1737}
1738RT_EXPORT_SYMBOL(RTTimerCanDoHighResolution);
1739