VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/timer-r0drv-linux.c@ 10390

Last change on this file since 10390 was 9529, checked in by vboxsync, 17 years ago

Fixed another wrong spinlock release in the MP handler. Taking CPUs offline and online while the GIP timer is running now works reliably.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 29.4 KB
/* $Id: timer-r0drv-linux.c 9529 2008-06-09 11:10:02Z vboxsync $ */
/** @file
 * IPRT - Timers, Ring-0 Driver, Linux.
 */

/*
 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-linux-kernel.h"

#include <iprt/timer.h>
#include <iprt/time.h>
#include <iprt/mp.h>
#include <iprt/cpuset.h>
#include <iprt/spinlock.h>
#include <iprt/err.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/alloc.h>

#include "internal/magics.h"

#if !defined(RT_USE_LINUX_HRTIMER) \
 && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) \
 && 0 /* disabled because it somehow sucks. */
# define RT_USE_LINUX_HRTIMER
#endif

/* This check must match the ktime usage in rtTimeGetSystemNanoTS() / time-r0drv-linux.c. */
#if defined(RT_USE_LINUX_HRTIMER) \
 && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
# error "RT_USE_LINUX_HRTIMER requires 2.6.16 or later, sorry."
#endif


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * Timer state machine.
 *
 * This is used to try to handle the issues with MP events and
 * timers that run on all CPUs. It's relatively nasty :-/
 */
typedef enum RTTIMERLNXSTATE
{
    /** Stopped. */
    RTTIMERLNXSTATE_STOPPED = 0,
    /** Transient state; next ACTIVE. */
    RTTIMERLNXSTATE_STARTING,
    /** Transient state; next ACTIVE. (not really necessary) */
    RTTIMERLNXSTATE_MP_STARTING,
    /** Active. */
    RTTIMERLNXSTATE_ACTIVE,
    /** Transient state; next STOPPED. */
    RTTIMERLNXSTATE_STOPPING,
    /** Transient state; next STOPPED. */
    RTTIMERLNXSTATE_MP_STOPPING,
    /** The usual 32-bit hack. */
    RTTIMERLNXSTATE_32BIT_HACK = 0x7fffffff
} RTTIMERLNXSTATE;

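/* A sketch of the intended transitions, as I read the code below (not an
   authoritative spec): STOPPED -> STARTING -> ACTIVE via RTTimerStart,
   STOPPED -> MP_STARTING -> ACTIVE via the MP online event,
   ACTIVE -> STOPPING -> STOPPED via RTTimerStop, and
   ACTIVE -> MP_STOPPING -> STOPPED via the MP offline event. */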

/**
 * A Linux sub-timer.
 */
typedef struct RTTIMERLNXSUBTIMER
{
    /** The linux timer structure. */
#ifdef RT_USE_LINUX_HRTIMER
    struct hrtimer          LnxTimer;
#else
    struct timer_list       LnxTimer;
#endif
    /** The start of the current run (ns).
     * This is used to calculate when the timer ought to fire the next time. */
    uint64_t                u64StartTS;
    /** The timestamp of the next time the timer ought to fire (ns). */
    uint64_t                u64NextTS;
    /** The current tick number (since u64StartTS). */
    uint64_t                iTick;
    /** Pointer to the parent timer. */
    PRTTIMER                pParent;
#ifndef RT_USE_LINUX_HRTIMER
    /** The u64NextTS in jiffies. */
    unsigned long           ulNextJiffies;
#endif
    /** The current sub-timer state. */
    RTTIMERLNXSTATE volatile enmState;
} RTTIMERLNXSUBTIMER;
/** Pointer to a linux sub-timer. */
typedef RTTIMERLNXSUBTIMER *PRTTIMERLNXSUBTIMER;
AssertCompileMemberOffset(RTTIMERLNXSUBTIMER, LnxTimer, 0);
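/* Note: rtTimerLinuxCallback() casts the struct hrtimer / timer_list pointer
   it receives straight back to PRTTIMERLNXSUBTIMER, so LnxTimer must remain
   the first member; that is what the AssertCompileMemberOffset enforces. */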


/**
 * The internal representation of a Linux timer handle.
 */
typedef struct RTTIMER
{
    /** Magic.
     * This is RTTIMER_MAGIC, but changes to something else before the timer
     * is destroyed to indicate clearly that the timer is no longer valid. */
    uint32_t volatile       u32Magic;
    /** Spinlock synchronizing the fSuspended and MP event handling.
     * This is NIL_RTSPINLOCK if cCpus == 1. */
    RTSPINLOCK              hSpinlock;
    /** Flag indicating that the timer is suspended. */
    bool volatile           fSuspended;
    /** Whether the timer must run on one specific CPU or not. */
    bool                    fSpecificCpu;
#ifdef CONFIG_SMP
    /** Whether the timer must run on all CPUs or not. */
    bool                    fAllCpus;
#endif /* else: All -> specific on non-SMP kernels */
    /** The CPU it must run on if fSpecificCpu is set. */
    RTCPUID                 idCpu;
    /** The number of CPUs this timer should run on. */
    RTCPUID                 cCpus;
    /** Callback. */
    PFNRTTIMER              pfnTimer;
    /** User argument. */
    void                   *pvUser;
    /** The timer interval. 0 if one-shot. */
    uint64_t                u64NanoInterval;
#ifndef RT_USE_LINUX_HRTIMER
    /** This is set to the number of jiffies between ticks if the interval is
     * an exact number of jiffies. */
    unsigned long           cJiffies;
#endif
    /** Sub-timers.
     * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
     * an entry for all possible cpus. In that case the index will be the same as
     * for the RTCpuSet. */
    RTTIMERLNXSUBTIMER      aSubTimers[1];
} RTTIMER;


/**
 * An argument package for the various start-on-CPU callbacks
 * (rtTimerLnxStartAllOnCpu, rtTimerLinuxMpStartOnCpu and
 * rtTimerLnxStartOnSpecificCpu).
 */
typedef struct RTTIMERLINUXSTARTONCPUARGS
{
    /** The current time (RTTimeNanoTS). */
    uint64_t                u64Now;
    /** When to start firing (delta). */
    uint64_t                u64First;
} RTTIMERLINUXSTARTONCPUARGS;
/** Pointer to a start-on-CPU argument package. */
typedef RTTIMERLINUXSTARTONCPUARGS *PRTTIMERLINUXSTARTONCPUARGS;


/**
 * Sets the state.
 */
DECLINLINE(void) rtTimerLnxSetState(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState)
{
    ASMAtomicWriteU32((uint32_t volatile *)penmState, enmNewState);
}


/**
 * Sets the state if it has a certain value.
 *
 * @return true if xchg was done.
 * @return false if xchg wasn't done.
 */
DECLINLINE(bool) rtTimerLnxCmpXchgState(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState, RTTIMERLNXSTATE enmCurState)
{
    return ASMAtomicCmpXchgU32((uint32_t volatile *)penmState, enmNewState, enmCurState);
}


/**
 * Gets the state.
 */
DECLINLINE(RTTIMERLNXSTATE) rtTimerLnxGetState(RTTIMERLNXSTATE volatile *penmState)
{
    return (RTTIMERLNXSTATE)ASMAtomicUoReadU32((uint32_t volatile *)penmState);
}


#ifdef RT_USE_LINUX_HRTIMER
/**
 * Converts a nano second time stamp to ktime_t.
 *
 * ASSUMES RTTimeNanoTS() is implemented using ktime_get_ts().
 *
 * @returns ktime_t.
 * @param   cNanoSecs   Nanoseconds.
 */
DECLINLINE(ktime_t) rtTimerLnxNanoToKt(uint64_t cNanoSecs)
{
    /* With some luck the compiler optimizes the division out of this... (Bet it doesn't.) */
    return ktime_set(cNanoSecs / 1000000000, cNanoSecs % 1000000000);
}

/**
 * Converts ktime_t to a nano second time stamp.
 *
 * ASSUMES RTTimeNanoTS() is implemented using ktime_get_ts().
 *
 * @returns nano second time stamp.
 * @param   Kt          ktime_t.
 */
DECLINLINE(uint64_t) rtTimerLnxKtToNano(ktime_t Kt)
{
    return ktime_to_ns(Kt);
}

#else /* ! RT_USE_LINUX_HRTIMER */

/**
 * Converts a nano second interval to jiffies.
 *
 * @returns Jiffies.
 * @param   cNanoSecs   Nanoseconds.
 */
DECLINLINE(unsigned long) rtTimerLnxNanoToJiffies(uint64_t cNanoSecs)
{
    /* this can be made even better... */
    if (cNanoSecs > (uint64_t)TICK_NSEC * MAX_JIFFY_OFFSET)
        return MAX_JIFFY_OFFSET;
#if ARCH_BITS == 32
    if (RT_LIKELY(cNanoSecs <= UINT32_MAX))
        return ((uint32_t)cNanoSecs + (TICK_NSEC-1)) / TICK_NSEC;
#endif
    return (cNanoSecs + (TICK_NSEC-1)) / TICK_NSEC;
}
#endif
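/* Note on rtTimerLnxNanoToJiffies: the ARCH_BITS == 32 fast path presumably
   exists to avoid a full 64-bit division on 32-bit hosts when the value fits
   in 32 bits, and the TICK_NSEC-1 rounding makes the conversion err on the
   side of firing late rather than early. */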


/**
 * Starts a sub-timer (RTTimerStart).
 *
 * @param   pSubTimer   The sub-timer to start.
 * @param   u64Now      The current timestamp (RTTimeNanoTS()).
 * @param   u64First    The interval from u64Now to the first time the timer should fire.
 */
static void rtTimerLnxStartSubTimer(PRTTIMERLNXSUBTIMER pSubTimer, uint64_t u64Now, uint64_t u64First)
{
    /*
     * Calc when it should start firing.
     */
    uint64_t u64NextTS = u64Now + u64First;
    pSubTimer->u64StartTS = u64NextTS;
    pSubTimer->u64NextTS = u64NextTS;
    pSubTimer->iTick = 0;

#ifdef RT_USE_LINUX_HRTIMER
    hrtimer_start(&pSubTimer->LnxTimer, rtTimerLnxNanoToKt(u64NextTS), HRTIMER_MODE_ABS);
#else
    {
        unsigned long cJiffies = !u64First ? 0 : rtTimerLnxNanoToJiffies(u64First);
        pSubTimer->ulNextJiffies = jiffies + cJiffies;
        mod_timer(&pSubTimer->LnxTimer, pSubTimer->ulNextJiffies);
    }
#endif

    rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE);
}


/**
 * Stops a sub-timer (RTTimerStop and rtTimerLinuxMpEvent()).
 *
 * @param   pSubTimer   The sub-timer.
 */
static void rtTimerLnxStopSubTimer(PRTTIMERLNXSUBTIMER pSubTimer)
{
#ifdef RT_USE_LINUX_HRTIMER
    hrtimer_cancel(&pSubTimer->LnxTimer);
#else
    if (timer_pending(&pSubTimer->LnxTimer))
        del_timer_sync(&pSubTimer->LnxTimer);
#endif

    rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED);
}
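/* Note: both hrtimer_cancel() and del_timer_sync() wait for a concurrently
   executing callback to finish, which is why callers must not hold the timer
   spinlock here (see the comment in rtTimerLnxStopAll below). */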


#ifdef RT_USE_LINUX_HRTIMER
/**
 * Timer callback function.
 * @returns HRTIMER_NORESTART or HRTIMER_RESTART depending on whether it's a one-shot or interval timer.
 * @param   pHrTimer    Pointer to the sub-timer structure.
 */
static enum hrtimer_restart rtTimerLinuxCallback(struct hrtimer *pHrTimer)
#else
/**
 * Timer callback function.
 * @param   ulUser      Address of the sub-timer structure.
 */
static void rtTimerLinuxCallback(unsigned long ulUser)
#endif
{
#ifdef RT_USE_LINUX_HRTIMER
    enum hrtimer_restart rc;
    PRTTIMERLNXSUBTIMER pSubTimer = (PRTTIMERLNXSUBTIMER)pHrTimer;
#else
    PRTTIMERLNXSUBTIMER pSubTimer = (PRTTIMERLNXSUBTIMER)ulUser;
#endif
    PRTTIMER pTimer = pSubTimer->pParent;

    /*
     * Don't call the handler if the timer has been suspended.
     * Also, when running on all CPUs, make sure we don't call out twice
     * on a CPU because of timer migration.
     *
     * For the specific cpu case, we're just ignoring timer migration for now... (bad)
     */
    if (    ASMAtomicUoReadBool(&pTimer->fSuspended)
#ifdef CONFIG_SMP
        ||  (   pTimer->fAllCpus
             && (pSubTimer - &pTimer->aSubTimers[0]) != RTMpCpuId())
#endif
        )
    {
        rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_ACTIVE);
# ifdef RT_USE_LINUX_HRTIMER
        rc = HRTIMER_NORESTART;
# endif
    }
    else if (!pTimer->u64NanoInterval)
    {
        /*
         * One shot timer, stop it before dispatching it.
         */
        if (pTimer->cCpus == 1)
            ASMAtomicWriteBool(&pTimer->fSuspended, true);
        rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_ACTIVE);
#ifdef RT_USE_LINUX_HRTIMER
        rc = HRTIMER_NORESTART;
#else
        /* detached before we're called, nothing to do for this case. */
#endif

        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
    }
    else
    {
        /*
         * Interval timer, calculate the next timeout and re-arm it.
         *
         * The first time around, we'll re-adjust the u64StartTS to
         * try to prevent some jittering if we were started at a bad time.
         * This may of course backfire with highres timers...
         */
        const uint64_t u64NanoTS = RTTimeNanoTS();
        const uint64_t iTick = ++pSubTimer->iTick;

        if (RT_UNLIKELY(iTick == 1))
        {
#ifdef RT_USE_LINUX_HRTIMER
            pSubTimer->u64StartTS = pSubTimer->u64NextTS = u64NanoTS;//rtTimerLnxKtToNano(pSubTimer->LnxTimer.base->softirq_time);
#else
            pSubTimer->u64StartTS = pSubTimer->u64NextTS = u64NanoTS;
            pSubTimer->ulNextJiffies = jiffies;
#endif
        }

        pSubTimer->u64NextTS += pTimer->u64NanoInterval;

#ifdef RT_USE_LINUX_HRTIMER
        while (pSubTimer->u64NextTS < u64NanoTS)
            pSubTimer->u64NextTS += pTimer->u64NanoInterval;

        pSubTimer->LnxTimer.expires = rtTimerLnxNanoToKt(pSubTimer->u64NextTS);
        rc = HRTIMER_RESTART;
#else
        if (pTimer->cJiffies)
        {
            pSubTimer->ulNextJiffies += pTimer->cJiffies;
            while (pSubTimer->ulNextJiffies < jiffies)
            {
                pSubTimer->ulNextJiffies += pTimer->cJiffies;
                pSubTimer->u64NextTS += pTimer->u64NanoInterval;
            }
        }
        else
        {
            while (pSubTimer->u64NextTS < u64NanoTS)
                pSubTimer->u64NextTS += pTimer->u64NanoInterval;
            pSubTimer->ulNextJiffies = jiffies + rtTimerLnxNanoToJiffies(pSubTimer->u64NextTS - u64NanoTS);
        }

        mod_timer(&pSubTimer->LnxTimer, pSubTimer->ulNextJiffies);
#endif

        /*
         * Run the timer.
         */
        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
    }

#ifdef RT_USE_LINUX_HRTIMER
    return rc;
#endif
}
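/* Note: the catch-up loops in the interval branch above skip forward when the
   callback has fallen behind; missed intervals are not delivered separately,
   the timer is simply re-armed for the next deadline in the future. */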


#ifdef CONFIG_SMP

/**
 * Per-cpu callback function (RTMpOnAll/RTMpOnSpecific).
 *
 * @param   idCpu       The current CPU.
 * @param   pvUser1     Pointer to the timer.
 * @param   pvUser2     Pointer to the argument structure.
 */
static DECLCALLBACK(void) rtTimerLnxStartAllOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
    PRTTIMER pTimer = (PRTTIMER)pvUser1;
    Assert(idCpu < pTimer->cCpus);
    rtTimerLnxStartSubTimer(&pTimer->aSubTimers[idCpu], pArgs->u64Now, pArgs->u64First);
}


/**
 * Worker for RTTimerStart() that takes care of the ugly bits.
 *
 * @returns RTTimerStart() return value.
 * @param   pTimer      The timer.
 * @param   pArgs       The argument structure.
 */
static int rtTimerLnxStartAll(PRTTIMER pTimer, PRTTIMERLINUXSTARTONCPUARGS pArgs)
{
    RTSPINLOCKTMP Tmp;
    RTCPUID iCpu;
    RTCPUSET OnlineSet;
    RTCPUSET OnlineSet2;
    int rc2;

    /*
     * Prepare all the sub-timers for the startup and then flag the timer
     * as a whole as non-suspended. Make sure we get them all set up before
     * clearing fSuspended, as the MP handler will be waiting on this
     * should something happen while we're looping.
     */
    RTSpinlockAcquire(pTimer->hSpinlock, &Tmp);

    do
    {
        RTMpGetOnlineSet(&OnlineSet);
        for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
        {
            Assert(pTimer->aSubTimers[iCpu].enmState != RTTIMERLNXSTATE_MP_STOPPING);
            rtTimerLnxSetState(&pTimer->aSubTimers[iCpu].enmState,
                               RTCpuSetIsMember(&OnlineSet, iCpu)
                               ? RTTIMERLNXSTATE_STARTING
                               : RTTIMERLNXSTATE_STOPPED);
        }
    } while (!RTCpuSetIsEqual(&OnlineSet, RTMpGetOnlineSet(&OnlineSet2)));

    ASMAtomicWriteBool(&pTimer->fSuspended, false);

    RTSpinlockRelease(pTimer->hSpinlock, &Tmp);

    /*
     * Start them (can't find any exported function that allows me to
     * do this without the cross calls).
     */
    pArgs->u64Now = RTTimeNanoTS();
    rc2 = RTMpOnAll(rtTimerLnxStartAllOnCpu, pTimer, pArgs);
    AssertRC(rc2); /* screw this if it fails. */

    /*
     * Reset the sub-timers that didn't start up (ALL CPUs case).
     * CPUs that went offline between the online-set snapshot above and
     * the RTMpOnAll call will still be in the STARTING state here.
     */
    RTSpinlockAcquire(pTimer->hSpinlock, &Tmp);

    for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
        if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_STARTING))
        {
            /** @todo very odd case for a rainy day. CPUs that temporarily went offline while
             * we were between calls need to be nudged as the MP handler will ignore events for
             * them because of the STARTING state. This is an extremely unlikely case - not that
             * that means anything in my experience... ;-) */
        }

    RTSpinlockRelease(pTimer->hSpinlock, &Tmp);

    return VINF_SUCCESS;
}
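/* The do/while above re-reads the online set until it is stable across the
   whole marking loop, so a CPU coming or going mid-loop cannot leave its
   sub-timer in the wrong initial state. */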


/**
 * Worker for RTTimerStop() that takes care of the ugly SMP bits.
 *
 * @returns RTTimerStop() return value.
 * @param   pTimer      The timer (valid).
 */
static int rtTimerLnxStopAll(PRTTIMER pTimer)
{
    RTCPUID iCpu;
    RTSPINLOCKTMP Tmp;

    /*
     * Mark the timer as suspended and flag all timers as stopping, except
     * for those being stopped by an MP event.
     */
    RTSpinlockAcquire(pTimer->hSpinlock, &Tmp);

    ASMAtomicWriteBool(&pTimer->fSuspended, true);
    for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
    {
        RTTIMERLNXSTATE enmState;
        do
        {
            enmState = rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState);
            if (    enmState == RTTIMERLNXSTATE_STOPPED
                ||  enmState == RTTIMERLNXSTATE_MP_STOPPING)
                break;
            Assert(enmState == RTTIMERLNXSTATE_ACTIVE);
        } while (!rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_STOPPING, enmState));
    }

    RTSpinlockRelease(pTimer->hSpinlock, &Tmp);

    /*
     * Do the actual stopping. Fortunately, this doesn't require any IPIs.
     * Unfortunately it cannot be done synchronously from within the spinlock,
     * because we might end up actively waiting for a handler to complete.
     */
    for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
        if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) == RTTIMERLNXSTATE_STOPPING)
            rtTimerLnxStopSubTimer(&pTimer->aSubTimers[iCpu]);

    return VINF_SUCCESS;
}


/**
 * Per-cpu callback function (RTMpOnSpecific) used by rtTimerLinuxMpEvent()
 * to start a sub-timer on a cpu that has just come online.
 *
 * @param   idCpu       The current CPU.
 * @param   pvUser1     Pointer to the timer.
 * @param   pvUser2     Pointer to the argument structure.
 */
static DECLCALLBACK(void) rtTimerLinuxMpStartOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
    PRTTIMER pTimer = (PRTTIMER)pvUser1;
    RTSPINLOCK hSpinlock;
    Assert(idCpu < pTimer->cCpus);

    /*
     * We have to be kind of careful here as we might be racing RTTimerStop
     * (and/or RTTimerDestroy), thus the paranoia.
     */
    hSpinlock = pTimer->hSpinlock;
    if (    hSpinlock != NIL_RTSPINLOCK
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTSPINLOCKTMP Tmp;
        RTSpinlockAcquire(hSpinlock, &Tmp);

        if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
            &&  pTimer->u32Magic == RTTIMER_MAGIC)
        {
            /* We're sane and the timer is not suspended yet. */
            PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
            if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
                rtTimerLnxStartSubTimer(pSubTimer, pArgs->u64Now, pArgs->u64First);
        }

        RTSpinlockRelease(hSpinlock, &Tmp);
    }
}


/**
 * MP event notification callback.
 *
 * @param   enmEvent    The event.
 * @param   idCpu       The cpu it applies to.
 * @param   pvUser      The timer.
 */
static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
{
    PRTTIMER pTimer = (PRTTIMER)pvUser;
    PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
    RTSPINLOCK hSpinlock;
    RTSPINLOCKTMP Tmp;

    Assert(idCpu < pTimer->cCpus);

    /*
     * Some initial paranoia.
     */
    if (pTimer->u32Magic != RTTIMER_MAGIC)
        return;
    hSpinlock = pTimer->hSpinlock;
    if (hSpinlock == NIL_RTSPINLOCK)
        return;

    RTSpinlockAcquire(hSpinlock, &Tmp);

    /* Is it active? */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        switch (enmEvent)
        {
            /*
             * Try to do it without leaving the spin lock, but if we have to, retake it
             * when we're on the right cpu.
             */
            case RTMPEVENT_ONLINE:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
                {
                    RTTIMERLINUXSTARTONCPUARGS Args;
                    Args.u64Now = RTTimeNanoTS();
                    Args.u64First = 0;

                    if (RTMpCpuId() == idCpu)
                        rtTimerLnxStartSubTimer(pSubTimer, Args.u64Now, Args.u64First);
                    else
                    {
                        rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED); /* we'll recheck it. */
                        RTSpinlockRelease(hSpinlock, &Tmp);

                        RTMpOnSpecific(idCpu, rtTimerLinuxMpStartOnCpu, pTimer, &Args);
                        return; /* we've left the spinlock */
                    }
                }
                break;

            /*
             * The CPU is (going) offline, make sure the sub-timer is stopped.
             *
             * Linux will migrate it to a different CPU, but we don't want this. The
             * timer function is checking for this.
             */
            case RTMPEVENT_OFFLINE:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STOPPING, RTTIMERLNXSTATE_ACTIVE))
                {
                    RTSpinlockRelease(hSpinlock, &Tmp);

                    rtTimerLnxStopSubTimer(pSubTimer);
                    return; /* we've left the spinlock */
                }
                break;
        }
    }

    RTSpinlockRelease(hSpinlock, &Tmp);
}

#endif /* CONFIG_SMP */
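/* Every path out of rtTimerLinuxMpEvent() releases hSpinlock exactly once,
   either at the bottom or right before the early returns above; this balance
   appears to be what the changeset comment about fixing a wrong spinlock
   release in the MP handler refers to. */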


/**
 * Callback function used by RTTimerStart via RTMpOnSpecific to start
 * a timer running on a specific CPU.
 *
 * @param   idCpu       The current CPU.
 * @param   pvUser1     Pointer to the timer.
 * @param   pvUser2     Pointer to the argument structure.
 */
static DECLCALLBACK(void) rtTimerLnxStartOnSpecificCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
    PRTTIMER pTimer = (PRTTIMER)pvUser1;
    rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], pArgs->u64Now, pArgs->u64First);
}


RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMERLINUXSTARTONCPUARGS Args;
    int rc2;

    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        return VERR_TIMER_ACTIVE;

    Args.u64First = u64First;
#ifdef CONFIG_SMP
    /*
     * Omni timer?
     */
    if (pTimer->fAllCpus)
        return rtTimerLnxStartAll(pTimer, &Args);
#endif

    /*
     * Simple timer - Pretty straightforward.
     */
    Args.u64Now = RTTimeNanoTS();
    rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STARTING);
    ASMAtomicWriteBool(&pTimer->fSuspended, false);
    if (!pTimer->fSpecificCpu)
        rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], Args.u64Now, Args.u64First);
    else
    {
        rc2 = RTMpOnSpecific(pTimer->idCpu, rtTimerLnxStartOnSpecificCpu, pTimer, &Args);
        if (RT_FAILURE(rc2))
        {
            /* Suspend it, the cpu id is probably invalid or offline. */
            ASMAtomicWriteBool(&pTimer->fSuspended, true);
            rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPED);
            return rc2;
        }
    }

    return VINF_SUCCESS;
}


RTDECL(int) RTTimerStop(PRTTIMER pTimer)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    if (ASMAtomicUoReadBool(&pTimer->fSuspended))
        return VERR_TIMER_SUSPENDED;

#ifdef CONFIG_SMP
    /*
     * Omni timer?
     */
    if (pTimer->fAllCpus)
        return rtTimerLnxStopAll(pTimer);
#endif

    /*
     * Simple timer.
     */
    ASMAtomicWriteBool(&pTimer->fSuspended, true);
    rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPING);
    rtTimerLnxStopSubTimer(&pTimer->aSubTimers[0]);

    return VINF_SUCCESS;
}


RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
{
    RTSPINLOCK hSpinlock;

    /* It's ok to pass NULL pointer. */
    if (pTimer == /*NIL_RTTIMER*/ NULL)
        return VINF_SUCCESS;
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Remove the MP notifications first because it'll reduce the risk of
     * us overtaking any MP event that might theoretically be racing us here.
     */
    hSpinlock = pTimer->hSpinlock;
#ifdef CONFIG_SMP
    if (    pTimer->cCpus > 1
        &&  hSpinlock != NIL_RTSPINLOCK)
    {
        int rc = RTMpNotificationDeregister(rtTimerLinuxMpEvent, pTimer);
        AssertRC(rc);
    }
#endif /* CONFIG_SMP */

    /*
     * Stop the timer if it's running.
     */
    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        RTTimerStop(pTimer);

    /*
     * Uninitialize the structure and free the associated resources.
     * The spinlock goes last.
     */
    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
    RTMemFree(pTimer);
    if (hSpinlock != NIL_RTSPINLOCK)
        RTSpinlockDestroy(hSpinlock);

    return VINF_SUCCESS;
}
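/* Ordering note: MP notifications are deregistered first so no new event
   callbacks can start, and the spinlock is destroyed last, presumably so a
   racing callback that already fetched hSpinlock can still take it and back
   out after re-checking u32Magic. */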


RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, unsigned fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    PRTTIMER pTimer;
    RTCPUID iCpu;
    unsigned cCpus;

    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
        return VERR_INVALID_PARAMETER;
    if (    (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        &&  (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
        &&  !RTMpIsCpuOnline(fFlags & RTTIMER_FLAGS_CPU_MASK))
        return (fFlags & RTTIMER_FLAGS_CPU_MASK) > RTMpGetMaxCpuId()
             ? VERR_CPU_NOT_FOUND
             : VERR_CPU_OFFLINE;

    /*
     * Allocate the timer handle.
     */
    cCpus = 1;
#ifdef CONFIG_SMP
    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
    {
        cCpus = RTMpGetMaxCpuId() + 1;
        Assert(cCpus <= RTCPUSET_MAX_CPUS); /* On Linux we have a 1:1 relationship between cpuid and set index. */
        AssertReturn(u64NanoInterval, VERR_NOT_IMPLEMENTED); /* We don't implement single shot on all cpus, sorry. */
    }
#endif

    pTimer = (PRTTIMER)RTMemAllocZ(RT_OFFSETOF(RTTIMER, aSubTimers[cCpus]));
    if (!pTimer)
        return VERR_NO_MEMORY;

    /*
     * Initialize it.
     */
    pTimer->u32Magic = RTTIMER_MAGIC;
    pTimer->hSpinlock = NIL_RTSPINLOCK;
    pTimer->fSuspended = true;
#ifdef CONFIG_SMP
    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
    pTimer->fAllCpus = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    pTimer->idCpu = fFlags & RTTIMER_FLAGS_CPU_MASK;
#else
    pTimer->fSpecificCpu = !!(fFlags & RTTIMER_FLAGS_CPU_SPECIFIC);
    pTimer->idCpu = RTMpCpuId();
#endif
    pTimer->cCpus = cCpus;
    pTimer->pfnTimer = pfnTimer;
    pTimer->pvUser = pvUser;
    pTimer->u64NanoInterval = u64NanoInterval;
#ifndef RT_USE_LINUX_HRTIMER
    pTimer->cJiffies = u64NanoInterval / RTTimerGetSystemGranularity();
    if (pTimer->cJiffies * RTTimerGetSystemGranularity() != u64NanoInterval)
        pTimer->cJiffies = 0;
#endif

    for (iCpu = 0; iCpu < cCpus; iCpu++)
    {
#ifdef RT_USE_LINUX_HRTIMER
        hrtimer_init(&pTimer->aSubTimers[iCpu].LnxTimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        pTimer->aSubTimers[iCpu].LnxTimer.function = rtTimerLinuxCallback;
#else
        init_timer(&pTimer->aSubTimers[iCpu].LnxTimer);
        pTimer->aSubTimers[iCpu].LnxTimer.data = (unsigned long)&pTimer->aSubTimers[iCpu];
        pTimer->aSubTimers[iCpu].LnxTimer.function = rtTimerLinuxCallback;
        pTimer->aSubTimers[iCpu].LnxTimer.expires = jiffies;
#endif
        pTimer->aSubTimers[iCpu].u64StartTS = 0;
        pTimer->aSubTimers[iCpu].u64NextTS = 0;
        pTimer->aSubTimers[iCpu].iTick = 0;
        pTimer->aSubTimers[iCpu].pParent = pTimer;
        pTimer->aSubTimers[iCpu].enmState = RTTIMERLNXSTATE_STOPPED;
    }

#ifdef CONFIG_SMP
    /*
     * If this is running on ALL cpus, we'll have to register a callback
     * for MP events (so timers can be started/stopped on cpus going
     * online/offline). We also create the spinlock for synchronizing
     * stop/start/mp-event.
     */
    if (cCpus > 1)
    {
        int rc = RTSpinlockCreate(&pTimer->hSpinlock);
        if (RT_SUCCESS(rc))
            rc = RTMpNotificationRegister(rtTimerLinuxMpEvent, pTimer);
        else
            pTimer->hSpinlock = NIL_RTSPINLOCK;
        if (RT_FAILURE(rc))
        {
            RTTimerDestroy(pTimer);
            return rc;
        }
    }
#endif /* CONFIG_SMP */

    *ppTimer = pTimer;
    return VINF_SUCCESS;
}
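
/*
 * A minimal usage sketch (illustration only, not part of the original file;
 * myTick is a hypothetical callback matching the PFNRTTIMER signature used
 * by pfnTimer above):
 *
 *   static DECLCALLBACK(void) myTick(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
 *   {
 *       NOREF(pTimer); NOREF(pvUser); NOREF(iTick); // periodic work goes here
 *   }
 *
 *   PRTTIMER pTimer;
 *   int rc = RTTimerCreateEx(&pTimer, 1000000, 0, myTick, NULL); // 1ms interval
 *   if (RT_SUCCESS(rc))
 *   {
 *       rc = RTTimerStart(pTimer, 0); // 0 = fire as soon as possible
 *       ...
 *       RTTimerStop(pTimer);
 *       RTTimerDestroy(pTimer);
 *   }
 */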


RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
{
#ifdef RT_USE_LINUX_HRTIMER
    struct timespec Ts;
    int rc = hrtimer_get_res(CLOCK_MONOTONIC, &Ts);
    if (!rc)
    {
        Assert(!Ts.tv_sec);
        return Ts.tv_nsec;
    }
#endif
    return 1000000000 / HZ; /* ns */
}
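/* In the non-hrtimer build the granularity is simply the jiffy length,
   1000000000 / HZ ns; this is also the value the cJiffies optimization in
   RTTimerCreateEx divides the interval by. */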


RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
{
    return VERR_NOT_SUPPORTED;
}


RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
{
    return VERR_NOT_SUPPORTED;
}
