VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/timer-r0drv-solaris.c@73265

Last change on this file since 73265 was 73097, checked in by vboxsync, 6 years ago

*: Made RT_UOFFSETOF, RT_OFFSETOF, RT_UOFFSETOF_ADD and RT_OFFSETOF_ADD work like builtin_offsetof() and require compile time resolvable requests, adding RT_UOFFSETOF_DYN for the dynamic questions that can only be answered at runtime.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.6 KB
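
The change noted above introduced RT_UOFFSETOF_DYN, which this file relies on in RTTimerCreateEx() to size the omni timer's variable-length per-CPU array. Below is a minimal sketch of that size calculation, assuming a hypothetical cCpus variable; RTMpGetCount(), RTMemAllocZ() and RT_UOFFSETOF_DYN are the IPRT facilities the file itself uses:

/* Hypothetical sketch: allocation size for an omni timer covering cCpus CPUs.
 * RT_UOFFSETOF_DYN accepts a runtime array index, whereas RT_UOFFSETOF now
 * requires a compile-time resolvable request. */
uint32_t cCpus   = RTMpGetCount();
size_t   cbAlloc = RT_UOFFSETOF_DYN(RTTIMER, u.Omni.aPerCpu[cCpus]);
PRTTIMER pTimer  = (PRTTIMER)RTMemAllocZ(cbAlloc); /* zero-initialized handle */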
1/* $Id: timer-r0drv-solaris.c 73097 2018-07-12 21:06:33Z vboxsync $ */
2/** @file
3 * IPRT - Timer, Ring-0 Driver, Solaris.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-solaris-kernel.h"
32#include "internal/iprt.h"
33#include <iprt/timer.h>
34
35#include <iprt/asm.h>
36#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
37# include <iprt/asm-amd64-x86.h>
38#endif
39#include <iprt/assert.h>
40#include <iprt/err.h>
41#include <iprt/mem.h>
42#include <iprt/mp.h>
43#include <iprt/spinlock.h>
44#include <iprt/time.h>
45#include <iprt/thread.h>
46#include "internal/magics.h"
47
48
49/*********************************************************************************************************************************
50* Structures and Typedefs *
51*********************************************************************************************************************************/
52/**
53 * The internal representation of a Solaris timer handle.
54 */
55typedef struct RTTIMER
56{
57 /** Magic.
58 * This is RTTIMER_MAGIC, but changes to something else before the timer
59 * is destroyed to indicate clearly that the thread should exit. */
60 uint32_t volatile u32Magic;
61 /** Reference counter. */
62 uint32_t volatile cRefs;
63 /** Flag indicating that the timer is suspended (hCyclicId should be
64 * CYCLIC_NONE). */
65 bool volatile fSuspended;
66 /** Flag indicating that the timer was suspended from the timer callback and
67 * therefore the hCyclicId may still be valid. */
68 bool volatile fSuspendedFromTimer;
69 /** Flag indicating that the timer interval was changed and that it requires
70 * manual expiration time programming for each callout. */
71 bool volatile fIntervalChanged;
72 /** Whether the timer must run on all CPUs or not. */
73 uint8_t fAllCpus;
74 /** Whether the timer must run on a specific CPU or not. */
75 uint8_t fSpecificCpu;
76 /** The CPU it must run on if fSpecificCpu is set. */
77 uint32_t iCpu;
78 /** The nanosecond interval for repeating timers. */
79 uint64_t volatile cNsInterval;
80 /** Cyclic timer Id. This is CYCLIC_NONE if no active timer.
81 * @remarks Please keep in mind that cyclic may call us back before the
82 * cyclic_add/cyclic_add_omni functions return, so don't use this
83 * unguarded with cyclic_reprogram. */
84 cyclic_id_t hCyclicId;
85 /** The user callback. */
86 PFNRTTIMER pfnTimer;
87 /** The argument for the user callback. */
88 void *pvUser;
89 /** Union with timer type specific data. */
90 union
91 {
92 /** Single timer (fAllCpus == false). */
93 struct
94 {
95 /** Timer ticks. */
96 uint64_t u64Tick;
97 /** The next tick when fIntervalChanged is true, otherwise 0. */
98 uint64_t nsNextTick;
99 /** The (interrupt) thread currently active in the callback. */
100 kthread_t * volatile pActiveThread;
101 } Single;
102
103 /** Omni timer (fAllCpus == true). */
104 struct
105 {
106 /** Absolute timestamp of when the timer should fire first when starting up. */
107 uint64_t u64When;
108 /** Array of per CPU data (variable size). */
109 struct
110 {
111 /** Timer ticks (reinitialized when online'd). */
112 uint64_t u64Tick;
113 /** The (interrupt) thread currently active in the callback. */
114 kthread_t * volatile pActiveThread;
115 /** The next tick when fIntervalChanged is true, otherwise 0. */
116 uint64_t nsNextTick;
117 } aPerCpu[1];
118 } Omni;
119 } u;
120} RTTIMER;
121
122
123/*********************************************************************************************************************************
124* Defined Constants And Macros *
125*********************************************************************************************************************************/
126/** Validates that the timer is valid. */
127#define RTTIMER_ASSERT_VALID_RET(pTimer) \
128 do \
129 { \
130 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE); \
131 AssertMsgReturn((pTimer)->u32Magic == RTTIMER_MAGIC, ("pTimer=%p u32Magic=%x expected %x\n", (pTimer), (pTimer)->u32Magic, RTTIMER_MAGIC), \
132 VERR_INVALID_HANDLE); \
133 } while (0)
134
135
136/*********************************************************************************************************************************
137* Internal Functions *
138*********************************************************************************************************************************/
139static void rtTimerSolSingleCallbackWrapper(void *pvArg);
140static void rtTimerSolStopIt(PRTTIMER pTimer);
141
142
143/**
144 * Retains a reference to the timer.
145 *
146 * @returns New reference counter value.
147 * @param pTimer The timer.
148 */
149DECLINLINE(uint32_t) rtTimerSolRetain(PRTTIMER pTimer)
150{
151 return ASMAtomicIncU32(&pTimer->cRefs);
152}
153
154
155/**
156 * Destroys the timer when the reference counter has reached zero.
157 *
158 * @returns 0 (new reference counter value).
159 * @param pTimer The timer.
160 */
161static uint32_t rtTimeSolReleaseCleanup(PRTTIMER pTimer)
162{
163 Assert(pTimer->hCyclicId == CYCLIC_NONE);
164 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
165 RTMemFree(pTimer);
166 return 0;
167}
168
169
170/**
171 * Releases a reference to the timer.
172 *
173 * @returns New reference counter value.
174 * @param pTimer The timer.
175 */
176DECLINLINE(uint32_t) rtTimerSolRelease(PRTTIMER pTimer)
177{
178 uint32_t cRefs = ASMAtomicDecU32(&pTimer->cRefs);
179 if (!cRefs)
180 return rtTimeSolReleaseCleanup(pTimer);
181 return cRefs;
182}
183
184
185/**
186 * Callback wrapper for single-CPU timers.
187 *
188 * @param pvArg Opaque pointer to the timer.
189 *
190 * @remarks This will be executed in interrupt context but only at the specified
191 * level, i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
192 * cyclic subsystem here, and neither should pfnTimer().
193 */
194static void rtTimerSolSingleCallbackWrapper(void *pvArg)
195{
196 PRTTIMER pTimer = (PRTTIMER)pvArg;
197 AssertPtrReturnVoid(pTimer);
198 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
199 Assert(!pTimer->fAllCpus);
200
201 /* Make sure one-shots do not fire another time. */
202 Assert( !pTimer->fSuspended
203 || pTimer->cNsInterval != 0);
204
205 if (!pTimer->fSuspendedFromTimer)
206 {
207 /* Make sure we are firing on the right CPU. */
208 Assert( !pTimer->fSpecificCpu
209 || pTimer->iCpu == RTMpCpuId());
210
211 /* For one-shot timers, we allow the callback to restart them. */
212 if (pTimer->cNsInterval == 0)
213 pTimer->fSuspendedFromTimer = true;
214
215 /*
216 * Perform the callout.
217 */
218 pTimer->u.Single.pActiveThread = curthread;
219
220 uint64_t u64Tick = ++pTimer->u.Single.u64Tick;
221 pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
222
223 pTimer->u.Single.pActiveThread = NULL;
224
225 if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
226 {
227 if ( !pTimer->fIntervalChanged
228 || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
229 return;
230
231 /*
232 * The interval was changed, we need to set the expiration time
233 * ourselves before returning. This comes at a slight cost,
234 * which is why we don't do it all the time.
235 */
236 if (pTimer->u.Single.nsNextTick)
237 pTimer->u.Single.nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
238 else
239 pTimer->u.Single.nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
240 cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Single.nsNextTick);
241 return;
242 }
243
244 /*
245 * The timer has been suspended, set the expiration time to infinity.
246 */
247 }
248 if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
249 cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
250}
251
252
253/**
254 * Callback wrapper for Omni-CPU timers.
255 *
256 * @param pvArg Opaque pointer to the timer.
257 *
258 * @remarks This will be executed in interrupt context but only at the specified
259 * level, i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
260 * cyclic subsystem here, and neither should pfnTimer().
261 */
262static void rtTimerSolOmniCallbackWrapper(void *pvArg)
263{
264 PRTTIMER pTimer = (PRTTIMER)pvArg;
265 AssertPtrReturnVoid(pTimer);
266 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
267 Assert(pTimer->fAllCpus);
268
269 if (!pTimer->fSuspendedFromTimer)
270 {
271 /*
272 * Perform the callout.
273 */
274 uint32_t const iCpu = CPU->cpu_id;
275
276 pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = curthread;
277 uint64_t u64Tick = ++pTimer->u.Omni.aPerCpu[iCpu].u64Tick;
278
279 pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
280
281 pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = NULL;
282
283 if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
284 {
285 if ( !pTimer->fIntervalChanged
286 || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
287 return;
288
289 /*
290 * The interval was changed, we need to set the expiration time
291 * ourselves before returning. This comes at a slight cost,
292 * which is why we don't do it all the time.
293 *
294 * Note! The cyclic_reprogram call only affects the omni cyclic
295 * component for this CPU.
296 */
297 if (pTimer->u.Omni.aPerCpu[iCpu].nsNextTick)
298 pTimer->u.Omni.aPerCpu[iCpu].nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
299 else
300 pTimer->u.Omni.aPerCpu[iCpu].nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
301 cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Omni.aPerCpu[iCpu].nsNextTick);
302 return;
303 }
304
305 /*
306 * The timer has been suspended, set the expiration time to infinity.
307 */
308 }
309 if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
310 cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
311}
312
313
314/**
315 * Omni-CPU cyclic online event. This is called before the omni cycle begins to
316 * fire on the specified CPU.
317 *
318 * @param pvArg Opaque pointer to the timer.
319 * @param pCpu Pointer to the CPU on which it will fire.
320 * @param pCyclicHandler Pointer to a cyclic handler to add to the CPU
321 * specified in @a pCpu.
322 * @param pCyclicTime Pointer to the cyclic time and interval object.
323 *
324 * @remarks We -CANNOT- call back into the cyclic subsystem here, we can however
325 * block (sleep).
326 */
327static void rtTimerSolOmniCpuOnline(void *pvArg, cpu_t *pCpu, cyc_handler_t *pCyclicHandler, cyc_time_t *pCyclicTime)
328{
329 PRTTIMER pTimer = (PRTTIMER)pvArg;
330 AssertPtrReturnVoid(pTimer);
331 AssertPtrReturnVoid(pCpu);
332 AssertPtrReturnVoid(pCyclicHandler);
333 AssertPtrReturnVoid(pCyclicTime);
334 uint32_t const iCpu = pCpu->cpu_id; /* Note! CPU is not necessarily the same as pCpu. */
335
336 pTimer->u.Omni.aPerCpu[iCpu].u64Tick = 0;
337 pTimer->u.Omni.aPerCpu[iCpu].nsNextTick = 0;
338
339 pCyclicHandler->cyh_func = (cyc_func_t)rtTimerSolOmniCallbackWrapper;
340 pCyclicHandler->cyh_arg = pTimer;
341 pCyclicHandler->cyh_level = CY_LOCK_LEVEL;
342
343 uint64_t u64Now = RTTimeSystemNanoTS();
344 if (pTimer->u.Omni.u64When < u64Now)
345 pCyclicTime->cyt_when = u64Now + pTimer->cNsInterval / 2;
346 else
347 pCyclicTime->cyt_when = pTimer->u.Omni.u64When;
348
349 pCyclicTime->cyt_interval = pTimer->cNsInterval;
350}
351
352
353RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
354{
355 RT_ASSERT_PREEMPTIBLE();
356 *ppTimer = NULL;
357
358 /*
359 * Validate flags.
360 */
361 if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
362 return VERR_INVALID_PARAMETER;
363
364 if ( (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
365 && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
366 && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
367 return VERR_CPU_NOT_FOUND;
368
369 /* One-shot omni timers are not supported by the cyclic system. */
370 if ( (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
371 && u64NanoInterval == 0)
372 return VERR_NOT_SUPPORTED;
373
374 /*
375 * Allocate and initialize the timer handle. The omni variant has a
376 * variable-sized array of tick counts, thus the size calculation.
377 */
378 PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ( (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
379 ? RT_UOFFSETOF_DYN(RTTIMER, u.Omni.aPerCpu[RTMpGetCount()])
380 : sizeof(RTTIMER));
381 if (!pTimer)
382 return VERR_NO_MEMORY;
383
384 pTimer->u32Magic = RTTIMER_MAGIC;
385 pTimer->cRefs = 1;
386 pTimer->fSuspended = true;
387 pTimer->fSuspendedFromTimer = false;
388 pTimer->fIntervalChanged = false;
389 if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
390 {
391 pTimer->fAllCpus = true;
392 pTimer->fSpecificCpu = false;
393 pTimer->iCpu = UINT32_MAX;
394 }
395 else if (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
396 {
397 pTimer->fAllCpus = false;
398 pTimer->fSpecificCpu = true;
399 pTimer->iCpu = fFlags & RTTIMER_FLAGS_CPU_MASK; /* ASSUMES: index == cpuid */
400 }
401 else
402 {
403 pTimer->fAllCpus = false;
404 pTimer->fSpecificCpu = false;
405 pTimer->iCpu = UINT32_MAX;
406 }
407 pTimer->cNsInterval = u64NanoInterval;
408 pTimer->pfnTimer = pfnTimer;
409 pTimer->pvUser = pvUser;
410 pTimer->hCyclicId = CYCLIC_NONE;
411
412 *ppTimer = pTimer;
413 return VINF_SUCCESS;
414}
415
416
417/**
418 * Checks if the calling thread is currently executing the timer procedure for
419 * the given timer.
420 *
421 * @returns true if it is, false if it isn't.
422 * @param pTimer The timer in question.
423 */
424DECLINLINE(bool) rtTimerSolIsCallingFromTimerProc(PRTTIMER pTimer)
425{
426 kthread_t *pCurThread = curthread;
427 AssertReturn(pCurThread, false); /* serious paranoia */
428
429 if (!pTimer->fAllCpus)
430 return pTimer->u.Single.pActiveThread == pCurThread;
431 return pTimer->u.Omni.aPerCpu[CPU->cpu_id].pActiveThread == pCurThread;
432}
433
434
435RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
436{
437 if (pTimer == NULL)
438 return VINF_SUCCESS;
439 RTTIMER_ASSERT_VALID_RET(pTimer);
440 RT_ASSERT_INTS_ON();
441
442 /*
443 * It is not possible to destroy a timer from its callback function.
444 * Cyclic makes that impossible (or at least extremely risky).
445 */
446 AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);
447
448 /*
449 * Invalidate the handle, make sure it's stopped and free the associated resources.
450 */
451 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
452
453 if ( !pTimer->fSuspended
454 || pTimer->hCyclicId != CYCLIC_NONE) /* 2nd check shouldn't happen */
455 rtTimerSolStopIt(pTimer);
456
457 rtTimerSolRelease(pTimer);
458 return VINF_SUCCESS;
459}
460
461
462RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
463{
464 RTTIMER_ASSERT_VALID_RET(pTimer);
465 RT_ASSERT_INTS_ON();
466
467 /*
468 * It's not possible to restart a one-shot timer from its callback function,
469 * at least not at the moment.
470 */
471 AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);
472
473 mutex_enter(&cpu_lock);
474
475 /*
476 * Make sure it's not active already. If it was suspended from a timer
477 * callback function, we need to do some cleanup work here before we can
478 * restart the timer.
479 */
480 if (!pTimer->fSuspended)
481 {
482 if (!pTimer->fSuspendedFromTimer)
483 {
484 mutex_exit(&cpu_lock);
485 return VERR_TIMER_ACTIVE;
486 }
487 cyclic_remove(pTimer->hCyclicId);
488 pTimer->hCyclicId = CYCLIC_NONE;
489 }
490
491 pTimer->fSuspended = false;
492 pTimer->fSuspendedFromTimer = false;
493 pTimer->fIntervalChanged = false;
494 if (pTimer->fAllCpus)
495 {
496 /*
497 * Set up an omni (all-CPU) timer. The omni-CPU online event will fire,
498 * and from there we set up periodic timers per CPU.
499 */
500 pTimer->u.Omni.u64When = RTTimeSystemNanoTS() + (u64First ? u64First : pTimer->cNsInterval);
501
502 cyc_omni_handler_t HandlerOmni;
503 HandlerOmni.cyo_online = rtTimerSolOmniCpuOnline;
504 HandlerOmni.cyo_offline = NULL;
505 HandlerOmni.cyo_arg = pTimer;
506
507 pTimer->hCyclicId = cyclic_add_omni(&HandlerOmni);
508 }
509 else
510 {
511 cyc_handler_t Handler;
512 cyc_time_t FireTime;
513
514 /*
515 * Set up a single-CPU timer. If a specific CPU was requested, it
516 * must be online or the timer cannot start.
517 */
518 if ( pTimer->fSpecificCpu
519 && !RTMpIsCpuOnline(pTimer->iCpu)) /* ASSUMES: index == cpuid */
520 {
521 pTimer->fSuspended = true;
522
523 mutex_exit(&cpu_lock);
524 return VERR_CPU_OFFLINE;
525 }
526
527 Handler.cyh_func = (cyc_func_t)rtTimerSolSingleCallbackWrapper;
528 Handler.cyh_arg = pTimer;
529 Handler.cyh_level = CY_LOCK_LEVEL;
530
531 /*
532 * Use a large interval (1 hour) so that we don't get a timer callback between
533 * cyclic_add() and cyclic_bind(). Program the correct interval once cyclic_bind() is done.
534 * See @bugref{7691#c20}.
535 */
536 if (!pTimer->fSpecificCpu)
537 FireTime.cyt_when = RTTimeSystemNanoTS() + u64First;
538 else
539 FireTime.cyt_when = RTTimeSystemNanoTS() + u64First + RT_NS_1HOUR;
540 FireTime.cyt_interval = pTimer->cNsInterval != 0
541 ? pTimer->cNsInterval
542 : CY_INFINITY /* Special value, see cyclic_fire(). */;
543 pTimer->u.Single.u64Tick = 0;
544 pTimer->u.Single.nsNextTick = 0;
545
546 pTimer->hCyclicId = cyclic_add(&Handler, &FireTime);
547 if (pTimer->fSpecificCpu)
548 {
549 cyclic_bind(pTimer->hCyclicId, cpu[pTimer->iCpu], NULL /* cpupart */);
550 cyclic_reprogram(pTimer->hCyclicId, RTTimeSystemNanoTS() + u64First);
551 }
552 }
553
554 mutex_exit(&cpu_lock);
555 return VINF_SUCCESS;
556}
557
558
559/**
560 * Worker common for RTTimerStop and RTTimerDestroy.
561 *
562 * @param pTimer The timer to stop.
563 */
564static void rtTimerSolStopIt(PRTTIMER pTimer)
565{
566 mutex_enter(&cpu_lock);
567
568 pTimer->fSuspended = true;
569 if (pTimer->hCyclicId != CYCLIC_NONE)
570 {
571 cyclic_remove(pTimer->hCyclicId);
572 pTimer->hCyclicId = CYCLIC_NONE;
573 }
574 pTimer->fSuspendedFromTimer = false;
575
576 mutex_exit(&cpu_lock);
577}
578
579
580RTDECL(int) RTTimerStop(PRTTIMER pTimer)
581{
582 RTTIMER_ASSERT_VALID_RET(pTimer);
583 RT_ASSERT_INTS_ON();
584
585 if (pTimer->fSuspended)
586 return VERR_TIMER_SUSPENDED;
587
588 /* Taking cpu_lock and calling cyclic_remove from the timer callback may
589 deadlock the system, so just mark the timer as suspended and deal with
590 it in the callback wrapper function above. */
591 if (rtTimerSolIsCallingFromTimerProc(pTimer))
592 pTimer->fSuspendedFromTimer = true;
593 else
594 rtTimerSolStopIt(pTimer);
595
596 return VINF_SUCCESS;
597}
598
599
600RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
601{
602 /*
603 * Validate.
604 */
605 RTTIMER_ASSERT_VALID_RET(pTimer);
606 AssertReturn(u64NanoInterval > 0, VERR_INVALID_PARAMETER);
607 AssertReturn(u64NanoInterval < UINT64_MAX / 8, VERR_INVALID_PARAMETER);
608 AssertReturn(pTimer->cNsInterval, VERR_INVALID_STATE);
609
610 if (pTimer->fSuspended || pTimer->fSuspendedFromTimer)
611 pTimer->cNsInterval = u64NanoInterval;
612 else
613 {
614 ASMAtomicWriteU64(&pTimer->cNsInterval, u64NanoInterval);
615 ASMAtomicWriteBool(&pTimer->fIntervalChanged, true);
616
617 if ( !pTimer->fAllCpus
618 && !pTimer->u.Single.nsNextTick
619 && pTimer->hCyclicId != CYCLIC_NONE
620 && rtTimerSolIsCallingFromTimerProc(pTimer))
621 pTimer->u.Single.nsNextTick = RTTimeSystemNanoTS();
622 }
623
624 return VINF_SUCCESS;
625}
626
627
628RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
629{
630 return nsec_per_tick;
631}
632
633
634RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
635{
636 return VERR_NOT_SUPPORTED;
637}
638
639
640RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
641{
642 return VERR_NOT_SUPPORTED;
643}
644
645
646RTDECL(bool) RTTimerCanDoHighResolution(void)
647{
648 return true;
649}
650