VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp@ 92825

Last change on this file since 92825 was 92825, checked in by vboxsync, 3 years ago

IPRT/timer-r0drv-nt.cpp: Implemented high resolution timers using the ExAllocateTimer API from Windows 8.1.

1/* $Id: timer-r0drv-nt.cpp 92825 2021-12-08 15:32:59Z vboxsync $ */
2/** @file
3 * IPRT - Timers, Ring-0 Driver, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-nt-kernel.h"
32
33#include <iprt/timer.h>
34#include <iprt/mp.h>
35#include <iprt/cpuset.h>
36#include <iprt/err.h>
37#include <iprt/asm.h>
38#include <iprt/assert.h>
39#include <iprt/mem.h>
40#include <iprt/thread.h>
41
42#include "internal-r0drv-nt.h"
43#include "internal/magics.h"
44
45
46/*********************************************************************************************************************************
47* Defined Constants And Macros *
48*********************************************************************************************************************************/
49/** This seems to provide better accuracy. */
50#define RTR0TIMER_NT_MANUAL_RE_ARM 1
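/* Manual re-arming computes each due time from uNtStartTime and the tick number
   instead of relying on KeSetTimerEx's periodic mode, whose period is limited to
   whole milliseconds, so rounding and drift do not accumulate across ticks. */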
51
52#if !defined(IN_GUEST) || defined(DOXYGEN_RUNNING)
53/** This enables the high resolution timers introduced with Windows 8.1. */
54# define RTR0TIMER_NT_HIGH_RES 1
55#endif
56
57
58/*********************************************************************************************************************************
59* Structures and Typedefs *
60*********************************************************************************************************************************/
61/**
62 * A sub timer structure.
63 *
64 * This is used for keeping the per-cpu tick and DPC object.
65 */
66typedef struct RTTIMERNTSUBTIMER
67{
68 /** The tick counter. */
69 uint64_t iTick;
70 /** Pointer to the parent timer. */
71 PRTTIMER pParent;
73 /** The thread actively executing the worker function, NIL if inactive. */
73 RTNATIVETHREAD volatile hActiveThread;
74 /** The NT DPC object. */
75 KDPC NtDpc;
76} RTTIMERNTSUBTIMER;
77/** Pointer to a NT sub-timer structure. */
78typedef RTTIMERNTSUBTIMER *PRTTIMERNTSUBTIMER;
79
80/**
81 * The internal representation of an NT timer handle.
82 */
83typedef struct RTTIMER
84{
85 /** Magic.
86 * This is RTTIMER_MAGIC, but changes to something else before the timer
87 * is destroyed to indicate clearly that thread should exit. */
88 uint32_t volatile u32Magic;
89 /** Suspend countdown for single shot omni timers. */
90 int32_t volatile cOmniSuspendCountDown;
91 /** Flag indicating the timer is suspended. */
92 bool volatile fSuspended;
93 /** Whether the timer must run on one specific CPU or not. */
94 bool fSpecificCpu;
95 /** Whether the timer must run on all CPUs or not. */
96 bool fOmniTimer;
97 /** The CPU it must run on if fSpecificCpu is set.
98 * The master CPU for an omni-timer. */
99 RTCPUID idCpu;
100 /** Callback. */
101 PFNRTTIMER pfnTimer;
102 /** User argument. */
103 void *pvUser;
104 /** The timer interval. 0 if one-shot. */
105 uint64_t u64NanoInterval;
106#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
107 /** The desired NT time of the first tick. */
108 uint64_t uNtStartTime;
109#endif
110 /** The NT timer object. */
111 KTIMER NtTimer;
112#ifdef RTR0TIMER_NT_HIGH_RES
113 /** High resolution timer. If not NULL, this must be used instead of NtTimer. */
114 PEX_TIMER pHighResTimer;
115#endif
116 /** The number of sub-timers. */
117 RTCPUID cSubTimers;
118 /** Sub-timers.
119 * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
120 * an entry for all possible cpus. In that case the index will be the same as
121 * for the RTCpuSet. */
122 RTTIMERNTSUBTIMER aSubTimers[1];
123} RTTIMER;
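/* Note: aSubTimers[1] is a variable-size trailing array; RTTimerCreateEx below
   allocates the structure with RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cSubTimers])
   so an omni timer gets one entry per possible CPU. */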
124
125
126#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
127/**
128 * Get current NT interrupt time.
129 * @return NT interrupt time
130 */
131static uint64_t rtTimerNtQueryInterruptTime(void)
132{
133# ifdef RT_ARCH_AMD64
134 return KeQueryInterruptTime(); /* macro */
135# else
136 if (g_pfnrtKeQueryInterruptTime)
137 return g_pfnrtKeQueryInterruptTime();
138
139 /* NT4 */
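 /* Fallback for kernels without KeQueryInterruptTime: read the 64-bit interrupt
    time from the shared user data page. High1Time/High2Time act as a sequence
    lock - if the high part changed while the low part was read, retry, so a
    torn 64-bit value is never returned on 32-bit systems. */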
140 ULARGE_INTEGER InterruptTime;
141 do
142 {
143 InterruptTime.HighPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High1Time;
144 InterruptTime.LowPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.LowPart;
145 } while (((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High2Time != (LONG)InterruptTime.HighPart);
146 return InterruptTime.QuadPart;
147# endif
148}
149#endif /* RTR0TIMER_NT_MANUAL_RE_ARM */
150
151
152/**
153 * Manually re-arms an interval timer.
154 *
155 * Turns out NT doesn't necessarily do a very good job at re-arming timers
156 * accurately.
157 *
158 * @param pTimer The timer.
159 * @param iTick The current timer tick.
160 */
161DECLINLINE(void) rtTimerNtRearmInternval(PRTTIMER pTimer, uint64_t iTick)
162{
163#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
164 Assert(pTimer->u64NanoInterval);
165
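 /* uNtNext is the offset (in 100ns NT units, minus a 1us fudge) from uNtStartTime
    at which tick iTick should fire. DueTime starts out as the time already elapsed
    since the start; subtracting uNtNext then yields a negative value, i.e. a
    relative due time for KeSetTimerEx/ExSetTimer. If the target has already been
    missed, fall back to firing in 0.25ms. */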
166 uint64_t uNtNext = (iTick * pTimer->u64NanoInterval) / 100 - 10; /* 1us fudge */
167 LARGE_INTEGER DueTime;
168 DueTime.QuadPart = rtTimerNtQueryInterruptTime() - pTimer->uNtStartTime;
169 if (DueTime.QuadPart < 0)
170 DueTime.QuadPart = 0;
171 if ((uint64_t)DueTime.QuadPart < uNtNext)
172 DueTime.QuadPart -= uNtNext;
173 else
174 DueTime.QuadPart = -2500; /* 0.25ms */
175
176# ifdef RTR0TIMER_NT_HIGH_RES
177 if (pTimer->pHighResTimer)
178 g_pfnrtExSetTimer(pTimer->pHighResTimer, DueTime.QuadPart, 0, NULL);
179 else
180# endif
181 KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, &pTimer->aSubTimers[0].NtDpc);
182#else
183 RT_NOREF(pTimer, iTick);
184#endif
185}
186
187
188/**
189 * Common timer callback worker for the non-omni timers.
190 *
191 * Called at DISPATCH_LEVEL (or higher) from both the DPC and the high-resolution ExTimer callbacks.
192 * @param pTimer The timer.
193 */
194static void rtTimerNtSimpleCallbackWorker(PRTTIMER pTimer)
195{
196 /*
197 * Check that we haven't been suspended before doing the callout.
198 */
199 if ( !ASMAtomicUoReadBool(&pTimer->fSuspended)
200 && pTimer->u32Magic == RTTIMER_MAGIC)
201 {
202 ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, RTThreadNativeSelf());
203
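 /* For a one-shot timer the suspended flag is raised before the callout, which
    also means the callback may restart the timer with RTTimerStart from within
    the callout. */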
204 if (!pTimer->u64NanoInterval)
205 ASMAtomicWriteBool(&pTimer->fSuspended, true);
206 uint64_t iTick = ++pTimer->aSubTimers[0].iTick;
207 if (pTimer->u64NanoInterval)
208 rtTimerNtRearmInternval(pTimer, iTick);
209 pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
210
211 ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, NIL_RTNATIVETHREAD);
212 }
213}
214
215
216/**
217 * Timer callback function for the low-resolution non-omni timers.
218 *
219 * @param pDpc Pointer to the DPC.
220 * @param pvUser Pointer to our internal timer structure.
221 * @param SystemArgument1 Some system argument.
222 * @param SystemArgument2 Some system argument.
223 */
224static void _stdcall rtTimerNtSimpleCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
225{
226 PRTTIMER pTimer = (PRTTIMER)pvUser;
227 AssertPtr(pTimer);
228#ifdef RT_STRICT
229 if (KeGetCurrentIrql() < DISPATCH_LEVEL)
230 RTAssertMsg2Weak("rtTimerNtSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
231#endif
232
233 rtTimerNtSimpleCallbackWorker(pTimer);
234
235 RT_NOREF(pDpc, SystemArgument1, SystemArgument2);
236}
237
238
239#ifdef RTR0TIMER_NT_HIGH_RES
240/**
241 * Timer callback function for the high-resolution non-omni timers.
242 *
243 * @param pExTimer The windows timer.
244 * @param pvUser Pointer to our internal timer structure.
245 */
246static void _stdcall rtTimerNtHighResSimpleCallback(PEX_TIMER pExTimer, void *pvUser)
247{
248 PRTTIMER pTimer = (PRTTIMER)pvUser;
249 AssertPtr(pTimer);
250 Assert(pTimer->pHighResTimer == pExTimer);
251# ifdef RT_STRICT
252 if (KeGetCurrentIrql() < DISPATCH_LEVEL)
253 RTAssertMsg2Weak("rtTimerNtHighResSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
254# endif
255
256 /* If we're not on the desired CPU, trigger the DPC. That will rearm the
257 timer and such. */
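 /* (The DPC was bound to the requested CPU via rtMpNtSetTargetProcessorDpc in
    RTTimerCreateEx, so queueing it runs the worker there.) */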
258 if ( !pTimer->fSpecificCpu
259 || pTimer->idCpu == RTMpCpuId())
260 rtTimerNtSimpleCallbackWorker(pTimer);
261 else
262 KeInsertQueueDpc(&pTimer->aSubTimers[0].NtDpc, 0, 0);
263
264 RT_NOREF(pExTimer);
265}
266#endif /* RTR0TIMER_NT_HIGH_RES */
267
268
269/**
270 * The slave DPC callback for an omni timer.
271 *
272 * @param pDpc The DPC object.
273 * @param pvUser Pointer to the sub-timer.
274 * @param SystemArgument1 Some system stuff.
275 * @param SystemArgument2 Some system stuff.
276 */
277static void _stdcall rtTimerNtOmniSlaveCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
278{
279 PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
280 PRTTIMER pTimer = pSubTimer->pParent;
281
282 AssertPtr(pTimer);
283#ifdef RT_STRICT
284 if (KeGetCurrentIrql() < DISPATCH_LEVEL)
285 RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
286 int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
287 if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
288 RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
289#endif
290
291 /*
292 * Check that we haven't been suspended before doing the callout.
293 */
294 if ( !ASMAtomicUoReadBool(&pTimer->fSuspended)
295 && pTimer->u32Magic == RTTIMER_MAGIC)
296 {
297 ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());
298
299 if (!pTimer->u64NanoInterval)
300 if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
301 ASMAtomicWriteBool(&pTimer->fSuspended, true);
302
303 pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
304
305 ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
306 }
307
308 NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
309}
310
311
312/**
313 * Common timer callback worker for omni-timers.
314 *
315 * This is responsible for queueing the DPCs for the other CPUs and
316 * performing the callback on the CPU on which it is called.
317 *
318 * @param pTimer The timer.
319 * @param pSubTimer The sub-timer of the calling CPU.
320 * @param iCpuSelf The set index of the CPU we're running on.
321 */
322static void rtTimerNtOmniMasterCallbackWorker(PRTTIMER pTimer, PRTTIMERNTSUBTIMER pSubTimer, int iCpuSelf)
323{
324#ifdef RT_STRICT
325 if (KeGetCurrentIrql() < DISPATCH_LEVEL)
326 RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
327 if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
328 RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
329#endif
330
331 /*
332 * Check that we haven't been suspended before scheduling the other DPCs
333 * and doing the callout.
334 */
335 if ( !ASMAtomicUoReadBool(&pTimer->fSuspended)
336 && pTimer->u32Magic == RTTIMER_MAGIC)
337 {
338 RTCPUSET OnlineSet;
339 RTMpGetOnlineSet(&OnlineSet);
340
341 ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());
342
343 if (pTimer->u64NanoInterval)
344 {
345 /*
346 * Recurring timer.
347 */
348 for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
349 if ( RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
350 && iCpuSelf != iCpu)
351 KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);
352
353 uint64_t iTick = ++pSubTimer->iTick;
354 rtTimerNtRearmInternval(pTimer, iTick);
355 pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
356 }
357 else
358 {
359 /*
360 * Single shot timers get complicated wrt fSuspended maintenance.
361 */
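 /* The scheme: add the number of online CPUs to cOmniSuspendCountDown, queue a
    DPC for every other online CPU (subtracting back any that was already queued
    and thus already counted), then decrement for this CPU; whichever callback
    brings the count to zero (or below) sets fSuspended. */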
362 uint32_t cCpus = 0;
363 for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
364 if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
365 cCpus++;
366 ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus);
367
368 for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
369 if ( RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
370 && iCpuSelf != iCpu)
371 if (!KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0))
372 ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */
373
374 if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
375 ASMAtomicWriteBool(&pTimer->fSuspended, true);
376
377 pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
378 }
379
380 ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
381 }
382}
383
384
385/**
386 * The timer callback for an omni-timer, low-resolution.
387 *
388 * @param pDpc The DPC object.
389 * @param pvUser Pointer to the sub-timer.
390 * @param SystemArgument1 Some system stuff.
391 * @param SystemArgument2 Some system stuff.
392 */
393static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
394{
395 PRTTIMERNTSUBTIMER const pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
396 PRTTIMER const pTimer = pSubTimer->pParent;
397 int const iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
398
399 AssertPtr(pTimer);
400#ifdef RT_STRICT
401 if (KeGetCurrentIrql() < DISPATCH_LEVEL)
402 RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
403 if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
404 RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
405#endif
406
407 rtTimerNtOmniMasterCallbackWorker(pTimer, pSubTimer, iCpuSelf);
408
409 RT_NOREF(pDpc, SystemArgument1, SystemArgument2);
410}
411
412
413#ifdef RTR0TIMER_NT_HIGH_RES
414/**
415 * The timer callback for a high-resolution omni-timer.
416 *
417 * @param pExTimer The windows timer.
418 * @param pvUser Pointer to our internal timer structure.
419 */
420static void __stdcall rtTimerNtHighResOmniCallback(PEX_TIMER pExTimer, void *pvUser)
421{
422 PRTTIMER const pTimer = (PRTTIMER)pvUser;
423 int const iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
424 PRTTIMERNTSUBTIMER const pSubTimer = &pTimer->aSubTimers[iCpuSelf];
425
426 AssertPtr(pTimer);
427 Assert(pTimer->pHighResTimer == pExTimer);
428# ifdef RT_STRICT
429 if (KeGetCurrentIrql() < DISPATCH_LEVEL)
430 RTAssertMsg2Weak("rtTimerNtHighResOmniCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
431# endif
432
433 rtTimerNtOmniMasterCallbackWorker(pTimer, pSubTimer, iCpuSelf);
434
435 RT_NOREF(pExTimer);
436}
437#endif /* RTR0TIMER_NT_HIGH_RES */
438
439
440RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
441{
442 /*
443 * Validate.
444 */
445 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
446 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
447
448 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
449 return VERR_TIMER_ACTIVE;
450 if ( pTimer->fSpecificCpu
451 && !RTMpIsCpuOnline(pTimer->idCpu))
452 return VERR_CPU_OFFLINE;
453
454 /*
455 * Start the timer.
456 */
457 PKDPC pMasterDpc = pTimer->fOmniTimer
458 ? &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc
459 : &pTimer->aSubTimers[0].NtDpc;
460
461#ifndef RTR0TIMER_NT_MANUAL_RE_ARM
462 uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
463 ULONG ulInterval = (ULONG)u64Interval;
464 if (ulInterval != u64Interval)
465 ulInterval = MAXLONG;
466 else if (!ulInterval && pTimer->u64NanoInterval)
467 ulInterval = 1;
468#endif
469
470 LARGE_INTEGER DueTime;
471 DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time. */
472 if (!DueTime.QuadPart)
473 DueTime.QuadPart = -1;
474
475 unsigned cSubTimers = pTimer->fOmniTimer ? pTimer->cSubTimers : 1;
476 for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
477 pTimer->aSubTimers[iCpu].iTick = 0;
478#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
479 pTimer->uNtStartTime = rtTimerNtQueryInterruptTime() + u64First / 100;
480#endif
481 ASMAtomicWriteS32(&pTimer->cOmniSuspendCountDown, 0);
482 ASMAtomicWriteBool(&pTimer->fSuspended, false);
483
484#ifdef RTR0TIMER_NT_HIGH_RES
485 if (pTimer->pHighResTimer)
486 {
487# ifdef RTR0TIMER_NT_MANUAL_RE_ARM
488 g_pfnrtExSetTimer(pTimer->pHighResTimer, DueTime.QuadPart, 0, NULL);
489# else
490 g_pfnrtExSetTimer(pTimer->pHighResTimer, DueTime.QuadPart, RT_MIN(pTimer->u64NanoInterval / 100, MAXLONG), NULL);
491# endif
492 }
493 else
494#endif
495 {
496#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
497 KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
498#else
499 KeSetTimerEx(&pTimer->NtTimer, DueTime, ulInterval, pMasterDpc);
500#endif
501 }
502 return VINF_SUCCESS;
503}
504
505
506/**
507 * Worker function that stops an active timer.
508 *
509 * Shared by RTTimerStop and RTTimerDestroy.
510 *
511 * @param pTimer The active timer.
512 */
513static void rtTimerNtStopWorker(PRTTIMER pTimer)
514{
515 /*
516 * Just cancel the timer, dequeue the DPCs and flush them (if this is supported).
517 */
518 ASMAtomicWriteBool(&pTimer->fSuspended, true);
519
520#ifdef RTR0TIMER_NT_HIGH_RES
521 if (pTimer->pHighResTimer)
522 g_pfnrtExCancelTimer(pTimer->pHighResTimer, NULL);
523 else
524#endif
525 KeCancelTimer(&pTimer->NtTimer);
526
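 /* Dequeue any sub-timer DPCs that are still pending; a DPC that is already
    executing will see fSuspended (set above) and skip the callout. */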
527 for (RTCPUID iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
528 KeRemoveQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc);
529}
530
531
532RTDECL(int) RTTimerStop(PRTTIMER pTimer)
533{
534 /*
535 * Validate.
536 */
537 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
538 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
539
540 if (ASMAtomicUoReadBool(&pTimer->fSuspended))
541 return VERR_TIMER_SUSPENDED;
542
543 /*
544 * Call the worker we share with RTTimerDestroy.
545 */
546 rtTimerNtStopWorker(pTimer);
547 return VINF_SUCCESS;
548}
549
550
551RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
552{
553 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
554 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
555 RT_NOREF1(u64NanoInterval);
556
557 return VERR_NOT_SUPPORTED;
558}
559
560
561RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
562{
563 /* It's ok to pass NULL pointer. */
564 if (pTimer == /*NIL_RTTIMER*/ NULL)
565 return VINF_SUCCESS;
566 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
567 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
568
569 /*
570 * We do not support destroying a timer from the callback because it is
571 * not 101% safe since we cannot flush DPCs. Solaris has the same restriction.
572 */
573 AssertReturn(KeGetCurrentIrql() == PASSIVE_LEVEL, VERR_INVALID_CONTEXT);
574
575 /*
576 * Invalidate the timer, stop it if it's running and finally
577 * free up the memory.
578 */
579 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
580 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
581 rtTimerNtStopWorker(pTimer);
582
583#ifdef RTR0TIMER_NT_HIGH_RES
584 /*
585 * Destroy the high-resolution timer before flushing DPCs.
586 */
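 /* fCancel/fWait = TRUE: ExDeleteTimer should cancel the timer and wait for an
    outstanding callback to finish before returning (going by the parameter
    names used here), so freeing the structure afterwards is safe. */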
587 if (pTimer->pHighResTimer)
588 {
589 g_pfnrtExDeleteTimer(pTimer->pHighResTimer, TRUE /*fCancel*/, TRUE /*fWait*/, NULL);
590 pTimer->pHighResTimer = NULL;
591 }
592#endif
593
594 /*
595 * Flush DPCs to be on the safe side.
596 */
597 if (g_pfnrtNtKeFlushQueuedDpcs)
598 g_pfnrtNtKeFlushQueuedDpcs();
599
600 RTMemFree(pTimer);
601
602 return VINF_SUCCESS;
603}
604
605
606RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
607{
608 *ppTimer = NULL;
609
610 /*
611 * Validate flags.
612 */
613 if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
614 return VERR_INVALID_PARAMETER;
615 if ( (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
616 && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
617 && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
618 return VERR_CPU_NOT_FOUND;
619
620 /*
621 * Allocate the timer handler.
622 */
623 RTCPUID cSubTimers = 1;
624 if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
625 {
626 cSubTimers = RTMpGetMaxCpuId() + 1;
627 Assert(cSubTimers <= RTCPUSET_MAX_CPUS); /* On Windows we have a 1:1 relationship between cpuid and set index. */
628 }
629
630 PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cSubTimers]));
631 if (!pTimer)
632 return VERR_NO_MEMORY;
633
634 /*
635 * Initialize it.
636 *
637 * Note! The difference between a SynchronizationTimer and a NotificationTimer
638 * (KeInitializeTimer) is, as far as I can gather, only that the former
639 * will wake up exactly one waiting thread and the latter will wake up
640 * everyone. Since we don't do any waiting on the NtTimer, that is not
641 * relevant to us.
642 */
643 pTimer->u32Magic = RTTIMER_MAGIC;
644 pTimer->cOmniSuspendCountDown = 0;
645 pTimer->fSuspended = true;
646 pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
647 pTimer->fOmniTimer = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
648 pTimer->idCpu = pTimer->fSpecificCpu ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK) : NIL_RTCPUID;
649 pTimer->cSubTimers = cSubTimers;
650 pTimer->pfnTimer = pfnTimer;
651 pTimer->pvUser = pvUser;
652 pTimer->u64NanoInterval = u64NanoInterval;
653
654 int rc = VINF_SUCCESS;
655#ifdef RTR0TIMER_NT_HIGH_RES
656 if ( (fFlags & RTTIMER_FLAGS_HIGH_RES)
657 && RTTimerCanDoHighResolution())
658 {
659 pTimer->pHighResTimer = g_pfnrtExAllocateTimer(pTimer->fOmniTimer ? rtTimerNtHighResOmniCallback
660 : rtTimerNtHighResSimpleCallback, pTimer,
661 EX_TIMER_HIGH_RESOLUTION | EX_TIMER_NOTIFICATION);
662 if (!pTimer->pHighResTimer)
663 rc = VERR_OUT_OF_RESOURCES;
664 }
665 else
666#endif
667 {
668 if (g_pfnrtKeInitializeTimerEx) /** @todo just call KeInitializeTimer. */
669 g_pfnrtKeInitializeTimerEx(&pTimer->NtTimer, SynchronizationTimer);
670 else
671 KeInitializeTimer(&pTimer->NtTimer);
672 }
673 if (RT_SUCCESS(rc))
674 {
675 if (pTimer->fOmniTimer)
676 {
677 /*
678 * Initialize the per-cpu "sub-timers", select the first online cpu to be
679 * the master. This ASSUMES that no cpus will ever go offline.
680 *
681 * Note! For the high-resolution scenario, all DPC callbacks are slaves as
682 * we have a dedicated timer callback, set above during allocation,
683 * and don't control which CPU it (rtTimerNtHighResOmniCallback) is
684 * called on.
685 */
686 pTimer->idCpu = NIL_RTCPUID;
687 for (unsigned iCpu = 0; iCpu < cSubTimers && RT_SUCCESS(rc); iCpu++)
688 {
689 pTimer->aSubTimers[iCpu].iTick = 0;
690 pTimer->aSubTimers[iCpu].pParent = pTimer;
691
692 if ( pTimer->idCpu == NIL_RTCPUID
693 && RTMpIsCpuOnline(RTMpCpuIdFromSetIndex(iCpu)))
694 {
695 pTimer->idCpu = RTMpCpuIdFromSetIndex(iCpu);
696#ifdef RTR0TIMER_NT_HIGH_RES
697 if (pTimer->pHighResTimer)
698 KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
699 else
700#endif
701 KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniMasterCallback, &pTimer->aSubTimers[iCpu]);
702 }
703 else
704 KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
705 if (g_pfnrtKeSetImportanceDpc)
706 g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[iCpu].NtDpc, HighImportance);
707 rc = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[iCpu].NtDpc, iCpu);
708 }
709 Assert(pTimer->idCpu != NIL_RTCPUID);
710 }
711 else
712 {
713 /*
714 * Initialize the first "sub-timer", target the DPC on a specific processor
715 * if requested to do so.
716 */
717 pTimer->aSubTimers[0].iTick = 0;
718 pTimer->aSubTimers[0].pParent = pTimer;
719
720 KeInitializeDpc(&pTimer->aSubTimers[0].NtDpc, rtTimerNtSimpleCallback, pTimer);
721 if (g_pfnrtKeSetImportanceDpc)
722 g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[0].NtDpc, HighImportance);
723 if (pTimer->fSpecificCpu)
724 rc = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, (int)pTimer->idCpu);
725 }
726 if (RT_SUCCESS(rc))
727 {
728 *ppTimer = pTimer;
729 return VINF_SUCCESS;
730 }
731
732#ifdef RTR0TIMER_NT_HIGH_RES
733 if (pTimer->pHighResTimer)
734 {
735 g_pfnrtExDeleteTimer(pTimer->pHighResTimer, FALSE, FALSE, NULL);
736 pTimer->pHighResTimer = NULL;
737 }
738#endif
739 }
740
741 RTMemFree(pTimer);
742 return rc;
743}
744
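For orientation, here is a minimal, hypothetical usage sketch of the API implemented above. It is written against the signatures visible in this file (RTTimerCreateEx, RTTimerStart, RTTimerDestroy, the RTTIMER_FLAGS_HIGH_RES flag) and the way pfnTimer is invoked by the callback workers; the exact PFNRTTIMER prototype and the DECLCALLBACK convention come from the IPRT headers and are assumptions here, not something this file defines.

#include <iprt/timer.h>
#include <iprt/err.h>

/* Hypothetical ring-0 caller - illustrative only, not part of timer-r0drv-nt.cpp. */
static DECLCALLBACK(void) exampleTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
{
    /* On NT this runs at DISPATCH_LEVEL (or higher), so keep it short and non-blocking. */
    RT_NOREF(pTimer, pvUser, iTick);
}

static int exampleCreateAndStartTimer(PRTTIMER *ppTimer)
{
    /* 1ms interval; RTTIMER_FLAGS_HIGH_RES requests the ExAllocateTimer path on
       Windows 8.1+, otherwise the classic KTIMER + DPC path is used. */
    int rc = RTTimerCreateEx(ppTimer, UINT64_C(1000000) /*ns*/, RTTIMER_FLAGS_HIGH_RES,
                             exampleTimerCallback, NULL /*pvUser*/);
    if (RT_SUCCESS(rc))
    {
        rc = RTTimerStart(*ppTimer, 0 /*u64First: first tick as soon as possible*/);
        if (RT_FAILURE(rc))
        {
            RTTimerDestroy(*ppTimer);
            *ppTimer = NULL;
        }
    }
    return rc;
}

Whether the high-resolution path is actually taken depends on RTTimerCanDoHighResolution() below, i.e. on the ExAllocateTimer/ExSetTimer/ExCancelTimer/ExDeleteTimer imports being resolved.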
745
746RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
747{
748 if (!g_pfnrtNtExSetTimerResolution)
749 return VERR_NOT_SUPPORTED;
750
751 ULONG ulGranted = g_pfnrtNtExSetTimerResolution(u32Request / 100, TRUE);
752 if (pu32Granted)
753 *pu32Granted = ulGranted * 100; /* NT -> ns */
754 return VINF_SUCCESS;
755}
756
757
758RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
759{
760 if (!g_pfnrtNtExSetTimerResolution)
761 return VERR_NOT_SUPPORTED;
762
763 g_pfnrtNtExSetTimerResolution(0 /* ignored */, FALSE);
764 NOREF(u32Granted);
765 return VINF_SUCCESS;
766}
767
768
769RTDECL(bool) RTTimerCanDoHighResolution(void)
770{
771#ifdef RTR0TIMER_NT_HIGH_RES
772 return g_pfnrtExAllocateTimer != NULL
773 && g_pfnrtExDeleteTimer != NULL
774 && g_pfnrtExSetTimer != NULL
775 && g_pfnrtExCancelTimer != NULL;
776#else
777 return false;
778#endif
779}
780