VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp @ 97698

Last change on this file since 97698 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 38.8 KB
/* $Id: timer-r0drv-nt.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
/** @file
 * IPRT - Timers, Ring-0 Driver, NT.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-nt-kernel.h"

#include <iprt/timer.h>
#include <iprt/mp.h>
#include <iprt/cpuset.h>
#include <iprt/err.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/thread.h>

#include "internal-r0drv-nt.h"
#include "internal/magics.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** This seems to provide better accuracy. */
#define RTR0TIMER_NT_MANUAL_RE_ARM  1

#if !defined(IN_GUEST) || defined(DOXYGEN_RUNNING)
/** Use the high resolution timers introduced with Windows 8.1. */
# define RTR0TIMER_NT_HIGH_RES      1
#endif


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * A sub timer structure.
 *
 * This is used for keeping the per-cpu tick and DPC object.
 */
typedef struct RTTIMERNTSUBTIMER
{
    /** The tick counter. */
    uint64_t                iTick;
    /** Pointer to the parent timer. */
    PRTTIMER                pParent;
    /** Thread active executing the worker function, NIL if inactive. */
    RTNATIVETHREAD volatile hActiveThread;
    /** The NT DPC object. */
    KDPC                    NtDpc;
    /** Whether we failed to set the target CPU for the DPC and this needs
     * to be done at RTTimerStart (simple timers) or during timer callback (omni). */
    bool                    fDpcNeedTargetCpuSet;
} RTTIMERNTSUBTIMER;
/** Pointer to an NT sub-timer structure. */
typedef RTTIMERNTSUBTIMER *PRTTIMERNTSUBTIMER;

/**
 * The internal representation of an NT timer handle.
 */
typedef struct RTTIMER
{
    /** Magic.
     * This is RTTIMER_MAGIC, but changes to something else before the timer
     * is destroyed to indicate clearly that the thread should exit. */
    uint32_t volatile       u32Magic;
    /** Suspend count down for single shot omni timers. */
    int32_t volatile        cOmniSuspendCountDown;
    /** Flag indicating the timer is suspended. */
    bool volatile           fSuspended;
    /** Whether the timer must run on one specific CPU or not. */
    bool                    fSpecificCpu;
    /** Whether the timer must run on all CPUs or not. */
    bool                    fOmniTimer;
    /** The CPU it must run on if fSpecificCpu is set.
     * The master CPU for an omni-timer. */
    RTCPUID                 idCpu;
    /** Callback. */
    PFNRTTIMER              pfnTimer;
    /** User argument. */
    void                   *pvUser;

    /** @name Periodic scheduling / RTTimerChangeInterval.
     * @{ */
    /** Spinlock protecting the u64NanoInterval, iMasterTick, uNtStartTime,
     * uNtDueTime and (at least for updating) fSuspended. */
    KSPIN_LOCK              Spinlock;
    /** The timer interval. 0 if one-shot. */
    uint64_t volatile       u64NanoInterval;
    /** The current master tick.  This does not necessarily follow that of
     * the subtimer, as RTTimerChangeInterval may cause it to reset. */
    uint64_t volatile       iMasterTick;
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    /** The desired NT time of the first tick.
     * This is not set for one-shot timers, only periodic ones. */
    uint64_t volatile       uNtStartTime;
    /** The current due time (absolute interrupt time).
     * This is not set for one-shot timers, only periodic ones. */
    uint64_t volatile       uNtDueTime;
#endif
    /** @} */

    /** The NT timer object. */
    KTIMER                  NtTimer;
#ifdef RTR0TIMER_NT_HIGH_RES
    /** High resolution timer.  If not NULL, this must be used instead of NtTimer. */
    PEX_TIMER               pHighResTimer;
#endif
    /** The number of sub-timers. */
    RTCPUID                 cSubTimers;
    /** Sub-timers.
     * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
     * an entry for all possible cpus.  In that case the index will be the same as
     * for the RTCpuSet. */
    RTTIMERNTSUBTIMER       aSubTimers[1];
} RTTIMER;
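
/*
 * Illustrative note: aSubTimers is a variable-size trailing array, so the
 * whole timer is allocated as one block sized for the actual sub-timer
 * count.  Roughly what RTTimerCreateEx below does, with a hypothetical
 * count of 4 for an omni timer on a 4-CPU system:
 *
 *     RTCPUID  cSubTimers = 4;
 *     PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cSubTimers]));
 *     // i.e. offsetof(RTTIMER, aSubTimers) + 4 * sizeof(RTTIMERNTSUBTIMER) bytes
 */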


#ifdef RTR0TIMER_NT_MANUAL_RE_ARM

/**
 * Get current NT interrupt time.
 * @return NT interrupt time
 */
static uint64_t rtTimerNtQueryInterruptTime(void)
{
# ifdef RT_ARCH_AMD64
    return KeQueryInterruptTime(); /* macro */
# else
    if (g_pfnrtKeQueryInterruptTime)
        return g_pfnrtKeQueryInterruptTime();

    /* NT4 */
    ULARGE_INTEGER InterruptTime;
    do
    {
        InterruptTime.HighPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High1Time;
        InterruptTime.LowPart  = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.LowPart;
    } while (((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High2Time != (LONG)InterruptTime.HighPart);
    return InterruptTime.QuadPart;
# endif
}
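
/*
 * Illustrative note: the NT4 fallback above is the usual lock-free read of
 * the 64-bit InterruptTime in KUSER_SHARED_DATA.  The kernel writer updates
 * High2Time first, then LowPart, then High1Time, so a reader that sees
 * High1Time == High2Time knows the LowPart it read belongs with that high
 * part; on a mismatch an update raced the read and the loop retries:
 *
 *     do {
 *         Hi = SharedUserData->InterruptTime.High1Time;
 *         Lo = SharedUserData->InterruptTime.LowPart;
 *     } while (SharedUserData->InterruptTime.High2Time != Hi);
 */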

/**
 * Get current NT interrupt time, high resolution variant.
 * @return High resolution NT interrupt time
 */
static uint64_t rtTimerNtQueryInterruptTimeHighRes(void)
{
    if (g_pfnrtKeQueryInterruptTimePrecise)
    {
        ULONG64 uQpcIgnored;
        return g_pfnrtKeQueryInterruptTimePrecise(&uQpcIgnored);
    }
    return rtTimerNtQueryInterruptTime();
}

#endif /* RTR0TIMER_NT_MANUAL_RE_ARM */


/**
 * Worker for rtTimerNtRearmInternval that calculates the next due time.
 *
 * @returns The next due time (relative, so always negative).
 * @param   uNtNow                  The current time.
 * @param   uNtStartTime            The start time of the timer.
 * @param   iTick                   The next tick number (zero being @a uNtStartTime).
 * @param   cNtInterval             The timer interval in NT ticks.
 * @param   cNtNegDueSaftyMargin    The due time safety margin in negative NT
 *                                  ticks.
 * @param   cNtMinNegInterval       The minimum interval to use when in catchup
 *                                  mode, also negative NT ticks.
 */
DECLINLINE(int64_t) rtTimerNtCalcNextDueTime(uint64_t uNtNow, uint64_t uNtStartTime, uint64_t iTick, uint64_t cNtInterval,
                                             int32_t const cNtNegDueSaftyMargin, int32_t const cNtMinNegInterval)
{
    /* Calculate the actual time elapsed since timer start: */
    int64_t iDueTime = uNtNow - uNtStartTime;
    if (iDueTime < 0)
        iDueTime = 0;

    /* Now calculate the nominal time since timer start for the next tick: */
    uint64_t const uNtNextRelStart = iTick * cNtInterval;

    /* Calculate how much time we have to the next tick: */
    iDueTime -= uNtNextRelStart;

    /* If we haven't already overshot the due time, including some safety margin, we're good: */
    if (iDueTime < cNtNegDueSaftyMargin)
        return iDueTime;

    /* Okay, we've overshot it and are in catchup mode: */
    if (iDueTime < (int64_t)cNtInterval)
        iDueTime = -(int64_t)(cNtInterval / 2);     /* double time */
    else if (iDueTime < (int64_t)(cNtInterval * 4))
        iDueTime = -(int64_t)(cNtInterval / 4);     /* quadruple time */
    else
        return cNtMinNegInterval;

    /* Make sure we don't try intervals smaller than the minimum specified by the caller: */
    if (iDueTime > cNtMinNegInterval)
        iDueTime = cNtMinNegInterval;
    return iDueTime;
}
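
/*
 * Illustrative walk-through with hypothetical numbers (NT ticks are 100ns):
 * take uNtStartTime = 0, cNtInterval = 10000 (1ms) and iTick = 5, so the
 * nominal start of tick 5 is 50000.  With uNtNow = 49000 this yields
 * iDueTime = 49000 - 50000 = -1000, i.e. a relative due time 100us out,
 * which clears the -100 safety margin and is used as-is.  With a late
 * uNtNow = 52000 we overshoot (iDueTime = +2000) and the catchup logic
 * re-arms at half an interval, -5000, letting the tick counter catch up
 * with where it nominally ought to be.
 */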

/**
 * Manually re-arms an interval timer.
 *
 * Turns out NT doesn't necessarily do a very good job at re-arming timers
 * accurately, in part because the KeSetTimerEx API takes the interval in
 * milliseconds.
 *
 * @param   pTimer      The timer.
 * @param   pMasterDpc  The master timer DPC for passing to KeSetTimerEx
 *                      in low-resolution mode.  Ignored for high-res.
 */
static void rtTimerNtRearmInternval(PRTTIMER pTimer, PKDPC pMasterDpc)
{
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    Assert(pTimer->u64NanoInterval);

    /*
     * For simplicity we acquire the spinlock for the whole operation.
     * This should be perfectly fine as it doesn't change the IRQL.
     */
    Assert(KeGetCurrentIrql() >= DISPATCH_LEVEL);
    KeAcquireSpinLockAtDpcLevel(&pTimer->Spinlock);

    /*
     * Make sure it wasn't suspended
     */
    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
    {
        uint64_t const cNtInterval  = ASMAtomicUoReadU64(&pTimer->u64NanoInterval) / 100;
        uint64_t const uNtStartTime = ASMAtomicUoReadU64(&pTimer->uNtStartTime);
        uint64_t const iTick        = ++pTimer->iMasterTick;

        /*
         * Calculate the deadline for the next timer tick and arm the timer.
         * We always use a relative tick, i.e. negative DueTime value.  This is
         * crucial for the high resolution API as it will bugcheck otherwise.
         */
        int64_t  iDueTime;
        uint64_t uNtNow;
# ifdef RTR0TIMER_NT_HIGH_RES
        if (pTimer->pHighResTimer)
        {
            /* Must use highres time here. */
            uNtNow   = rtTimerNtQueryInterruptTimeHighRes();
            iDueTime = rtTimerNtCalcNextDueTime(uNtNow, uNtStartTime, iTick, cNtInterval,
                                                -100 /* 10us safety */, -2000 /* 200us min interval */);
            g_pfnrtExSetTimer(pTimer->pHighResTimer, iDueTime, 0, NULL);
        }
        else
# endif
        {
            /* Expect interrupt time and timers to expire at the same time, so
               don't use high res time api here. */
            uNtNow   = rtTimerNtQueryInterruptTime();
            iDueTime = rtTimerNtCalcNextDueTime(uNtNow, uNtStartTime, iTick, cNtInterval,
                                                -100 /* 10us safety */, -2500 /* 250us min interval */); /** @todo use max interval here */
            LARGE_INTEGER DueTime;
            DueTime.QuadPart = iDueTime;
            KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
        }

        pTimer->uNtDueTime = uNtNow + -iDueTime;
    }

    KeReleaseSpinLockFromDpcLevel(&pTimer->Spinlock);
#else
    RT_NOREF(pTimer, pMasterDpc);
#endif
}
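
/*
 * Illustrative note: NT due times encode relative vs. absolute in the sign,
 * negative meaning "this many 100ns units from now".  The rearm code above
 * therefore always produces negative values, e.g. hypothetically:
 *
 *     LARGE_INTEGER DueTime;
 *     DueTime.QuadPart = -10000;  // 10000 * 100ns = 1ms from now
 *     KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
 *
 * A positive (absolute) value is legal for KeSetTimerEx but, per the comment
 * above, would bugcheck on the high-resolution ExSetTimer path.
 */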


/**
 * Common timer callback worker for the non-omni timers.
 *
 * @param   pTimer      The timer.
 */
static void rtTimerNtSimpleCallbackWorker(PRTTIMER pTimer)
{
    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, RTThreadNativeSelf());

        if (!pTimer->u64NanoInterval)
            ASMAtomicWriteBool(&pTimer->fSuspended, true);
        uint64_t iTick = ++pTimer->aSubTimers[0].iTick;

        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);

        /* We re-arm the timer after calling pfnTimer, as it may stop the timer
           or change the interval, which would mean doing extra work. */
        if (!pTimer->fSuspended && pTimer->u64NanoInterval)
            rtTimerNtRearmInternval(pTimer, &pTimer->aSubTimers[0].NtDpc);

        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, NIL_RTNATIVETHREAD);
    }
}


/**
 * Timer callback function for the low-resolution non-omni timers.
 *
 * @param   pDpc                Pointer to the DPC.
 * @param   pvUser              Pointer to our internal timer structure.
 * @param   SystemArgument1     Some system argument.
 * @param   SystemArgument2     Some system argument.
 */
static void _stdcall rtTimerNtSimpleCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMER pTimer = (PRTTIMER)pvUser;
    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
#endif

    rtTimerNtSimpleCallbackWorker(pTimer);

    RT_NOREF(pDpc, SystemArgument1, SystemArgument2);
}


#ifdef RTR0TIMER_NT_HIGH_RES
/**
 * Timer callback function for the high-resolution non-omni timers.
 *
 * @param   pExTimer    The windows timer.
 * @param   pvUser      Pointer to our internal timer structure.
 */
static void _stdcall rtTimerNtHighResSimpleCallback(PEX_TIMER pExTimer, void *pvUser)
{
    PRTTIMER pTimer = (PRTTIMER)pvUser;
    AssertPtr(pTimer);
    Assert(pTimer->pHighResTimer == pExTimer);
# ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtHighResSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
# endif

    /* If we're not on the desired CPU, trigger the DPC.  That will rearm the
       timer and such. */
    if (   !pTimer->fSpecificCpu
        || pTimer->idCpu == RTMpCpuId())
        rtTimerNtSimpleCallbackWorker(pTimer);
    else
        KeInsertQueueDpc(&pTimer->aSubTimers[0].NtDpc, 0, 0);

    RT_NOREF(pExTimer);
}
#endif /* RTR0TIMER_NT_HIGH_RES */


/**
 * The slave DPC callback for an omni timer.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniSlaveCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER           pTimer    = pSubTimer->pParent;

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (!pTimer->u64NanoInterval)
            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}


/**
 * Called when we have an incomplete DPC object.
 *
 * @returns KeInsertQueueDpc return value.
 * @param   pSubTimer       The sub-timer to queue a DPC for.
 * @param   iCpu            The CPU set index corresponding to that sub-timer.
 */
DECL_NO_INLINE(static, BOOLEAN) rtTimerNtOmniQueueDpcSlow(PRTTIMERNTSUBTIMER pSubTimer, int iCpu)
{
    int rc = rtMpNtSetTargetProcessorDpc(&pSubTimer->NtDpc, RTMpCpuIdFromSetIndex(iCpu));
    if (RT_SUCCESS(rc))
    {
        pSubTimer->fDpcNeedTargetCpuSet = false;
        return KeInsertQueueDpc(&pSubTimer->NtDpc, 0, 0);
    }
    return FALSE;
}


/**
 * Wrapper around KeInsertQueueDpc that makes sure the target CPU has been set.
 *
 * This is for handling deferred rtMpNtSetTargetProcessorDpc failures during
 * creation.  These errors happen for offline CPUs which probably never will
 * come online, as very few systems do CPU hotplugging.
 *
 * @returns KeInsertQueueDpc return value.
 * @param   pSubTimer       The sub-timer to queue a DPC for.
 * @param   iCpu            The CPU set index corresponding to that sub-timer.
 */
DECLINLINE(BOOLEAN) rtTimerNtOmniQueueDpc(PRTTIMERNTSUBTIMER pSubTimer, int iCpu)
{
    if (RT_LIKELY(!pSubTimer->fDpcNeedTargetCpuSet))
        return KeInsertQueueDpc(&pSubTimer->NtDpc, 0, 0);
    return rtTimerNtOmniQueueDpcSlow(pSubTimer, iCpu);
}


/**
 * Common timer callback worker for omni-timers.
 *
 * This is responsible for queueing the DPCs for the other CPUs and
 * performing the callback on the CPU on which it is called.
 *
 * @param   pTimer      The timer.
 * @param   pSubTimer   The sub-timer of the calling CPU.
 * @param   iCpuSelf    The set index of the CPU we're running on.
 */
static void rtTimerNtOmniMasterCallbackWorker(PRTTIMER pTimer, PRTTIMERNTSUBTIMER pSubTimer, int iCpuSelf)
{
    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (pTimer->u64NanoInterval)
        {
            /*
             * Recurring timer.
             */
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    && iCpuSelf != iCpu)
                    rtTimerNtOmniQueueDpc(&pTimer->aSubTimers[iCpu], iCpu);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);

            /* We re-arm the timer after calling pfnTimer, as it may stop the timer
               or change the interval, which would mean doing extra work. */
            if (!pTimer->fSuspended && pTimer->u64NanoInterval)
                rtTimerNtRearmInternval(pTimer, &pSubTimer->NtDpc);
        }
        else
        {
            /*
             * Single shot timers get complicated wrt fSuspended maintenance.
             */
            uint32_t cCpus = 0;
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    cCpus++;
            ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus); /** @todo this is bogus bogus bogus. The counter is only used here. */

            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    && iCpuSelf != iCpu)
                    if (!rtTimerNtOmniQueueDpc(&pTimer->aSubTimers[iCpu], iCpu))
                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */

            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        }

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }
}
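
/*
 * Illustrative flow: one tick of a periodic omni timer looks roughly like
 *
 *     NT timer / ExTimer fires on the master CPU
 *       -> master callback (DPC or high-res)
 *            -> KeInsertQueueDpc() on every other online CPU  (slaves)
 *            -> pfnTimer() on the master CPU
 *            -> rtTimerNtRearmInternval() arms the next tick
 *     each slave DPC then runs pfnTimer() on its own CPU
 *
 * so the user callback fires once per online CPU per tick, with per-CPU
 * iTick counters kept in the aSubTimers array.
 */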


/**
 * The timer callback for an omni-timer, low-resolution.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER const pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER const           pTimer    = pSubTimer->pParent;
    RTCPUID                  idCpu     = RTMpCpuId();
    int const                iCpuSelf  = RTMpCpuIdToSetIndex(idCpu);

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    /* We must be called on the master CPU or the tick variable goes south. */
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
    if (pTimer->idCpu != idCpu)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: pTimer->idCpu=%d vs idCpu=%d\n", pTimer->idCpu, idCpu);
#endif

    rtTimerNtOmniMasterCallbackWorker(pTimer, pSubTimer, iCpuSelf);

    RT_NOREF(pDpc, SystemArgument1, SystemArgument2);
}


#ifdef RTR0TIMER_NT_HIGH_RES
/**
 * The timer callback for a high-resolution omni-timer.
 *
 * @param   pExTimer    The windows timer.
 * @param   pvUser      Pointer to our internal timer structure.
 */
static void __stdcall rtTimerNtHighResOmniCallback(PEX_TIMER pExTimer, void *pvUser)
{
    PRTTIMER const           pTimer    = (PRTTIMER)pvUser;
    int const                iCpuSelf  = RTMpCpuIdToSetIndex(RTMpCpuId());
    PRTTIMERNTSUBTIMER const pSubTimer = &pTimer->aSubTimers[iCpuSelf];

    AssertPtr(pTimer);
    Assert(pTimer->pHighResTimer == pExTimer);
# ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtHighResOmniCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
# endif

    rtTimerNtOmniMasterCallbackWorker(pTimer, pSubTimer, iCpuSelf);

    RT_NOREF(pExTimer);
}
#endif /* RTR0TIMER_NT_HIGH_RES */


RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * The operation is protected by the spinlock.
     */
    KIRQL bSavedIrql;
    KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);

    /*
     * Check the state.
     */
    if (ASMAtomicUoReadBool(&pTimer->fSuspended))
    { /* likely */ }
    else
    {
        KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
        return VERR_TIMER_ACTIVE;
    }
    if (   !pTimer->fSpecificCpu
        || RTMpIsCpuOnline(pTimer->idCpu))
    { /* likely */ }
    else
    {
        KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
        return VERR_CPU_OFFLINE;
    }

    /*
     * Lazy set the DPC target CPU if needed.
     */
    if (   !pTimer->fSpecificCpu
        || !pTimer->aSubTimers[0].fDpcNeedTargetCpuSet)
    { /* likely */ }
    else
    {
        int rc = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, pTimer->idCpu);
        if (RT_FAILURE(rc))
        {
            KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
            return rc;
        }
    }

    /*
     * Do the starting.
     */
#ifndef RTR0TIMER_NT_MANUAL_RE_ARM
    /* Calculate the interval time: */
    uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
    ULONG ulInterval = (ULONG)u64Interval;
    if (ulInterval != u64Interval)
        ulInterval = MAXLONG;
    else if (!ulInterval && pTimer->u64NanoInterval)
        ulInterval = 1;
#endif

    /* Translate u64First to a DueTime: */
    LARGE_INTEGER DueTime;
    DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time. */
    if (!DueTime.QuadPart)
        DueTime.QuadPart = -10; /* 1us */

    /* Reset tick counters: */
    unsigned cSubTimers = pTimer->fOmniTimer ? pTimer->cSubTimers : 1;
    for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
        pTimer->aSubTimers[iCpu].iTick = 0;
    pTimer->iMasterTick = 0;

    /* Update timer state: */
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    if (pTimer->u64NanoInterval > 0)
    {
# ifdef RTR0TIMER_NT_HIGH_RES
        uint64_t const uNtNow = pTimer->pHighResTimer ? rtTimerNtQueryInterruptTimeHighRes() : rtTimerNtQueryInterruptTime();
# else
        uint64_t const uNtNow = rtTimerNtQueryInterruptTime();
# endif
        pTimer->uNtStartTime = uNtNow + -DueTime.QuadPart;
        pTimer->uNtDueTime   = pTimer->uNtStartTime;
    }
#endif
    pTimer->cOmniSuspendCountDown = 0;
    ASMAtomicWriteBool(&pTimer->fSuspended, false);

    /*
     * Finally start the NT timer.
     *
     * We do this without holding the spinlock to err on the side of
     * caution in case ExSetTimer or KeSetTimerEx ever should have the idea
     * of running the callback before returning.
     */
    KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);

#ifdef RTR0TIMER_NT_HIGH_RES
    if (pTimer->pHighResTimer)
    {
# ifdef RTR0TIMER_NT_MANUAL_RE_ARM
        g_pfnrtExSetTimer(pTimer->pHighResTimer, DueTime.QuadPart, 0, NULL);
# else
        g_pfnrtExSetTimer(pTimer->pHighResTimer, DueTime.QuadPart, RT_MIN(pTimer->u64NanoInterval / 100, MAXLONG), NULL);
# endif
    }
    else
#endif
    {
        PKDPC const pMasterDpc = &pTimer->aSubTimers[pTimer->fOmniTimer ? RTMpCpuIdToSetIndex(pTimer->idCpu) : 0].NtDpc;
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
        KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
#else
        KeSetTimerEx(&pTimer->NtTimer, DueTime, ulInterval, pMasterDpc);
#endif
    }
    return VINF_SUCCESS;
}
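
/*
 * Illustrative note: u64First is in nanoseconds while NT due times are in
 * 100ns units, hence the division by 100 above.  A hypothetical
 * RTTimerStart(pTimer, RT_NS_1MS) thus arms the timer with
 * DueTime.QuadPart = -10000, i.e. the first tick fires roughly 1ms after
 * the call; passing 0 is bumped to -10 (1us) so the due time stays
 * relative (negative) rather than becoming an absolute time of zero.
 */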


/**
 * Worker function that stops an active timer.
 *
 * Shared by RTTimerStop and RTTimerDestroy.
 *
 * @param   pTimer      The active timer.
 */
static int rtTimerNtStopWorker(PRTTIMER pTimer)
{
    /*
     * Update the state from within the spinlock context.
     */
    KIRQL bSavedIrql;
    KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);

    bool const fWasSuspended = ASMAtomicXchgBool(&pTimer->fSuspended, true);

    KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
    if (!fWasSuspended)
    {
        /*
         * We should cancel the timer and dequeue DPCs.
         */
#ifdef RTR0TIMER_NT_HIGH_RES
        if (pTimer->pHighResTimer)
        {
            g_pfnrtExCancelTimer(pTimer->pHighResTimer, NULL);

            /* We can skip the DPC stuff, unless this is an omni timer or for a specific CPU. */
            if (!pTimer->fSpecificCpu && !pTimer->fOmniTimer)
                return VINF_SUCCESS;
        }
        else
#endif
            KeCancelTimer(&pTimer->NtTimer);

        for (RTCPUID iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
            KeRemoveQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc);
        return VINF_SUCCESS;
    }
    return VERR_TIMER_SUSPENDED;
}


RTDECL(int) RTTimerStop(PRTTIMER pTimer)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Call the worker we share with RTTimerDestroy.
     */
    return rtTimerNtStopWorker(pTimer);
}


RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
{
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * We do all the state changes while holding the spinlock.
     */
    int rc = VINF_SUCCESS;
    KIRQL bSavedIrql;
    KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);

    /*
     * When the timer isn't running, this is a simple job:
     */
    if (ASMAtomicUoReadBool(&pTimer->fSuspended))
        pTimer->u64NanoInterval = u64NanoInterval;
    else
    {
        /*
         * We only implement changing the interval in RTR0TIMER_NT_MANUAL_RE_ARM
         * mode right now.  We typically let the new interval take effect after
         * the next timer callback, unless that's too far ahead.
         */
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
        pTimer->u64NanoInterval = u64NanoInterval;
        pTimer->iMasterTick     = 0;
# ifdef RTR0TIMER_NT_HIGH_RES
        uint64_t const uNtNow = pTimer->pHighResTimer ? rtTimerNtQueryInterruptTimeHighRes() : rtTimerNtQueryInterruptTime();
# else
        uint64_t const uNtNow = rtTimerNtQueryInterruptTime();
# endif
        if (uNtNow >= pTimer->uNtDueTime)
            pTimer->uNtStartTime = uNtNow;
        else
        {
            pTimer->uNtStartTime = pTimer->uNtDueTime;

            /*
             * Re-arm the timer if the next DueTime is both more than 1.25 new
             * intervals and at least 0.5 ms ahead.
             */
            uint64_t cNtToNext = pTimer->uNtDueTime - uNtNow;
            if (   cNtToNext >= RT_NS_1MS / 2 / 100 /* 0.5 ms */
                && cNtToNext * 100 > u64NanoInterval + u64NanoInterval / 4)
            {
                pTimer->uNtStartTime = pTimer->uNtDueTime = uNtNow + u64NanoInterval / 100;
# ifdef RTR0TIMER_NT_HIGH_RES
                if (pTimer->pHighResTimer)
                    g_pfnrtExSetTimer(pTimer->pHighResTimer, -(int64_t)u64NanoInterval / 100, 0, NULL);
                else
# endif
                {
                    LARGE_INTEGER DueTime;
                    DueTime.QuadPart = -(int64_t)u64NanoInterval / 100;
                    KeSetTimerEx(&pTimer->NtTimer, DueTime, 0,
                                 &pTimer->aSubTimers[pTimer->fOmniTimer ? RTMpCpuIdToSetIndex(pTimer->idCpu) : 0].NtDpc);
                }
            }
        }
#else
        rc = VERR_NOT_SUPPORTED;
#endif
    }

    KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);

    return rc;
}


RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
{
    /* It's ok to pass NULL pointer. */
    if (pTimer == /*NIL_RTTIMER*/ NULL)
        return VINF_SUCCESS;
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * We do not support destroying a timer from the callback because it is
     * not 101% safe since we cannot flush DPCs.  Solaris has the same restriction.
     */
    AssertReturn(KeGetCurrentIrql() == PASSIVE_LEVEL, VERR_INVALID_CONTEXT);

    /*
     * Invalidate the timer, stop it if it's running and finally free up the memory.
     */
    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
    rtTimerNtStopWorker(pTimer);

#ifdef RTR0TIMER_NT_HIGH_RES
    /*
     * Destroy the high-resolution timer before flushing DPCs.
     */
    if (pTimer->pHighResTimer)
    {
        g_pfnrtExDeleteTimer(pTimer->pHighResTimer, TRUE /*fCancel*/, TRUE /*fWait*/, NULL);
        pTimer->pHighResTimer = NULL;
    }
#endif

    /*
     * Flush DPCs to be on the safe side.
     */
    if (g_pfnrtNtKeFlushQueuedDpcs)
        g_pfnrtNtKeFlushQueuedDpcs();

    RTMemFree(pTimer);

    return VINF_SUCCESS;
}


RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
        return VERR_INVALID_FLAGS;
    if (   (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
        && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
        return VERR_CPU_NOT_FOUND;

    /*
     * Allocate the timer handler.
     */
    RTCPUID cSubTimers = 1;
    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
    {
        cSubTimers = RTMpGetMaxCpuId() + 1;
        Assert(cSubTimers <= RTCPUSET_MAX_CPUS); /* On Windows we have a 1:1 relationship between cpuid and set index. */
    }

    PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cSubTimers]));
    if (!pTimer)
        return VERR_NO_MEMORY;

    /*
     * Initialize it.
     *
     * Note!  The difference between a SynchronizationTimer and a NotificationTimer
     *        (KeInitializeTimer) is, as far as I can gather, only that the former
     *        will wake up exactly one waiting thread and the latter will wake up
     *        everyone.  Since we don't do any waiting on the NtTimer, that is not
     *        relevant to us.
     */
    pTimer->u32Magic = RTTIMER_MAGIC;
    pTimer->cOmniSuspendCountDown = 0;
    pTimer->fSuspended = true;
    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
    pTimer->fOmniTimer = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    pTimer->idCpu = pTimer->fSpecificCpu ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK) : NIL_RTCPUID;
    pTimer->cSubTimers = cSubTimers;
    pTimer->pfnTimer = pfnTimer;
    pTimer->pvUser = pvUser;
    KeInitializeSpinLock(&pTimer->Spinlock);
    pTimer->u64NanoInterval = u64NanoInterval;

    int rc = VINF_SUCCESS;
#ifdef RTR0TIMER_NT_HIGH_RES
    if (   (fFlags & RTTIMER_FLAGS_HIGH_RES)
        && RTTimerCanDoHighResolution())
    {
        pTimer->pHighResTimer = g_pfnrtExAllocateTimer(pTimer->fOmniTimer ? rtTimerNtHighResOmniCallback
                                                                          : rtTimerNtHighResSimpleCallback, pTimer,
                                                       EX_TIMER_HIGH_RESOLUTION | EX_TIMER_NOTIFICATION);
        if (!pTimer->pHighResTimer)
            rc = VERR_OUT_OF_RESOURCES;
    }
    else
#endif
    {
        if (g_pfnrtKeInitializeTimerEx) /** @todo just call KeInitializeTimer. */
            g_pfnrtKeInitializeTimerEx(&pTimer->NtTimer, SynchronizationTimer);
        else
            KeInitializeTimer(&pTimer->NtTimer);
    }
    if (RT_SUCCESS(rc))
    {
        RTCPUSET OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        if (pTimer->fOmniTimer)
        {
            /*
             * Initialize the per-cpu "sub-timers", select the first online cpu to be
             * the master.  This ASSUMES that no cpus will ever go offline.
             *
             * Note! For the high-resolution scenario, all DPC callbacks are slaves as
             *       we have a dedicated timer callback, set above during allocation,
             *       and don't control which CPU it (rtTimerNtHighResOmniCallback) is
             *       called on.
             */
            pTimer->iMasterTick = 0;
            pTimer->idCpu       = NIL_RTCPUID;
            for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
            {
                pTimer->aSubTimers[iCpu].iTick   = 0;
                pTimer->aSubTimers[iCpu].pParent = pTimer;

                if (   pTimer->idCpu == NIL_RTCPUID
                    && RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                {
                    pTimer->idCpu = RTMpCpuIdFromSetIndex(iCpu);
#ifdef RTR0TIMER_NT_HIGH_RES
                    if (pTimer->pHighResTimer)
                        KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
                    else
#endif
                        KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniMasterCallback, &pTimer->aSubTimers[iCpu]);
                }
                else
                    KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
                if (g_pfnrtKeSetImportanceDpc)
                    g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[iCpu].NtDpc, HighImportance);

                /* This does not necessarily work for offline CPUs that could potentially be onlined
                   at runtime, so postpone it. (See troubles on testboxmem1 after r148799.) */
                int rc2 = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[iCpu].NtDpc, iCpu);
                if (RT_SUCCESS(rc2))
                    pTimer->aSubTimers[iCpu].fDpcNeedTargetCpuSet = false;
                else if (!RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    pTimer->aSubTimers[iCpu].fDpcNeedTargetCpuSet = true;
                else
                {
                    rc = rc2;
                    break;
                }
            }
            Assert(pTimer->idCpu != NIL_RTCPUID);
        }
        else
        {
            /*
             * Initialize the first "sub-timer", target the DPC on a specific processor
             * if requested to do so.
             */
            pTimer->iMasterTick           = 0;
            pTimer->aSubTimers[0].iTick   = 0;
            pTimer->aSubTimers[0].pParent = pTimer;

            KeInitializeDpc(&pTimer->aSubTimers[0].NtDpc, rtTimerNtSimpleCallback, pTimer);
            if (g_pfnrtKeSetImportanceDpc)
                g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[0].NtDpc, HighImportance);
            if (pTimer->fSpecificCpu)
            {
                /* This does not necessarily work for offline CPUs that could potentially be onlined
                   at runtime, so postpone it. (See troubles on testboxmem1 after r148799.) */
                int rc2 = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, pTimer->idCpu);
                if (RT_SUCCESS(rc2))
                    pTimer->aSubTimers[0].fDpcNeedTargetCpuSet = false;
                else if (!RTCpuSetIsMember(&OnlineSet, pTimer->idCpu))
                    pTimer->aSubTimers[0].fDpcNeedTargetCpuSet = true;
                else
                    rc = rc2;
            }
        }
        if (RT_SUCCESS(rc))
        {
            *ppTimer = pTimer;
            return VINF_SUCCESS;
        }

#ifdef RTR0TIMER_NT_HIGH_RES
        if (pTimer->pHighResTimer)
        {
            g_pfnrtExDeleteTimer(pTimer->pHighResTimer, FALSE, FALSE, NULL);
            pTimer->pHighResTimer = NULL;
        }
#endif
    }

    RTMemFree(pTimer);
    return rc;
}
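
/*
 * Minimal usage sketch of the API above (hypothetical callback and values,
 * error handling trimmed):
 *
 *     static DECLCALLBACK(void) myTimerTick(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
 *     {
 *         RT_NOREF(pTimer, pvUser, iTick);  // runs at DISPATCH_LEVEL (DPC context)
 *     }
 *
 *     PRTTIMER pTimer;
 *     int rc = RTTimerCreateEx(&pTimer, RT_NS_1MS, RTTIMER_FLAGS_HIGH_RES, myTimerTick, NULL);
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = RTTimerStart(pTimer, 0);     // first tick ~1us out, then every 1ms
 *         ...
 *         RTTimerStop(pTimer);
 *         RTTimerDestroy(pTimer);           // must be called at PASSIVE_LEVEL
 *     }
 */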


RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
{
    if (!g_pfnrtNtExSetTimerResolution)
        return VERR_NOT_SUPPORTED;

    ULONG ulGranted = g_pfnrtNtExSetTimerResolution(u32Request / 100, TRUE);
    if (pu32Granted)
        *pu32Granted = ulGranted * 100; /* NT -> ns */
    return VINF_SUCCESS;
}
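
/*
 * Illustrative note: the IPRT granularity API works in nanoseconds while
 * ExSetTimerResolution works in 100ns units, hence the /100 and *100 above.
 * A hypothetical RTTimerRequestSystemGranularity(1000000, &u32Granted)
 * requests a 1ms system timer resolution and reports what NT granted, in ns.
 */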


RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
{
    if (!g_pfnrtNtExSetTimerResolution)
        return VERR_NOT_SUPPORTED;

    g_pfnrtNtExSetTimerResolution(0 /* ignored */, FALSE);
    NOREF(u32Granted);
    return VINF_SUCCESS;
}


RTDECL(bool) RTTimerCanDoHighResolution(void)
{
#ifdef RTR0TIMER_NT_HIGH_RES
    return g_pfnrtExAllocateTimer != NULL
        && g_pfnrtExDeleteTimer   != NULL
        && g_pfnrtExSetTimer      != NULL
        && g_pfnrtExCancelTimer   != NULL;
#else
    return false;
#endif
}