VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp@ 94155

Last change on this file since 94155 was 93115, checked in by vboxsync, 3 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 38.5 KB
Line 
1/* $Id: timer-r0drv-nt.cpp 93115 2022-01-01 11:31:46Z vboxsync $ */
2/** @file
3 * IPRT - Timers, Ring-0 Driver, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-nt-kernel.h"
32
33#include <iprt/timer.h>
34#include <iprt/mp.h>
35#include <iprt/cpuset.h>
36#include <iprt/err.h>
37#include <iprt/asm.h>
38#include <iprt/assert.h>
39#include <iprt/mem.h>
40#include <iprt/thread.h>
41
42#include "internal-r0drv-nt.h"
43#include "internal/magics.h"
44
45
46/*********************************************************************************************************************************
47* Defined Constants And Macros *
48*********************************************************************************************************************************/
49/** This seems to provide better accuracy. */
50#define RTR0TIMER_NT_MANUAL_RE_ARM 1
51
52#if !defined(IN_GUEST) || defined(DOXYGEN_RUNNING)
/** This enables use of the high resolution timers introduced with Windows 8.1. */
54# define RTR0TIMER_NT_HIGH_RES 1
55#endif
56
57
58/*********************************************************************************************************************************
59* Structures and Typedefs *
60*********************************************************************************************************************************/
/**
 * A sub timer structure.
 *
 * This is used for keeping the per-cpu tick and DPC object.
 */
typedef struct RTTIMERNTSUBTIMER
{
    /** The current tick number for this CPU (incremented before each callout). */
    uint64_t                iTick;
    /** Pointer to the parent timer. */
    PRTTIMER                pParent;
    /** Thread currently executing the worker function, NIL_RTNATIVETHREAD when inactive. */
    RTNATIVETHREAD volatile hActiveThread;
    /** The NT DPC object used to run the callback on the right CPU. */
    KDPC                    NtDpc;
    /** Whether we failed to set the target CPU for the DPC and that this needs
     * to be done at RTTimerStart (simple timers) or during timer callback (omni). */
    bool                    fDpcNeedTargetCpuSet;
} RTTIMERNTSUBTIMER;
/** Pointer to a NT sub-timer structure. */
typedef RTTIMERNTSUBTIMER *PRTTIMERNTSUBTIMER;
82
/**
 * The internal representation of an NT timer handle.
 * (The old comment said "Linux" — this file was copied from the Linux backend.)
 */
typedef struct RTTIMER
{
    /** Magic.
     * This is RTTIMER_MAGIC, but changes to something else before the timer
     * is destroyed to indicate clearly that thread should exit. */
    uint32_t volatile       u32Magic;
    /** Suspend count down for single shot omni timers. */
    int32_t volatile        cOmniSuspendCountDown;
    /** Flag indicating the timer is suspended. */
    bool volatile           fSuspended;
    /** Whether the timer must run on one specific CPU or not. */
    bool                    fSpecificCpu;
    /** Whether the timer must run on all CPUs or not. */
    bool                    fOmniTimer;
    /** The CPU it must run on if fSpecificCpu is set.
     * The master CPU for an omni-timer. */
    RTCPUID                 idCpu;
    /** Callback. */
    PFNRTTIMER              pfnTimer;
    /** User argument. */
    void                   *pvUser;

    /** @name Periodic scheduling / RTTimerChangeInterval.
     * @{ */
    /** Spinlock protecting the u64NanoInterval, iMasterTick, uNtStartTime,
     * uNtDueTime and (at least for updating) fSuspended. */
    KSPIN_LOCK              Spinlock;
    /** The timer interval. 0 if one-shot. */
    uint64_t volatile       u64NanoInterval;
    /** The current master tick.  This does not necessarily follow that of
     * the subtimer, as RTTimerChangeInterval may cause it to reset. */
    uint64_t volatile       iMasterTick;
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    /** The desired NT time of the first tick.
     * This is not set for one-shot timers, only periodic ones. */
    uint64_t volatile       uNtStartTime;
    /** The current due time (absolute interrupt time).
     * This is not set for one-shot timers, only periodic ones. */
    uint64_t volatile       uNtDueTime;
#endif
    /** @} */

    /** The NT timer object. */
    KTIMER                  NtTimer;
#ifdef RTR0TIMER_NT_HIGH_RES
    /** High resolution timer.  If not NULL, this must be used instead of NtTimer. */
    PEX_TIMER               pHighResTimer;
#endif
    /** The number of sub-timers. */
    RTCPUID                 cSubTimers;
    /** Sub-timers.
     * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
     * an entry for all possible cpus.  In that case the index will be the same as
     * for the RTCpuSet. */
    RTTIMERNTSUBTIMER       aSubTimers[1];
} RTTIMER;
142
143
144#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
145
/**
 * Get current NT interrupt time.
 *
 * @return NT interrupt time (100ns units).
 */
static uint64_t rtTimerNtQueryInterruptTime(void)
{
# ifdef RT_ARCH_AMD64
    return KeQueryInterruptTime(); /* macro */
# else
    if (g_pfnrtKeQueryInterruptTime)
        return g_pfnrtKeQueryInterruptTime();

    /* NT4 fallback: read the 64-bit interrupt time from the shared user data
       page.  High1Time/High2Time are re-checked to detect a torn read racing
       the kernel's update of the 64-bit value. */
    ULARGE_INTEGER InterruptTime;
    do
    {
        InterruptTime.HighPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High1Time;
        InterruptTime.LowPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.LowPart;
    } while (((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High2Time != (LONG)InterruptTime.HighPart);
    return InterruptTime.QuadPart;
# endif
}
168
169/**
170 * Get current NT interrupt time, high resolution variant.
171 * @return High resolution NT interrupt time
172 */
173static uint64_t rtTimerNtQueryInterruptTimeHighRes(void)
174{
175 if (g_pfnrtKeQueryInterruptTimePrecise)
176 {
177 ULONG64 uQpcIgnored;
178 return g_pfnrtKeQueryInterruptTimePrecise(&uQpcIgnored);
179 }
180 return rtTimerNtQueryInterruptTime();
181}
182
183#endif /* RTR0TIMER_NT_MANUAL_RE_ARM */
184
185
186/**
187 * Worker for rtTimerNtRearmInternval that calculates the next due time.
188 *
189 * @returns The next due time (relative, so always negative).
190 * @param uNtNow The current time.
191 * @param uNtStartTime The start time of the timer.
192 * @param iTick The next tick number (zero being @a uNtStartTime).
193 * @param cNtInterval The timer interval in NT ticks.
194 * @param cNtNegDueSaftyMargin The due time safety margin in negative NT
195 * ticks.
196 * @param cNtMinNegInterval The minium interval to use when in catchup
197 * mode, also negative NT ticks.
198 */
199DECLINLINE(int64_t) rtTimerNtCalcNextDueTime(uint64_t uNtNow, uint64_t uNtStartTime, uint64_t iTick, uint64_t cNtInterval,
200 int32_t const cNtNegDueSaftyMargin, int32_t const cNtMinNegInterval)
201{
202 /* Calculate the actual time elapsed since timer start: */
203 int64_t iDueTime = uNtNow - uNtStartTime;
204 if (iDueTime < 0)
205 iDueTime = 0;
206
207 /* Now calculate the nominal time since timer start for the next tick: */
208 uint64_t const uNtNextRelStart = iTick * cNtInterval;
209
210 /* Calulate now much time we have to the next tick: */
211 iDueTime -= uNtNextRelStart;
212
213 /* If we haven't already overshot the due time, including some safety margin, we're good: */
214 if (iDueTime < cNtNegDueSaftyMargin)
215 return iDueTime;
216
217 /* Okay, we've overshot it and are in catchup mode: */
218 if (iDueTime < (int64_t)cNtInterval)
219 iDueTime = -(int64_t)(cNtInterval / 2); /* double time */
220 else if (iDueTime < (int64_t)(cNtInterval * 4))
221 iDueTime = -(int64_t)(cNtInterval / 4); /* quadruple time */
222 else
223 return cNtMinNegInterval;
224
225 /* Make sure we don't try intervals smaller than the minimum specified by the caller: */
226 if (iDueTime > cNtMinNegInterval)
227 iDueTime = cNtMinNegInterval;
228 return iDueTime;
229}
230
231/**
232 * Manually re-arms an internval timer.
233 *
234 * Turns out NT doesn't necessarily do a very good job at re-arming timers
235 * accurately, this is in part due to KeSetTimerEx API taking the interval in
236 * milliseconds.
237 *
238 * @param pTimer The timer.
239 * @param pMasterDpc The master timer DPC for passing to KeSetTimerEx
240 * in low-resolution mode. Ignored for high-res.
241 */
242static void rtTimerNtRearmInternval(PRTTIMER pTimer, PKDPC pMasterDpc)
243{
244#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
245 Assert(pTimer->u64NanoInterval);
246
247 /*
248 * For simplicity we acquire the spinlock for the whole operation.
249 * This should be perfectly fine as it doesn't change the IRQL.
250 */
251 Assert(KeGetCurrentIrql() >= DISPATCH_LEVEL);
252 KeAcquireSpinLockAtDpcLevel(&pTimer->Spinlock);
253
254 /*
255 * Make sure it wasn't suspended
256 */
257 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
258 {
259 uint64_t const cNtInterval = ASMAtomicUoReadU64(&pTimer->u64NanoInterval) / 100;
260 uint64_t const uNtStartTime = ASMAtomicUoReadU64(&pTimer->uNtStartTime);
261 uint64_t const iTick = ++pTimer->iMasterTick;
262
263 /*
264 * Calculate the deadline for the next timer tick and arm the timer.
265 * We always use a relative tick, i.e. negative DueTime value. This is
266 * crucial for the the high resolution API as it will bugcheck otherwise.
267 */
268 int64_t iDueTime;
269 uint64_t uNtNow;
270# ifdef RTR0TIMER_NT_HIGH_RES
271 if (pTimer->pHighResTimer)
272 {
273 /* Must use highres time here. */
274 uNtNow = rtTimerNtQueryInterruptTimeHighRes();
275 iDueTime = rtTimerNtCalcNextDueTime(uNtNow, uNtStartTime, iTick, cNtInterval,
276 -100 /* 10us safety */, -2000 /* 200us min interval*/);
277 g_pfnrtExSetTimer(pTimer->pHighResTimer, iDueTime, 0, NULL);
278 }
279 else
280# endif
281 {
282 /* Expect interrupt time and timers to expire at the same time, so
283 don't use high res time api here. */
284 uNtNow = rtTimerNtQueryInterruptTime();
285 iDueTime = rtTimerNtCalcNextDueTime(uNtNow, uNtStartTime, iTick, cNtInterval,
286 -100 /* 10us safety */, -2500 /* 250us min interval*/); /** @todo use max interval here */
287 LARGE_INTEGER DueTime;
288 DueTime.QuadPart = iDueTime;
289 KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
290 }
291
292 pTimer->uNtDueTime = uNtNow + -iDueTime;
293 }
294
295 KeReleaseSpinLockFromDpcLevel(&pTimer->Spinlock);
296#else
297 RT_NOREF(pTimer, iTick, pMasterDpc);
298#endif
299}
300
301
/**
 * Common timer callback worker for the non-omni timers.
 *
 * Called both from the low-resolution DPC callback and the high-resolution
 * EX_TIMER callback.  (The old @returns HRTIMER_* note was a stale leftover
 * from the Linux backend - this function returns nothing.)
 *
 * @param   pTimer      The timer.
 */
static void rtTimerNtSimpleCallbackWorker(PRTTIMER pTimer)
{
    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, RTThreadNativeSelf());

        /* A one-shot timer (no interval) is marked suspended before the callout. */
        if (!pTimer->u64NanoInterval)
            ASMAtomicWriteBool(&pTimer->fSuspended, true);
        uint64_t iTick = ++pTimer->aSubTimers[0].iTick;

        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);

        /* We re-arm the timer after calling pfnTimer, as it may stop the timer
           or change the interval, which would mean doing extra work. */
        if (!pTimer->fSuspended && pTimer->u64NanoInterval)
            rtTimerNtRearmInternval(pTimer, &pTimer->aSubTimers[0].NtDpc);

        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, NIL_RTNATIVETHREAD);
    }
}
332
333
334/**
335 * Timer callback function for the low-resolution non-omni timers.
336 *
337 * @param pDpc Pointer to the DPC.
338 * @param pvUser Pointer to our internal timer structure.
339 * @param SystemArgument1 Some system argument.
340 * @param SystemArgument2 Some system argument.
341 */
342static void _stdcall rtTimerNtSimpleCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
343{
344 PRTTIMER pTimer = (PRTTIMER)pvUser;
345 AssertPtr(pTimer);
346#ifdef RT_STRICT
347 if (KeGetCurrentIrql() < DISPATCH_LEVEL)
348 RTAssertMsg2Weak("rtTimerNtSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
349#endif
350
351 rtTimerNtSimpleCallbackWorker(pTimer);
352
353 RT_NOREF(pDpc, SystemArgument1, SystemArgument2);
354}
355
356
357#ifdef RTR0TIMER_NT_HIGH_RES
358/**
359 * Timer callback function for the high-resolution non-omni timers.
360 *
361 * @param pExTimer The windows timer.
362 * @param pvUser Pointer to our internal timer structure.
363 */
364static void _stdcall rtTimerNtHighResSimpleCallback(PEX_TIMER pExTimer, void *pvUser)
365{
366 PRTTIMER pTimer = (PRTTIMER)pvUser;
367 AssertPtr(pTimer);
368 Assert(pTimer->pHighResTimer == pExTimer);
369# ifdef RT_STRICT
370 if (KeGetCurrentIrql() < DISPATCH_LEVEL)
371 RTAssertMsg2Weak("rtTimerNtHighResSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
372# endif
373
374 /* If we're not on the desired CPU, trigger the DPC. That will rearm the
375 timer and such. */
376 if ( !pTimer->fSpecificCpu
377 || pTimer->idCpu == RTMpCpuId())
378 rtTimerNtSimpleCallbackWorker(pTimer);
379 else
380 KeInsertQueueDpc(&pTimer->aSubTimers[0].NtDpc, 0, 0);
381
382 RT_NOREF(pExTimer);
383}
384#endif /* RTR0TIMER_NT_HIGH_RES */
385
386
387/**
388 * The slave DPC callback for an omni timer.
389 *
390 * @param pDpc The DPC object.
391 * @param pvUser Pointer to the sub-timer.
392 * @param SystemArgument1 Some system stuff.
393 * @param SystemArgument2 Some system stuff.
394 */
395static void _stdcall rtTimerNtOmniSlaveCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
396{
397 PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
398 PRTTIMER pTimer = pSubTimer->pParent;
399
400 AssertPtr(pTimer);
401#ifdef RT_STRICT
402 if (KeGetCurrentIrql() < DISPATCH_LEVEL)
403 RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
404 int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
405 if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
406 RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
407#endif
408
409 /*
410 * Check that we haven't been suspended before doing the callout.
411 */
412 if ( !ASMAtomicUoReadBool(&pTimer->fSuspended)
413 && pTimer->u32Magic == RTTIMER_MAGIC)
414 {
415 ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());
416
417 if (!pTimer->u64NanoInterval)
418 if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
419 ASMAtomicWriteBool(&pTimer->fSuspended, true);
420
421 pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
422
423 ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
424 }
425
426 NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
427}
428
429
430/**
431 * Called when we have an impcomplete DPC object.
432 *
433 * @returns KeInsertQueueDpc return value.
434 * @param pSubTimer The sub-timer to queue an DPC for.
435 * @param iCpu The CPU set index corresponding to that sub-timer.
436 */
437DECL_NO_INLINE(static, BOOLEAN) rtTimerNtOmniQueueDpcSlow(PRTTIMERNTSUBTIMER pSubTimer, int iCpu)
438{
439 int rc = rtMpNtSetTargetProcessorDpc(&pSubTimer->NtDpc, RTMpCpuIdFromSetIndex(iCpu));
440 if (RT_SUCCESS(rc))
441 {
442 pSubTimer->fDpcNeedTargetCpuSet = false;
443 return KeInsertQueueDpc(&pSubTimer->NtDpc, 0, 0);
444 }
445 return FALSE;
446}
447
448
449/**
450 * Wrapper around KeInsertQueueDpc that makes sure the target CPU has been set.
451 *
452 * This is for handling deferred rtMpNtSetTargetProcessorDpc failures during
453 * creation. These errors happens for offline CPUs which probably never every
454 * will come online, as very few systems do CPU hotplugging.
455 *
456 * @returns KeInsertQueueDpc return value.
457 * @param pSubTimer The sub-timer to queue an DPC for.
458 * @param iCpu The CPU set index corresponding to that sub-timer.
459 */
460DECLINLINE(BOOLEAN) rtTimerNtOmniQueueDpc(PRTTIMERNTSUBTIMER pSubTimer, int iCpu)
461{
462 if (RT_LIKELY(!pSubTimer->fDpcNeedTargetCpuSet))
463 return KeInsertQueueDpc(&pSubTimer->NtDpc, 0, 0);
464 return rtTimerNtOmniQueueDpcSlow(pSubTimer, iCpu);
465}
466
467
/**
 * Common timer callback worker for omni-timers.
 *
 * This is responsible for queueing the DPCs for the other CPUs and
 * perform the callback on the CPU on which it is called.
 *
 * @param   pTimer      The timer.
 * @param   pSubTimer   The sub-timer of the calling CPU.
 * @param   iCpuSelf    The set index of the CPU we're running on.
 */
static void rtTimerNtOmniMasterCallbackWorker(PRTTIMER pTimer, PRTTIMERNTSUBTIMER pSubTimer, int iCpuSelf)
{
    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (pTimer->u64NanoInterval)
        {
            /*
             * Recurring timer: fan out DPCs to all other online CPUs, do the
             * callout on this (master) CPU, then re-arm.
             */
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    && iCpuSelf != iCpu)
                    rtTimerNtOmniQueueDpc(&pTimer->aSubTimers[iCpu], iCpu);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);

            /* We re-arm the timer after calling pfnTimer, as it may stop the timer
               or change the interval, which would mean doing extra work. */
            if (!pTimer->fSuspended && pTimer->u64NanoInterval)
                rtTimerNtRearmInternval(pTimer, &pSubTimer->NtDpc);
        }
        else
        {
            /*
             * Single shot timers gets complicated wrt to fSuspended maintenance:
             * the countdown is primed with the number of online CPUs, each CPU
             * (master here, slaves in rtTimerNtOmniSlaveCallback) decrements it,
             * and the decrement reaching zero marks the timer suspended.
             */
            uint32_t cCpus = 0;
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    cCpus++;
            ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus); /** @todo this is bogus bogus bogus. The counter is only used here. */

            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    && iCpuSelf != iCpu)
                    if (!rtTimerNtOmniQueueDpc(&pTimer->aSubTimers[iCpu], iCpu))
                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */

            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        }

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }
}
535
536
537/**
538 * The timer callback for an omni-timer, low-resolution.
539 *
540 * @param pDpc The DPC object.
541 * @param pvUser Pointer to the sub-timer.
542 * @param SystemArgument1 Some system stuff.
543 * @param SystemArgument2 Some system stuff.
544 */
545static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
546{
547 PRTTIMERNTSUBTIMER const pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
548 PRTTIMER const pTimer = pSubTimer->pParent;
549 RTCPUID idCpu = RTMpCpuId();
550 int const iCpuSelf = RTMpCpuIdToSetIndex(idCpu);
551
552 AssertPtr(pTimer);
553#ifdef RT_STRICT
554 if (KeGetCurrentIrql() < DISPATCH_LEVEL)
555 RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
556 /* We must be called on the master CPU or the tick variable goes south. */
557 if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
558 RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
559 if (pTimer->idCpu != idCpu)
560 RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: pTimer->idCpu=%d vs idCpu=%d\n", pTimer->idCpu, idCpu);
561#endif
562
563 rtTimerNtOmniMasterCallbackWorker(pTimer, pSubTimer, iCpuSelf);
564
565 RT_NOREF(pDpc, SystemArgument1, SystemArgument2);
566}
567
568
569#ifdef RTR0TIMER_NT_HIGH_RES
570/**
571 * The timer callback for an high-resolution omni-timer.
572 *
573 * @param pExTimer The windows timer.
574 * @param pvUser Pointer to our internal timer structure.
575 */
576static void __stdcall rtTimerNtHighResOmniCallback(PEX_TIMER pExTimer, void *pvUser)
577{
578 PRTTIMER const pTimer = (PRTTIMER)pvUser;
579 int const iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
580 PRTTIMERNTSUBTIMER const pSubTimer = &pTimer->aSubTimers[iCpuSelf];
581
582 AssertPtr(pTimer);
583 Assert(pTimer->pHighResTimer == pExTimer);
584# ifdef RT_STRICT
585 if (KeGetCurrentIrql() < DISPATCH_LEVEL)
586 RTAssertMsg2Weak("rtTimerNtHighResOmniCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
587# endif
588
589 rtTimerNtOmniMasterCallbackWorker(pTimer, pSubTimer, iCpuSelf);
590
591 RT_NOREF(pExTimer);
592}
593#endif /* RTR0TIMER_NT_HIGH_RES */
594
595
/**
 * Starts the timer, with the first callout after @a u64First nanoseconds.
 *
 * @returns VINF_SUCCESS, VERR_TIMER_ACTIVE if already running,
 *          VERR_CPU_OFFLINE if the specific CPU isn't online, or the
 *          rtMpNtSetTargetProcessorDpc status on DPC targeting failure.
 */
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * The operation is protected by the spinlock.
     */
    KIRQL bSavedIrql;
    KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);

    /*
     * Check the state: must be suspended, and a specific CPU must be online.
     */
    if (ASMAtomicUoReadBool(&pTimer->fSuspended))
    { /* likely */ }
    else
    {
        KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
        return VERR_TIMER_ACTIVE;
    }
    if (   !pTimer->fSpecificCpu
        || RTMpIsCpuOnline(pTimer->idCpu))
    { /* likely */ }
    else
    {
        KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
        return VERR_CPU_OFFLINE;
    }

    /*
     * Lazy set the DPC target CPU if needed (deferred from creation because
     * the CPU was offline back then - see fDpcNeedTargetCpuSet).
     */
    if (   !pTimer->fSpecificCpu
        || !pTimer->aSubTimers[0].fDpcNeedTargetCpuSet)
    { /* likely */ }
    else
    {
        int rc = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, pTimer->idCpu);
        if (RT_FAILURE(rc))
        {
            KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
            return rc;
        }
    }

    /*
     * Do the starting.
     */
#ifndef RTR0TIMER_NT_MANUAL_RE_ARM
    /* Calculate the interval time: */
    uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
    ULONG ulInterval = (ULONG)u64Interval;
    if (ulInterval != u64Interval)
        ulInterval = MAXLONG;
    else if (!ulInterval && pTimer->u64NanoInterval)
        ulInterval = 1;
#endif

    /* Translate u64First to a DueTime: */
    LARGE_INTEGER DueTime;
    DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time. */
    if (!DueTime.QuadPart)
        DueTime.QuadPart = -10; /* 1us */

    /* Reset tick counters: */
    unsigned cSubTimers = pTimer->fOmniTimer ? pTimer->cSubTimers : 1;
    for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
        pTimer->aSubTimers[iCpu].iTick = 0;
    pTimer->iMasterTick = 0;

    /* Update timer state: */
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    if (pTimer->u64NanoInterval > 0)
    {
/* NOTE(review): '#ifdef' below lacks the nested '# ' indent used by the
   matching '# else'/'# endif' - cosmetic only, the pairs still match. */
#ifdef RTR0TIMER_NT_HIGH_RES
        uint64_t const uNtNow = pTimer->pHighResTimer ? rtTimerNtQueryInterruptTimeHighRes() : rtTimerNtQueryInterruptTime();
# else
        uint64_t const uNtNow = rtTimerNtQueryInterruptTime();
# endif
        pTimer->uNtStartTime = uNtNow + -DueTime.QuadPart;
        pTimer->uNtDueTime   = pTimer->uNtStartTime;
    }
#endif
    pTimer->cOmniSuspendCountDown = 0;
    ASMAtomicWriteBool(&pTimer->fSuspended, false);

    /*
     * Finally start the NT timer.
     *
     * We do this without holding the spinlock to err on the side of
     * caution in case ExSetTimer or KeSetTimerEx ever should have the idea
     * of running the callback before returning.
     */
    KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);

#ifdef RTR0TIMER_NT_HIGH_RES
    if (pTimer->pHighResTimer)
    {
# ifdef RTR0TIMER_NT_MANUAL_RE_ARM
        g_pfnrtExSetTimer(pTimer->pHighResTimer, DueTime.QuadPart, 0, NULL);
# else
        g_pfnrtExSetTimer(pTimer->pHighResTimer, DueTime.QuadPart, RT_MIN(pTimer->u64NanoInterval / 100, MAXLONG), NULL);
# endif
    }
    else
#endif
    {
        PKDPC const pMasterDpc = &pTimer->aSubTimers[pTimer->fOmniTimer ? RTMpCpuIdToSetIndex(pTimer->idCpu) : 0].NtDpc;
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
        KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
#else
        KeSetTimerEx(&pTimer->NtTimer, DueTime, ulInterval, pMasterDpc);
#endif
    }
    return VINF_SUCCESS;
}
716
717
718/**
719 * Worker function that stops an active timer.
720 *
721 * Shared by RTTimerStop and RTTimerDestroy.
722 *
723 * @param pTimer The active timer.
724 */
725static int rtTimerNtStopWorker(PRTTIMER pTimer)
726{
727 /*
728 * Update the state from with the spinlock context.
729 */
730 KIRQL bSavedIrql;
731 KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);
732
733 bool const fWasSuspended = ASMAtomicXchgBool(&pTimer->fSuspended, true);
734
735 KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
736 if (!fWasSuspended)
737 {
738 /*
739 * We should cacnel the timer and dequeue DPCs.
740 */
741#ifdef RTR0TIMER_NT_HIGH_RES
742 if (pTimer->pHighResTimer)
743 {
744 g_pfnrtExCancelTimer(pTimer->pHighResTimer, NULL);
745
746 /* We can skip the DPC stuff, unless this is an omni timer or for a specific CPU. */
747 if (!pTimer->fSpecificCpu && !pTimer->fOmniTimer)
748 return VINF_SUCCESS;
749 }
750 else
751#endif
752 KeCancelTimer(&pTimer->NtTimer);
753
754 for (RTCPUID iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
755 KeRemoveQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc);
756 return VINF_SUCCESS;
757 }
758 return VERR_TIMER_SUSPENDED;
759}
760
761
762RTDECL(int) RTTimerStop(PRTTIMER pTimer)
763{
764 /*
765 * Validate.
766 */
767 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
768 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
769
770 /*
771 * Call the worker we share with RTTimerDestroy.
772 */
773 return rtTimerNtStopWorker(pTimer);
774}
775
776
777RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
778{
779 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
780 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
781
782 /*
783 * We do all the state changes while holding the spinlock.
784 */
785 int rc = VINF_SUCCESS;
786 KIRQL bSavedIrql;
787 KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);
788
789 /*
790 * When the timer isn't running, this is an simple job:
791 */
792 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
793 pTimer->u64NanoInterval = u64NanoInterval;
794 else
795 {
796 /*
797 * We only implement changing the interval in RTR0TIMER_NT_MANUAL_RE_ARM
798 * mode right now. We typically let the new interval take effect after
799 * the next timer callback, unless that's too far ahead.
800 */
801#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
802 pTimer->u64NanoInterval = u64NanoInterval;
803 pTimer->iMasterTick = 0;
804# ifdef RTR0TIMER_NT_HIGH_RES
805 uint64_t const uNtNow = pTimer->pHighResTimer ? rtTimerNtQueryInterruptTimeHighRes() : rtTimerNtQueryInterruptTime();
806# else
807 uint64_t const uNtNow = rtTimerNtQueryInterruptTime();
808# endif
809 if (uNtNow >= pTimer->uNtDueTime)
810 pTimer->uNtStartTime = uNtNow;
811 else
812 {
813 pTimer->uNtStartTime = pTimer->uNtDueTime;
814
815 /*
816 * Re-arm the timer if the next DueTime is both more than 1.25 new
817 * intervals and at least 0.5 ms ahead.
818 */
819 uint64_t cNtToNext = pTimer->uNtDueTime - uNtNow;
820 if ( cNtToNext >= RT_NS_1MS / 2 / 100 /* 0.5 ms */
821 && cNtToNext * 100 > u64NanoInterval + u64NanoInterval / 4)
822 {
823 pTimer->uNtStartTime = pTimer->uNtDueTime = uNtNow + u64NanoInterval / 100;
824# ifdef RTR0TIMER_NT_HIGH_RES
825 if (pTimer->pHighResTimer)
826 g_pfnrtExSetTimer(pTimer->pHighResTimer, -(int64_t)u64NanoInterval / 100, 0, NULL);
827 else
828# endif
829 {
830 LARGE_INTEGER DueTime;
831 DueTime.QuadPart = -(int64_t)u64NanoInterval / 100;
832 KeSetTimerEx(&pTimer->NtTimer, DueTime, 0,
833 &pTimer->aSubTimers[pTimer->fOmniTimer ? RTMpCpuIdToSetIndex(pTimer->idCpu) : 0].NtDpc);
834 }
835 }
836 }
837#else
838 rc = VERR_NOT_SUPPORTED;
839#endif
840 }
841
842 KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
843
844 return rc;
845}
846
847
848RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
849{
850 /* It's ok to pass NULL pointer. */
851 if (pTimer == /*NIL_RTTIMER*/ NULL)
852 return VINF_SUCCESS;
853 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
854 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
855
856 /*
857 * We do not support destroying a timer from the callback because it is
858 * not 101% safe since we cannot flush DPCs. Solaris has the same restriction.
859 */
860 AssertReturn(KeGetCurrentIrql() == PASSIVE_LEVEL, VERR_INVALID_CONTEXT);
861
862 /*
863 * Invalidate the timer, stop it if it's running and finally free up the memory.
864 */
865 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
866 rtTimerNtStopWorker(pTimer);
867
868#ifdef RTR0TIMER_NT_HIGH_RES
869 /*
870 * Destroy the high-resolution timer before flushing DPCs.
871 */
872 if (pTimer->pHighResTimer)
873 {
874 g_pfnrtExDeleteTimer(pTimer->pHighResTimer, TRUE /*fCancel*/, TRUE /*fWait*/, NULL);
875 pTimer->pHighResTimer = NULL;
876 }
877#endif
878
879 /*
880 * Flush DPCs to be on the safe side.
881 */
882 if (g_pfnrtNtKeFlushQueuedDpcs)
883 g_pfnrtNtKeFlushQueuedDpcs();
884
885 RTMemFree(pTimer);
886
887 return VINF_SUCCESS;
888}
889
890
891RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
892{
893 *ppTimer = NULL;
894
895 /*
896 * Validate flags.
897 */
898 if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
899 return VERR_INVALID_FLAGS;
900 if ( (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
901 && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
902 && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
903 return VERR_CPU_NOT_FOUND;
904
905 /*
906 * Allocate the timer handler.
907 */
908 RTCPUID cSubTimers = 1;
909 if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
910 {
911 cSubTimers = RTMpGetMaxCpuId() + 1;
912 Assert(cSubTimers <= RTCPUSET_MAX_CPUS); /* On Windows we have a 1:1 relationship between cpuid and set index. */
913 }
914
915 PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cSubTimers]));
916 if (!pTimer)
917 return VERR_NO_MEMORY;
918
919 /*
920 * Initialize it.
921 *
922 * Note! The difference between a SynchronizationTimer and a NotificationTimer
923 * (KeInitializeTimer) is, as far as I can gather, only that the former
924 * will wake up exactly one waiting thread and the latter will wake up
925 * everyone. Since we don't do any waiting on the NtTimer, that is not
926 * relevant to us.
927 */
928 pTimer->u32Magic = RTTIMER_MAGIC;
929 pTimer->cOmniSuspendCountDown = 0;
930 pTimer->fSuspended = true;
931 pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
932 pTimer->fOmniTimer = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
933 pTimer->idCpu = pTimer->fSpecificCpu ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK) : NIL_RTCPUID;
934 pTimer->cSubTimers = cSubTimers;
935 pTimer->pfnTimer = pfnTimer;
936 pTimer->pvUser = pvUser;
937 KeInitializeSpinLock(&pTimer->Spinlock);
938 pTimer->u64NanoInterval = u64NanoInterval;
939
940 int rc = VINF_SUCCESS;
941#ifdef RTR0TIMER_NT_HIGH_RES
942 if ( (fFlags & RTTIMER_FLAGS_HIGH_RES)
943 && RTTimerCanDoHighResolution())
944 {
945 pTimer->pHighResTimer = g_pfnrtExAllocateTimer(pTimer->fOmniTimer ? rtTimerNtHighResOmniCallback
946 : rtTimerNtHighResSimpleCallback, pTimer,
947 EX_TIMER_HIGH_RESOLUTION | EX_TIMER_NOTIFICATION);
948 if (!pTimer->pHighResTimer)
949 rc = VERR_OUT_OF_RESOURCES;
950 }
951 else
952#endif
953 {
954 if (g_pfnrtKeInitializeTimerEx) /** @todo just call KeInitializeTimer. */
955 g_pfnrtKeInitializeTimerEx(&pTimer->NtTimer, SynchronizationTimer);
956 else
957 KeInitializeTimer(&pTimer->NtTimer);
958 }
959 if (RT_SUCCESS(rc))
960 {
961 RTCPUSET OnlineSet;
962 RTMpGetOnlineSet(&OnlineSet);
963
964 if (pTimer->fOmniTimer)
965 {
966 /*
967 * Initialize the per-cpu "sub-timers", select the first online cpu to be
968 * the master. This ASSUMES that no cpus will ever go offline.
969 *
970 * Note! For the high-resolution scenario, all DPC callbacks are slaves as
971 * we have a dedicated timer callback, set above during allocation,
972 * and don't control which CPU it (rtTimerNtHighResOmniCallback) is
973 * called on.
974 */
975 pTimer->iMasterTick = 0;
976 pTimer->idCpu = NIL_RTCPUID;
977 for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
978 {
979 pTimer->aSubTimers[iCpu].iTick = 0;
980 pTimer->aSubTimers[iCpu].pParent = pTimer;
981
982 if ( pTimer->idCpu == NIL_RTCPUID
983 && RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
984 {
985 pTimer->idCpu = RTMpCpuIdFromSetIndex(iCpu);
986#ifdef RTR0TIMER_NT_HIGH_RES
987 if (pTimer->pHighResTimer)
988 KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
989 else
990#endif
991 KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniMasterCallback, &pTimer->aSubTimers[iCpu]);
992 }
993 else
994 KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
995 if (g_pfnrtKeSetImportanceDpc)
996 g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[iCpu].NtDpc, HighImportance);
997
998 /* This does not necessarily work for offline CPUs that could potentially be onlined
999 at runtime, so postpone it. (See troubles on testboxmem1 after r148799.) */
1000 int rc2 = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[iCpu].NtDpc, iCpu);
1001 if (RT_SUCCESS(rc2))
1002 pTimer->aSubTimers[0].fDpcNeedTargetCpuSet = false;
1003 else if (!RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
1004 pTimer->aSubTimers[0].fDpcNeedTargetCpuSet = true;
1005 else
1006 {
1007 rc = rc2;
1008 break;
1009 }
1010 }
1011 Assert(pTimer->idCpu != NIL_RTCPUID);
1012 }
1013 else
1014 {
1015 /*
1016 * Initialize the first "sub-timer", target the DPC on a specific processor
1017 * if requested to do so.
1018 */
1019 pTimer->iMasterTick = 0;
1020 pTimer->aSubTimers[0].iTick = 0;
1021 pTimer->aSubTimers[0].pParent = pTimer;
1022
1023 KeInitializeDpc(&pTimer->aSubTimers[0].NtDpc, rtTimerNtSimpleCallback, pTimer);
1024 if (g_pfnrtKeSetImportanceDpc)
1025 g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[0].NtDpc, HighImportance);
1026 if (pTimer->fSpecificCpu)
1027 {
1028 /* This does not necessarily work for offline CPUs that could potentially be onlined
1029 at runtime, so postpone it. (See troubles on testboxmem1 after r148799.) */
1030 int rc2 = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, pTimer->idCpu);
1031 if (RT_SUCCESS(rc2))
1032 pTimer->aSubTimers[0].fDpcNeedTargetCpuSet = false;
1033 else if (!RTCpuSetIsMember(&OnlineSet, pTimer->idCpu))
1034 pTimer->aSubTimers[0].fDpcNeedTargetCpuSet = true;
1035 else
1036 rc = rc2;
1037 }
1038 }
1039 if (RT_SUCCESS(rc))
1040 {
1041 *ppTimer = pTimer;
1042 return VINF_SUCCESS;
1043 }
1044
1045#ifdef RTR0TIMER_NT_HIGH_RES
1046 if (pTimer->pHighResTimer)
1047 {
1048 g_pfnrtExDeleteTimer(pTimer->pHighResTimer, FALSE, FALSE, NULL);
1049 pTimer->pHighResTimer = NULL;
1050 }
1051#endif
1052 }
1053
1054 RTMemFree(pTimer);
1055 return rc;
1056}
1057
1058
1059RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
1060{
1061 if (!g_pfnrtNtExSetTimerResolution)
1062 return VERR_NOT_SUPPORTED;
1063
1064 ULONG ulGranted = g_pfnrtNtExSetTimerResolution(u32Request / 100, TRUE);
1065 if (pu32Granted)
1066 *pu32Granted = ulGranted * 100; /* NT -> ns */
1067 return VINF_SUCCESS;
1068}
1069
1070
1071RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
1072{
1073 if (!g_pfnrtNtExSetTimerResolution)
1074 return VERR_NOT_SUPPORTED;
1075
1076 g_pfnrtNtExSetTimerResolution(0 /* ignored */, FALSE);
1077 NOREF(u32Granted);
1078 return VINF_SUCCESS;
1079}
1080
1081
1082RTDECL(bool) RTTimerCanDoHighResolution(void)
1083{
1084#ifdef RTR0TIMER_NT_HIGH_RES
1085 return g_pfnrtExAllocateTimer != NULL
1086 && g_pfnrtExDeleteTimer != NULL
1087 && g_pfnrtExSetTimer != NULL
1088 && g_pfnrtExCancelTimer != NULL;
1089#else
1090 return false;
1091#endif
1092}
1093
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette