source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@99286

/* $Id: TMAll.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
/** @file
 * TM - Timeout Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#ifdef DEBUG_bird
# define DBGFTRACE_DISABLED /* annoying */
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/dbgftrace.h>
#ifdef IN_RING3
#endif
#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
#include "TMInternal.h"
#include <VBox/vmm/vmcc.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>
#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-math.h>
#include <iprt/string.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif

#include "TMInline.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#ifdef VBOX_STRICT
/** @def TMTIMER_GET_CRITSECT
 * Helper for safely resolving the critical section for a timer belonging to a
 * device instance.
 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
# ifdef IN_RING3
#  define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) ((a_pTimer)->pCritSect)
# else
#  define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) tmRZTimerGetCritSect(a_pVM, a_pTimer)
# endif
#endif

/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(a_pVM, a_pTimer) \
    do { \
        if ((a_pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(a_pVM, a_pTimer); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner((a_pVM), pCritSect) \
                          || (enmState = (a_pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", a_pTimer, (a_pTimer)->szName, \
                       (a_pTimer)->pCritSect, R3STRING(PDMR3CritSectName((a_pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pVM, pTimer) do { } while (0)
#endif

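/* Illustrative usage sketch (not from the original source): device code keeps
 * the assertion above happy by owning its critical section around timer calls.
 * The helper names below assume the PDM device helper API and a hypothetical
 * device state 'pThis' whose timer was associated with pThis->CritSect:
 *
 * @code
 *      int rc = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
 *      AssertRCReturn(rc, rc);
 *      rc = PDMDevHlpTimerSet(pDevIns, pThis->hTimer, u64Expire); // arm while owning the lock
 *      PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
 * @endcode
 */
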
/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
 * Checks for lock order trouble between the timer critsect and the virtual
 * sync critsect.  The virtual sync critsect must always be entered before
 * the one associated with the timer (see TMR3TimerQueuesDo).  It is OK if there
 * isn't any critical section associated with the timer or if the calling thread
 * doesn't own it, ASSUMING of course that the thread using this macro is going
 * to enter the virtual sync critical section anyway.
 *
 * @remarks This is a slightly relaxed timer locking attitude compared to
 *          TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
 *          should know what it's doing if it's stopping or starting a timer
 *          without taking the device lock.
 */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pVM, pTimer); \
            AssertMsg(   pCritSect \
                      && (   !PDMCritSectIsOwner((pVM), pCritSect) \
                          || PDMCritSectIsOwner((pVM), &(pVM)->tm.s.VirtualSyncLock) \
                          || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, pTimer->szName, \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
#endif


#if defined(VBOX_STRICT) && defined(IN_RING0)
/**
 * Helper for TMTIMER_GET_CRITSECT
 * @todo This needs a redo!
 */
DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PVMCC pVM, PTMTIMER pTimer)
{
    if (pTimer->enmType == TMTIMERTYPE_DEV)
    {
        RTCCUINTREG  fSavedFlags = ASMAddFlags(X86_EFL_AC); /** @todo fix ring-3 pointer use */
        PPDMDEVINSR0 pDevInsR0   = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
        ASMSetFlags(fSavedFlags);
        struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
        if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
            return pDevInsR0->pCritSectRoR0;
        uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
        if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
            return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
    }
    RT_NOREF(pVM);
    Assert(pTimer->pCritSect == NULL);
    return NULL;
}
#endif /* VBOX_STRICT && IN_RING0 */


/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.uTscStartExecuting = SUPReadTsc();
    pVCpu->tm.s.fExecuting         = true;
#endif
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}

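/* Illustrative usage sketch (not from the original source): the execution
 * loop brackets each burst of guest execution with this pair, passing the
 * exit TSC to the end notification.  'runGuestLoop' is a hypothetical
 * stand-in for the VT-x/AMD-V/IEM inner loop:
 *
 * @code
 *      TMNotifyStartOfExecution(pVM, pVCpu);
 *      int rcRun = runGuestLoop(pVM, pVCpu);
 *      TMNotifyEndOfExecution(pVM, pVCpu, SUPReadTsc());
 * @endcode
 */
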

/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uTsc    TSC value when exiting guest context.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTsc)
{
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVCpu); /** @todo use uTsc here if we can. */

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /*
     * Calculate the elapsed tick count and convert it to nanoseconds.
     */
# ifdef IN_RING3
    PSUPGLOBALINFOPAGE const pGip = g_pSUPGlobalInfoPage;
    uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDelta(pGip);
    uint64_t const uCpuHz = pGip ? SUPGetCpuHzFromGip(pGip) : pVM->tm.s.cTSCTicksPerSecondHost;
# else
    uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
    uint64_t const uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
# endif
    AssertStmt(cTicks <= uCpuHz << 2, cTicks = uCpuHz << 2); /* max 4 sec */

    uint64_t cNsExecutingDelta;
    if (uCpuHz < _4G)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks, RT_NS_1SEC, uCpuHz);
    else if (uCpuHz < 16*_1G64)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 2, RT_NS_1SEC, uCpuHz >> 2);
    else
    {
        Assert(uCpuHz < 64 * _1G64);
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 4, RT_NS_1SEC, uCpuHz >> 4);
    }

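    /* Illustrative note (not from the original source): ASMMultU64ByU32DivByU32
     * computes u64 * u32Factor / u32Divisor using a wide intermediate, so the
     * divisor must fit into 32 bits.  The shifts above keep uCpuHz in range:
     * at 3 GHz (below _4G) no shift is needed and cTicks = 6'000'000 gives
     * 6'000'000 * 1e9 / 3e9 = 2'000'000 ns; at 5 GHz both cTicks and uCpuHz
     * are shifted right by two, which keeps the divisor below 4G while
     * preserving the ratio to within rounding. */
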
    /*
     * Update the data.
     *
     * Note! We're not using strict memory ordering here to speed things up.
     *       The data is in a single cache line and this thread is the only
     *       one writing to that line, so I cannot quite imagine why we would
     *       need any strict ordering here.
     */
    uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    ASMCompilerBarrier();
    pVCpu->tm.s.fExecuting   = false;
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMCompilerBarrier();
    ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

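    /* Illustrative note (not from the original source): uTimesGen acts like a
     * seqlock generation counter, odd while an update is in flight and bumped
     * to the next even value once it is published.  A minimal reader sketch
     * under that assumption:
     *
     * @code
     *      uint32_t uGen2;
     *      uint64_t cNsExecuting;
     *      do
     *      {
     *          uGen2 = ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen);
     *          ASMCompilerBarrier();
     *          cNsExecuting = pVCpu->tm.s.cNsExecuting;
     *          ASMCompilerBarrier();
     *      } while (   (uGen2 & 1)  // writer active
     *               || uGen2 != ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen));
     * @endcode
     */
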
    /*
     * Update stats.
     */
# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
# endif

    /* The timer triggers occasional updating of the other and total stats: */
    if (RT_LIKELY(!pVCpu->tm.s.fUpdateStats))
    { /*likely*/ }
    else
    {
        pVCpu->tm.s.fUpdateStats = false;

        uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
        uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
        int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
        if (cNsOtherNewDelta > 0)
            STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif

        pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
        pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
    }

#endif
}


/**
 * Notification that the CPU is entering the halt state.
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.nsStartHalting = RTTimeNanoTS();
    pVCpu->tm.s.fHalting       = true;
#endif

    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that the CPU is leaving the halt state.
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs        = RTTimeNanoTS();
    uint64_t const cNsTotalNew    = u64NsTs - pVCpu->tm.s.nsStartTotal;
    uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.nsStartHalting;
    uint64_t const cNsHaltedNew   = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
    uint64_t const cNsOtherNew    = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

    uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    ASMCompilerBarrier();
    pVCpu->tm.s.fHalting     = false;
    pVCpu->tm.s.fUpdateStats = false;
    pVCpu->tm.s.cNsHalted    = cNsHaltedNew;
    pVCpu->tm.s.cPeriodsHalted++;
    ASMCompilerBarrier();
    ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
    if (cNsOtherNewDelta > 0)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif
    pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
    pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
#endif
}


/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * @param   pVM     The cross context VM structure.
 */
DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
{
    VMCPUID idCpu = pVM->tm.s.idTimerCpu;
    AssertReturnVoid(idCpu < pVM->cCpus);
    PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);

    if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
    {
        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    }
}


/**
 * Schedule the queue which was changed.
 */
DECLINLINE(void) tmSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
    if (RT_SUCCESS_NP(rc))
    {
        STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
        tmTimerQueueSchedule(pVM, pQueueCC, pQueue);
#ifdef VBOX_STRICT
        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
        STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        PDMCritSectLeave(pVM, &pQueue->TimerLock);
        return;
    }

    TMTIMERSTATE enmState = pTimer->enmState;
    if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
        tmScheduleNotify(pVM);
}


/**
 * Try to change the state to enmStateNew from enmStateOld.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}


/**
 * Links the timer onto the scheduling queue.
 *
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pTimer->idxScheduleNext == UINT32_MAX);
    const uint32_t idxHeadNew = pTimer - &pQueueCC->paTimers[0];
    AssertReturnVoid(idxHeadNew < pQueueCC->cTimersAlloc);

    uint32_t idxHead;
    do
    {
        idxHead = pQueue->idxSchedule;
        Assert(idxHead == UINT32_MAX || idxHead < pQueueCC->cTimersAlloc);
        pTimer->idxScheduleNext = idxHead;
    } while (!ASMAtomicCmpXchgU32(&pQueue->idxSchedule, idxHeadNew, idxHead));
}

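/* Illustrative note (not from the original source): the loop above is the
 * classic lock-free LIFO push, retrying the compare-and-exchange until the
 * head index is swapped in unchanged.  The usual ABA hazard does not apply
 * here because consumers never pop individual entries: tmTimerQueueSchedule
 * detaches the whole list with a single ASMAtomicXchgU32 and only then walks
 * it. */
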

/**
 * Try to change the state to enmStateNew from enmStateOld
 * and link the timer into the scheduling queue.
 *
 * @returns Success indicator.
 * @param   pQueueCC        The current context queue (same as @a pQueue for
 *                          ring-3).
 * @param   pQueue          The shared queue data.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer,
                                    TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
    {
        tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
        return true;
    }
    return false;
}


/**
 * Links a timer into the active list of a timer queue.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer.
 * @param   u64Expire   The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue,
                                               PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */
    RT_NOREF(pVM);

    PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
    if (pCur)
    {
        for (;; pCur = tmTimerGetNext(pQueueCC, pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                const PTMTIMER pPrev = tmTimerGetPrev(pQueueCC, pCur);
                tmTimerSetNext(pQueueCC, pTimer, pCur);
                tmTimerSetPrev(pQueueCC, pTimer, pPrev);
                if (pPrev)
                    tmTimerSetNext(pQueueCC, pPrev, pTimer);
                else
                {
                    tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive head", pTimer->szName);
                }
                tmTimerSetPrev(pQueueCC, pCur, pTimer);
                return;
            }
            if (pCur->idxNext == UINT32_MAX)
            {
                tmTimerSetNext(pQueueCC, pCur, pTimer);
                tmTimerSetPrev(pQueueCC, pTimer, pCur);
                DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive tail", pTimer->szName);
                return;
            }
        }
    }
    else
    {
        tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive empty", pTimer->szName);
    }
}

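/* Illustrative note (not from the original source): the walk above keeps the
 * active list sorted by ascending u64Expire, so the head always carries the
 * queue's next deadline (mirrored into pQueue->u64Expire).  E.g. inserting an
 * expire of 150 into [100, 200, 300] yields [100, 150, 200, 300]; because the
 * comparison is a strict '>', a timer with an expire equal to an existing
 * entry is linked after it. */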


/**
 * Schedules the given timer on the given queue.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
    RT_NOREF(pVM);

    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
                STAM_COUNTER_INC(&pVM->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}


/**
 * Schedules the specified timer queue.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3) data of the queue to schedule.
 * @param   pQueue      The shared queue data of the queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
{
    Assert(PDMCritSectIsOwner(pVM, &pQueue->TimerLock));

    /*
     * Dequeue the scheduling list and iterate it.
     */
    uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, idxNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, idxNext, pQueue->u64Expire));
    while (idxNext != UINT32_MAX)
    {
        AssertBreak(idxNext < pQueueCC->cTimersAlloc);

        /*
         * Unlink the head timer and take down the index of the next one.
         */
        PTMTIMER pTimer = &pQueueCC->paTimers[idxNext];
        idxNext = pTimer->idxScheduleNext;
        pTimer->idxScheduleNext = UINT32_MAX;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .szName=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pQueue->enmClock, pTimer->enmType, pTimer->szName));
        tmTimerQueueScheduleOne(pVM, pQueueCC, pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    }
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}


#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszWhere    Caller location clue.
 */
void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere)
{
    for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
    {
        PTMTIMERQUEUE const   pQueue   = &pVM->tm.s.aTimerQueues[idxQueue];
        PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
        Assert(pQueue->enmClock == (TMCLOCK)idxQueue);

        int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
        if (RT_SUCCESS(rc))
        {
            if (   pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC
                || PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
            {
                /* Check the linking of the active lists. */
                PTMTIMER pPrev = NULL;
                for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
                     pCur;
                     pPrev = pCur, pCur = tmTimerGetNext(pQueueCC, pCur))
                {
                    AssertMsg(tmTimerGetPrev(pQueueCC, pCur) == pPrev, ("%s: %p != %p\n", pszWhere, tmTimerGetPrev(pQueueCC, pCur), pPrev));
                    TMTIMERSTATE enmState = pCur->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_ACTIVE:
                            AssertMsg(   pCur->idxScheduleNext == UINT32_MAX
                                      || pCur->enmState != TMTIMERSTATE_ACTIVE,
                                      ("%s: %RI32\n", pszWhere, pCur->idxScheduleNext));
                            break;
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            break;
                        default:
                            AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                            break;
                    }
                }

# ifdef IN_RING3
                /* Go thru all the timers and check that the active ones all are in the active lists. */
                uint32_t idxTimer = pQueue->cTimersAlloc;
                uint32_t cFree    = 0;
                while (idxTimer-- > 0)
                {
                    PTMTIMER const     pTimer   = &pQueue->paTimers[idxTimer];
                    TMTIMERSTATE const enmState = pTimer->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_FREE:
                            cFree++;
                            break;

                        case TMTIMERSTATE_ACTIVE:
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                        {
                            PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                            Assert(pTimer->idxPrev != UINT32_MAX || pTimer == pCurAct);
                            while (pCurAct && pCurAct != pTimer)
                                pCurAct = tmTimerGetNext(pQueueCC, pCurAct);
                            Assert(pCurAct == pTimer);
                            break;
                        }

                        case TMTIMERSTATE_PENDING_SCHEDULE:
                        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                        case TMTIMERSTATE_STOPPED:
                        case TMTIMERSTATE_EXPIRED_DELIVER:
                        {
                            Assert(pTimer->idxNext == UINT32_MAX);
                            Assert(pTimer->idxPrev == UINT32_MAX);
                            for (PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                                 pCurAct;
                                 pCurAct = tmTimerGetNext(pQueueCC, pCurAct))
                            {
                                Assert(pCurAct != pTimer);
                                Assert(tmTimerGetNext(pQueueCC, pCurAct) != pTimer);
                                Assert(tmTimerGetPrev(pQueueCC, pCurAct) != pTimer);
                            }
                            break;
                        }

                        /* ignore */
                        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            break;

                        case TMTIMERSTATE_INVALID:
                            Assert(idxTimer == 0);
                            break;

                        /* shouldn't get here! */
                        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                        case TMTIMERSTATE_DESTROY:
                        default:
                            AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                            break;
                    }

                    /* Check the handle value. */
                    if (enmState > TMTIMERSTATE_INVALID && enmState < TMTIMERSTATE_DESTROY)
                    {
                        Assert((pTimer->hSelf & TMTIMERHANDLE_TIMER_IDX_MASK) == idxTimer);
                        Assert(((pTimer->hSelf >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) & TMTIMERHANDLE_QUEUE_IDX_SMASK) == idxQueue);
                    }
                }
                Assert(cFree == pQueue->cTimersFree);
# endif /* IN_RING3 */

                if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
                    PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
            }
            PDMCritSectLeave(pVM, &pQueue->TimerLock);
        }
    }
}
#endif /* VBOX_STRICT */

#ifdef VBOX_HIGH_RES_TIMERS_HACK

/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63)));

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    if (u64GipTime >= u64Delta)
    {
        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta   = ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
    }
    else
    {
        u64Delta -= u64GipTime;
        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}

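/* Illustrative note (not from the original source): with a warp drive
 * percentage of 200 the virtual clock runs at twice real time, so a deadline
 * one virtual second out is only half a GIP second away.  The scaling by
 * 100/u32Pct above converts the portion of the wait that falls after the
 * warp start back into GIP time: u32Pct = 200 and a 1'000'000'000 ns virtual
 * delta entirely within the warp period yield a 500'000'000 ns GIP delta. */
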

/**
 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
 * than the one dedicated to timer work.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
{
    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
    *pu64Delta = s_u64OtherRet;
    return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
}


/**
 * Worker for tmTimerPollInternal.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pVCpuDst    The cross context virtual CPU structure of the dedicated
 *                      timer EMT.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 * @param   pCounter    The statistics counter to update.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
                                                 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
{
    STAM_COUNTER_INC(pCounter); NOREF(pCounter);
    if (pVCpuDst != pVCpu)
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    *pu64Delta = 0;
    return 0;
}


/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pu64Delta   Where to store the delta.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
{
    VMCPUID idCpu = pVM->tm.s.idTimerCpu;
    AssertReturn(idCpu < pVM->cCpus, 0);
    PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);

    const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t  i64Delta1  = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straightforward if we're in a catch-up: not only do
     * we have to adjust the 'now', but we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (   !pVM->tm.s.fRunningQueues
                    && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     */
    uint64_t    off;
    uint32_t    u32Pct = 0;
    bool        fCatchUp;
    int         cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (   !pVM->tm.s.fRunningQueues
            && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}

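/* Illustrative note (not from the original source): during catch-up the
 * virtual sync clock runs at (100 + u32Pct) percent of the virtual clock,
 * which explains both adjustments above.  E.g. with u32Pct = 25 the offset
 * backlog shrinks by u64Delta * 25 / 100 for every u64Delta of virtual time,
 * and a remaining virtual sync delta of 125 ns is covered after only
 * 125 * 100 / 125 = 100 ns of virtual time, which is why i64Delta2 is scaled
 * by 100 / (u32Pct + 100) before being fed to tmTimerPollReturnMiss. */
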

/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns true if timers are pending, false if not.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @thread  The emulation thread.
 */
VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t off = 0;
    tmTimerPollInternal(pVM, pVCpu, &off);
    return off == 0;
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
{
    uint64_t off;
    tmTimerPollInternal(pVM, pVCpu, &off);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pu64Delta   Where to store the delta.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
{
    return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
}

#endif /* VBOX_HIGH_RES_TIMERS_HACK */

/**
 * Locks the timer clock.
 *
 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
 *          if the clock does not have a lock.
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 * @param   rcBusy      What to return in ring-0 and raw-mode context if the
 *                      lock is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section thru a ring-3 call if necessary.
 *
 * @remarks Currently only supported on timers using the virtual sync clock.
 */
VMMDECL(int) TMTimerLock(PVMCC pVM, TMTIMERHANDLE hTimer, int rcBusy)
{
    TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
    return PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, rcBusy);
}


/**
 * Unlocks a timer clock locked by TMTimerLock.
 *
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(void) TMTimerUnlock(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    TMTIMER_HANDLE_TO_VARS_RETURN_VOID(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    AssertReturnVoid(idxQueue == TMCLOCK_VIRTUAL_SYNC);
    PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
}

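/* Illustrative usage sketch (not from the original source), for a caller that
 * needs to update a virtual-sync timer consistently with other state guarded
 * by the same clock lock:
 *
 * @code
 *      int rc = TMTimerLock(pVM, hTimer, VINF_SUCCESS);
 *      AssertRCReturn(rc, rc);
 *      rc = TMTimerSet(pVM, hTimer, u64Expire); // the virtual sync lock is held across the update
 *      TMTimerUnlock(pVM, hTimer);
 * @endcode
 */
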

/**
 * Checks if the current thread owns the timer clock lock.
 *
 * @returns @c true if it's the owner, @c false if not.
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(bool) TMTimerIsLockOwner(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, false);
    return PDMCritSectIsOwner(pVM, &pVM->tm.s.VirtualSyncLock);
}


/**
 * Optimized TMTimerSet code path for starting an inactive timer.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pTimer      The timer handle.
 * @param   u64Expire   The new expire time.
 * @param   pQueue      Pointer to the shared timer queue data.
 * @param   idxQueue    The queue index.
 */
static int tmTimerSetOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire, PTMTIMERQUEUE pQueue, uint32_t idxQueue)
{
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
    {
        uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
        AssertMsgStmt(u64Expire >= u64Last,
                      ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
                      u64Expire = u64Last);
    }
    ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, pTimer->szName, u64Expire));

    /*
     * Link the timer into the active list.
     */
    tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue), pQueue, pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    return VINF_SUCCESS;
}


/**
 * TMTimerSet for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM         The cross context VM structure.
 * @param   pTimer      The timer handle.
 * @param   u64Expire   The expiration time.
 */
static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    PTMTIMERQUEUE const   pQueue   = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
    PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
    TMTIMERSTATE const    enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);

            AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
                      ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, pTimer->szName));
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
            tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Arm a timer with a (new) expire time.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 * @param   u64Expire   New expire time.
 */
VMMDECL(int) TMTimerSet(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t u64Expire)
{
    TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    STAM_COUNTER_INC(&pTimer->StatSetAbsolute);

    /* Treat virtual sync timers specially. */
    if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pVM, pTimer);

    DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", pTimer->szName);

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

#if 1
    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        ||  (   enmState1 == TMTIMERSTATE_STOPPED
             && pTimer->pCritSect))
    {
        /* Try take the TM lock and check the state again. */
        int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
        if (RT_SUCCESS_NP(rc))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire, pQueue, idxQueue);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                PDMCritSectLeave(pVM, &pQueue->TimerLock);
                return VINF_SUCCESS;
            }
            PDMCritSectLeave(pVM, &pQueue->TimerLock);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), pTimer->szName, cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(pTimer->idxPrev == UINT32_MAX);
                    Assert(pTimer->idxNext == UINT32_MAX);
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}

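/* Illustrative usage sketch (not from the original source): the fast path
 * above targets a periodic timer re-arming itself from its own callback while
 * in TMTIMERSTATE_EXPIRED_DELIVER.  The sketch assumes the PDM device helper
 * API and a virtual-clock timer, whose ticks are nanoseconds:
 *
 * @code
 *      static DECLCALLBACK(void) myDevTimerCallback(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
 *      {
 *          // ... periodic device work ...
 *          uint64_t const cNsInterval = RT_NS_1MS; // illustrative 1 ms period
 *          PDMDevHlpTimerSet(pDevIns, hTimer, PDMDevHlpTimerGet(pDevIns, hTimer) + cNsInterval);
 *      }
 * @endcode
 */
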

/**
 * Return the current time for the specified clock, setting pu64Now if not NULL.
 *
 * @returns Current time.
 * @param   pVM         The cross context VM structure.
 * @param   enmClock    The clock to query.
 * @param   pu64Now     Optional pointer where to store the return time.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
{
    uint64_t u64Now;
    switch (enmClock)
    {
        case TMCLOCK_VIRTUAL_SYNC:
            u64Now = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_VIRTUAL:
            u64Now = TMVirtualGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64Now = TMRealGet(pVM);
            break;
        default:
            AssertFatalMsgFailed(("%d\n", enmClock));
    }

    if (pu64Now)
        *pu64Now = u64Now;
    return u64Now;
}


/**
 * Optimized TMTimerSetRelative code path.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 * @param   pQueueCC        The context specific queue data (same as @a pQueue
 *                          for ring-3).
 * @param   pQueue          The shared queue data.
 */
static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
                                            PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
{
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
    pTimer->u64Expire = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, pTimer->szName, u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", pTimer->szName);
    tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    return VINF_SUCCESS;
}


/**
 * TMTimerSetRelative for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer to (re-)arm.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    /* Calculate the expiration tick. */
    uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
    if (pu64Now)
        *pu64Now = u64Expire;
    u64Expire += cTicksToNext;

    /* Update the timer. */
    PTMTIMERQUEUE const   pQueue   = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
    PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
    TMTIMERSTATE const    enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
            tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
    PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
1620 * Arm a timer with an expire time relative to the current time.
1621 *
1622 * @returns VBox status code.
1623 * @param pVM The cross context VM structure.
1624 * @param pTimer The timer to arm.
1625 * @param cTicksToNext Clock ticks until the next time expiration.
1626 * @param pu64Now Where to return the current time stamp used.
1627 * Optional.
1628 * @param pQueueCC The context specific queue data (same as @a pQueue
1629 * for ring-3).
1630 * @param pQueue The shared queue data.
1631 */
1632static int tmTimerSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1633 PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
1634{
1635 STAM_COUNTER_INC(&pTimer->StatSetRelative);
1636
1637 /* Treat virtual sync timers specially. */
1638 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1639 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
1640
1641 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1642 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1643
1644 DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", pTimer->szName);
1645
1646#ifdef VBOX_WITH_STATISTICS
1647 /*
1648 * Gather optimization info.
1649 */
1650 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1651 TMTIMERSTATE enmOrgState = pTimer->enmState;
1652 switch (enmOrgState)
1653 {
1654 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1655 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1656 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1657 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1658 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1659 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1660 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1661 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1662 }
1663#endif
1664
1665 /*
1666 * Try to take the TM lock and optimize the common cases.
1667 *
1668 * With the TM lock we can safely make optimizations like immediate
1669 * scheduling and we can also be 100% sure that we're not racing the
1670 * running of the timer queues. As an additional restraint we require the
1671 * timer to have a critical section associated with it to be 100% sure
1672 * there aren't concurrent operations on the timer. (The latter isn't
1673 * necessary any longer as this isn't supported for any timers, critsect or not.)
1674 *
1675 * Note! Lock ordering doesn't apply when we only _try_ to
1676 * get the innermost locks.
1677 */
1678 bool fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1679#if 1
1680 if ( fOwnTMLock
1681 && pTimer->pCritSect)
1682 {
1683 TMTIMERSTATE enmState = pTimer->enmState;
1684 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1685 || enmState == TMTIMERSTATE_STOPPED)
1686 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1687 {
1688 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1689 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1690 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1691 return VINF_SUCCESS;
1692 }
1693
1694 /* Optimize other states when it becomes necessary. */
1695 }
1696#endif
1697
1698 /*
1699 * Unoptimized path.
1700 */
1701 int rc;
1702 for (int cRetries = 1000; ; cRetries--)
1703 {
1704 /*
1705 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1706 */
1707 TMTIMERSTATE enmState = pTimer->enmState;
1708 switch (enmState)
1709 {
1710 case TMTIMERSTATE_STOPPED:
1711 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1712 {
1713 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1714 * Figure a safe way of activating this timer while the queue is
1715 * being run.
1716 * (99.9% sure that the assertion is caused by DevAPIC.cpp
1717 * re-starting the timer in response to an initial_count write.) */
1718 }
1719 RT_FALL_THRU();
1720 case TMTIMERSTATE_EXPIRED_DELIVER:
1721 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1722 {
1723 Assert(pTimer->idxPrev == UINT32_MAX);
1724 Assert(pTimer->idxNext == UINT32_MAX);
1725 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1726 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1727 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1728 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1729 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1730 rc = VINF_SUCCESS;
1731 break;
1732 }
1733 rc = VERR_TRY_AGAIN;
1734 break;
1735
1736 case TMTIMERSTATE_PENDING_SCHEDULE:
1737 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1738 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1739 {
1740 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1741 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1742 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1743 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1744 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1745 rc = VINF_SUCCESS;
1746 break;
1747 }
1748 rc = VERR_TRY_AGAIN;
1749 break;
1750
1751
1752 case TMTIMERSTATE_ACTIVE:
1753 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1754 {
1755 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1756 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1757 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1758 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1759 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1760 rc = VINF_SUCCESS;
1761 break;
1762 }
1763 rc = VERR_TRY_AGAIN;
1764 break;
1765
1766 case TMTIMERSTATE_PENDING_RESCHEDULE:
1767 case TMTIMERSTATE_PENDING_STOP:
1768 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1769 {
1770 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1771 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1772 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1773 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1774 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1775 rc = VINF_SUCCESS;
1776 break;
1777 }
1778 rc = VERR_TRY_AGAIN;
1779 break;
1780
1781
1782 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1783 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1784 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1785#ifdef IN_RING3
1786 if (!RTThreadYield())
1787 RTThreadSleep(1);
1788#else
1789/** @todo call host context and yield after a couple of iterations */
1790#endif
1791 rc = VERR_TRY_AGAIN;
1792 break;
1793
1794 /*
1795 * Invalid states.
1796 */
1797 case TMTIMERSTATE_DESTROY:
1798 case TMTIMERSTATE_FREE:
1799 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1800 rc = VERR_TM_INVALID_STATE;
1801 break;
1802
1803 default:
1804 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1805 rc = VERR_TM_UNKNOWN_STATE;
1806 break;
1807 }
1808
1809 /* switch + loop is tedious to break out of. */
1810 if (rc == VINF_SUCCESS)
1811 break;
1812
1813 if (rc != VERR_TRY_AGAIN)
1814 {
1815 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1816 break;
1817 }
1818 if (cRetries <= 0)
1819 {
1820 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1821 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1822 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1823 break;
1824 }
1825
1826 /*
1827 * Retry to gain locks.
1828 */
1829 if (!fOwnTMLock)
1830 fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1831
1832 } /* for (;;) */
1833
1834 /*
1835 * Clean up and return.
1836 */
1837 if (fOwnTMLock)
1838 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1839
1840 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1841 return rc;
1842}
1843
1844
1845/**
1846 * Arm a timer with an expire time relative to the current time.
1847 *
1848 * @returns VBox status code.
1849 * @param pVM The cross context VM structure.
1850 * @param hTimer Timer handle as returned by one of the create functions.
1851 * @param cTicksToNext Clock ticks until the next time expiration.
1852 * @param pu64Now Where to return the current time stamp used.
1853 * Optional.
1854 */
1855VMMDECL(int) TMTimerSetRelative(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1856{
1857 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1858 return tmTimerSetRelative(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1859}
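
/** Usage sketch (illustrative addition, not part of the original source):
 * re-arming a TMCLOCK_VIRTUAL timer 100 microseconds ahead while also
 * fetching the base timestamp. Assumes @a hTimer was created against the
 * virtual clock and that the caller owns the timer's critical section, as
 * TMTIMER_ASSERT_CRITSECT requires:
 * @code
 *      uint64_t u64Now;
 *      int rc = TMTimerSetRelative(pVM, hTimer, 100 * UINT64_C(1000), &u64Now);
 *      AssertRC(rc);
 *      Log(("Armed at %'RU64, expires at %'RU64\n", u64Now, u64Now + 100000));
 * @endcode
 */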
1860
1861
1862/**
1863 * Drops a hint about the frequency of the timer.
1864 *
1865 * This is used by TM and the VMM to calculate how often guest execution needs
1866 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1867 *
1868 * @returns VBox status code.
1869 * @param pVM The cross context VM structure.
1870 * @param hTimer Timer handle as returned by one of the create functions.
1871 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1872 *
1873 * @remarks We're using an integer hertz value here since anything above 1 Hz
1874 * is not going to be any trouble to satisfy scheduling-wise. The
1875 * range where it makes sense is >= 100 Hz.
1876 */
1877VMMDECL(int) TMTimerSetFrequencyHint(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t uHzHint)
1878{
1879 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1880 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1881
1882 uint32_t const uHzOldHint = pTimer->uHzHint;
1883 pTimer->uHzHint = uHzHint;
1884
1885 uint32_t const uMaxHzHint = pQueue->uMaxHzHint;
1886 if ( uHzHint > uMaxHzHint
1887 || uHzOldHint >= uMaxHzHint)
1888 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1889
1890 return VINF_SUCCESS;
1891}
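
/** Usage sketch (illustrative addition, not part of the original source): a
 * device running a 100 Hz periodic timer would pair the relative arm with a
 * matching hint so TMCalcHostTimerFrequency (further down) can size the
 * preemption timer; TMTimerStop clears the hint again:
 * @code
 *      TMTimerSetMillies(pVM, hTimer, 10);
 *      TMTimerSetFrequencyHint(pVM, hTimer, 100);
 * @endcode
 */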
1892
1893
1894/**
1895 * TMTimerStop for the virtual sync timer queue.
1896 *
1897 * This employs a greatly simplified state machine by always acquiring the
1898 * queue lock and bypassing the scheduling list.
1899 *
1900 * @returns VBox status code
1901 * @param pVM The cross context VM structure.
1902 * @param pTimer The timer handle.
1903 */
1904static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
1905{
1906 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1907 VM_ASSERT_EMT(pVM);
1908 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1909 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1910 AssertRCReturn(rc, rc);
1911
1912 /* Reset the HZ hint. */
1913 uint32_t uOldHzHint = pTimer->uHzHint;
1914 if (uOldHzHint)
1915 {
1916 if (uOldHzHint >= pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].uMaxHzHint)
1917 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(TMCLOCK_VIRTUAL_SYNC) | RT_BIT_32(TMCLOCK_VIRTUAL_SYNC + 16));
1918 pTimer->uHzHint = 0;
1919 }
1920
1921 /* Update the timer state. */
1922 TMTIMERSTATE const enmState = pTimer->enmState;
1923 switch (enmState)
1924 {
1925 case TMTIMERSTATE_ACTIVE:
1926 {
1927 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1928 tmTimerQueueUnlinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue), pQueue, pTimer);
1929 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1930 rc = VINF_SUCCESS;
1931 break;
1932 }
1933
1934 case TMTIMERSTATE_EXPIRED_DELIVER:
1935 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1936 rc = VINF_SUCCESS;
1937 break;
1938
1939 case TMTIMERSTATE_STOPPED:
1940 rc = VINF_SUCCESS;
1941 break;
1942
1943 case TMTIMERSTATE_PENDING_RESCHEDULE:
1944 case TMTIMERSTATE_PENDING_STOP:
1945 case TMTIMERSTATE_PENDING_SCHEDULE:
1946 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1947 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1948 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1949 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1950 case TMTIMERSTATE_DESTROY:
1951 case TMTIMERSTATE_FREE:
1952 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1953 rc = VERR_TM_INVALID_STATE;
1954 break;
1955
1956 default:
1957 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1958 rc = VERR_TM_UNKNOWN_STATE;
1959 break;
1960 }
1961
1962 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1963 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1964 return rc;
1965}
1966
1967
1968/**
1969 * Stop the timer.
1970 * Use TMTimerSet() or TMTimerSetRelative() to re-arm it.
1971 *
1972 * @returns VBox status code.
1973 * @param pVM The cross context VM structure.
1974 * @param hTimer Timer handle as returned by one of the create functions.
1975 */
1976VMMDECL(int) TMTimerStop(PVMCC pVM, TMTIMERHANDLE hTimer)
1977{
1978 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1979 STAM_COUNTER_INC(&pTimer->StatStop);
1980
1981 /* Treat virtual sync timers specially. */
1982 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
1983 return tmTimerVirtualSyncStop(pVM, pTimer);
1984
1985 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1986 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1987
1988 /*
1989 * Reset the HZ hint.
1990 */
1991 uint32_t const uOldHzHint = pTimer->uHzHint;
1992 if (uOldHzHint)
1993 {
1994 if (uOldHzHint >= pQueue->uMaxHzHint)
1995 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1996 pTimer->uHzHint = 0;
1997 }
1998
1999 /** @todo see if this function needs optimizing. */
2000 int cRetries = 1000;
2001 do
2002 {
2003 /*
2004 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
2005 */
2006 TMTIMERSTATE enmState = pTimer->enmState;
2007 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
2008 pTimer, tmTimerState(enmState), pTimer->szName, cRetries));
2009 switch (enmState)
2010 {
2011 case TMTIMERSTATE_EXPIRED_DELIVER:
2012 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
2013 return VERR_INVALID_PARAMETER;
2014
2015 case TMTIMERSTATE_STOPPED:
2016 case TMTIMERSTATE_PENDING_STOP:
2017 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2018 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2019 return VINF_SUCCESS;
2020
2021 case TMTIMERSTATE_PENDING_SCHEDULE:
2022 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
2023 {
2024 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2025 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2026 return VINF_SUCCESS;
2027 }
2028 break;
2029
2030 case TMTIMERSTATE_PENDING_RESCHEDULE:
2031 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2032 {
2033 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2034 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2035 return VINF_SUCCESS;
2036 }
2037 break;
2038
2039 case TMTIMERSTATE_ACTIVE:
2040 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2041 {
2042 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2043 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2044 return VINF_SUCCESS;
2045 }
2046 break;
2047
2048 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2049 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2050 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2051#ifdef IN_RING3
2052 if (!RTThreadYield())
2053 RTThreadSleep(1);
2054#else
2055/** @todo call host and yield cpu after a while. */
2056#endif
2057 break;
2058
2059 /*
2060 * Invalid states.
2061 */
2062 case TMTIMERSTATE_DESTROY:
2063 case TMTIMERSTATE_FREE:
2064 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2065 return VERR_TM_INVALID_STATE;
2066 default:
2067 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2068 return VERR_TM_UNKNOWN_STATE;
2069 }
2070 } while (cRetries-- > 0);
2071
2072 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2073 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2074 return VERR_TM_TIMER_UNSTABLE_STATE;
2075}
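
/** Usage sketch (illustrative addition, not part of the original source):
 * stopping a timer before reconfiguring it. Guarding with TMTimerIsActive
 * (below) avoids the VERR_INVALID_PARAMETER return that non-virtual-sync
 * timers in the TMTIMERSTATE_EXPIRED_DELIVER state would otherwise get:
 * @code
 *      if (TMTimerIsActive(pVM, hTimer))
 *      {
 *          int rc = TMTimerStop(pVM, hTimer);
 *          AssertRC(rc);
 *      }
 * @endcode
 */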
2076
2077
2078/**
2079 * Get the current clock time.
2080 * Handy for calculating the new expire time.
2081 *
2082 * @returns Current clock time.
2083 * @param pVM The cross context VM structure.
2084 * @param hTimer Timer handle as returned by one of the create functions.
2085 */
2086VMMDECL(uint64_t) TMTimerGet(PVMCC pVM, TMTIMERHANDLE hTimer)
2087{
2088 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2089 STAM_COUNTER_INC(&pTimer->StatGet);
2090
2091 uint64_t u64;
2092 switch (pQueue->enmClock)
2093 {
2094 case TMCLOCK_VIRTUAL:
2095 u64 = TMVirtualGet(pVM);
2096 break;
2097 case TMCLOCK_VIRTUAL_SYNC:
2098 u64 = TMVirtualSyncGet(pVM);
2099 break;
2100 case TMCLOCK_REAL:
2101 u64 = TMRealGet(pVM);
2102 break;
2103 default:
2104 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2105 return UINT64_MAX;
2106 }
2107 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2108 // u64, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2109 return u64;
2110}
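
/** Usage sketch (illustrative addition, not part of the original source):
 * the typical way of arming an absolute expire time, combining TMTimerGet
 * with the TMTimerFromMilli conversion below and TMTimerSet (the absolute
 * variant defined earlier in this file):
 * @code
 *      uint64_t const u64Expire = TMTimerGet(pVM, hTimer)
 *                               + TMTimerFromMilli(pVM, hTimer, 10);
 *      int rc = TMTimerSet(pVM, hTimer, u64Expire);
 *      AssertRC(rc);
 * @endcode
 */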
2111
2112
2113/**
2114 * Get the frequency of the timer clock.
2115 *
2116 * @returns Clock frequency (as Hz of course).
2117 * @param pVM The cross context VM structure.
2118 * @param hTimer Timer handle as returned by one of the create functions.
2119 */
2120VMMDECL(uint64_t) TMTimerGetFreq(PVMCC pVM, TMTIMERHANDLE hTimer)
2121{
2122 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2123 switch (pQueue->enmClock)
2124 {
2125 case TMCLOCK_VIRTUAL:
2126 case TMCLOCK_VIRTUAL_SYNC:
2127 return TMCLOCK_FREQ_VIRTUAL;
2128
2129 case TMCLOCK_REAL:
2130 return TMCLOCK_FREQ_REAL;
2131
2132 default:
2133 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2134 return 0;
2135 }
2136}
2137
2138
2139/**
2140 * Get the expire time of the timer.
2141 * Only valid for active timers.
2142 *
2143 * @returns Expire time of the timer.
2144 * @param pVM The cross context VM structure.
2145 * @param hTimer Timer handle as returned by one of the create functions.
2146 */
2147VMMDECL(uint64_t) TMTimerGetExpire(PVMCC pVM, TMTIMERHANDLE hTimer)
2148{
2149 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, UINT64_MAX); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2150 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
2151 int cRetries = 1000;
2152 do
2153 {
2154 TMTIMERSTATE enmState = pTimer->enmState;
2155 switch (enmState)
2156 {
2157 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2158 case TMTIMERSTATE_EXPIRED_DELIVER:
2159 case TMTIMERSTATE_STOPPED:
2160 case TMTIMERSTATE_PENDING_STOP:
2161 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2162 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2163 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2164 return UINT64_MAX;
2165
2166 case TMTIMERSTATE_ACTIVE:
2167 case TMTIMERSTATE_PENDING_RESCHEDULE:
2168 case TMTIMERSTATE_PENDING_SCHEDULE:
2169 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2170 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2171 return pTimer->u64Expire;
2172
2173 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2174 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2175#ifdef IN_RING3
2176 if (!RTThreadYield())
2177 RTThreadSleep(1);
2178#endif
2179 break;
2180
2181 /*
2182 * Invalid states.
2183 */
2184 case TMTIMERSTATE_DESTROY:
2185 case TMTIMERSTATE_FREE:
2186 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2187 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2188 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2189 return UINT64_MAX;
2190 default:
2191 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2192 return UINT64_MAX;
2193 }
2194 } while (cRetries-- > 0);
2195
2196 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2197 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2198 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2199 return UINT64_MAX;
2200}
2201
2202
2203/**
2204 * Checks if a timer is active or not.
2205 *
2206 * @returns True if active.
2207 * @returns False if not active.
2208 * @param pVM The cross context VM structure.
2209 * @param hTimer Timer handle as returned by one of the create functions.
2210 */
2211VMMDECL(bool) TMTimerIsActive(PVMCC pVM, TMTIMERHANDLE hTimer)
2212{
2213 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2214 TMTIMERSTATE enmState = pTimer->enmState;
2215 switch (enmState)
2216 {
2217 case TMTIMERSTATE_STOPPED:
2218 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2219 case TMTIMERSTATE_EXPIRED_DELIVER:
2220 case TMTIMERSTATE_PENDING_STOP:
2221 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2222 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2223 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2224 return false;
2225
2226 case TMTIMERSTATE_ACTIVE:
2227 case TMTIMERSTATE_PENDING_RESCHEDULE:
2228 case TMTIMERSTATE_PENDING_SCHEDULE:
2229 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2230 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2231 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2232 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2233 return true;
2234
2235 /*
2236 * Invalid states.
2237 */
2238 case TMTIMERSTATE_DESTROY:
2239 case TMTIMERSTATE_FREE:
2240 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), pTimer->szName));
2241 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2242 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2243 return false;
2244 default:
2245 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2246 return false;
2247 }
2248}
2249
2250
2251/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2252
2253
2254/**
2255 * Arm a timer with a (new) expire time relative to current time.
2256 *
2257 * @returns VBox status code.
2258 * @param pVM The cross context VM structure.
2259 * @param hTimer Timer handle as returned by one of the create functions.
2260 * @param cMilliesToNext Number of milliseconds to the next tick.
2261 */
2262VMMDECL(int) TMTimerSetMillies(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t cMilliesToNext)
2263{
2264 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2265 switch (pQueue->enmClock)
2266 {
2267 case TMCLOCK_VIRTUAL:
2268 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2269 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2270
2271 case TMCLOCK_VIRTUAL_SYNC:
2272 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2273 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2274
2275 case TMCLOCK_REAL:
2276 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2277 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext, NULL, pQueueCC, pQueue);
2278
2279 default:
2280 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2281 return VERR_TM_TIMER_BAD_CLOCK;
2282 }
2283}
2284
2285
2286/**
2287 * Arm a timer with a (new) expire time relative to current time.
2288 *
2289 * @returns VBox status code.
2290 * @param pVM The cross context VM structure.
2291 * @param hTimer Timer handle as returned by one of the create functions.
2292 * @param cMicrosToNext Number of microseconds to the next tick.
2293 */
2294VMMDECL(int) TMTimerSetMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicrosToNext)
2295{
2296 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2297 switch (pQueue->enmClock)
2298 {
2299 case TMCLOCK_VIRTUAL:
2300 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2301 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2302
2303 case TMCLOCK_VIRTUAL_SYNC:
2304 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2305 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2306
2307 case TMCLOCK_REAL:
2308 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2309 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext / 1000, NULL, pQueueCC, pQueue);
2310
2311 default:
2312 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2313 return VERR_TM_TIMER_BAD_CLOCK;
2314 }
2315}
2316
2317
2318/**
2319 * Arm a timer with a (new) expire time relative to current time.
2320 *
2321 * @returns VBox status code.
2322 * @param pVM The cross context VM structure.
2323 * @param hTimer Timer handle as returned by one of the create functions.
2324 * @param cNanosToNext Number of nanoseconds to the next tick.
2325 */
2326VMMDECL(int) TMTimerSetNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanosToNext)
2327{
2328 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2329 switch (pQueue->enmClock)
2330 {
2331 case TMCLOCK_VIRTUAL:
2332 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2333 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2334
2335 case TMCLOCK_VIRTUAL_SYNC:
2336 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2337 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2338
2339 case TMCLOCK_REAL:
2340 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2341 return tmTimerSetRelative(pVM, pTimer, cNanosToNext / 1000000, NULL, pQueueCC, pQueue);
2342
2343 default:
2344 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2345 return VERR_TM_TIMER_BAD_CLOCK;
2346 }
2347}
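
/** Note (illustrative addition, not part of the original source): on the two
 * virtual clocks the three convenience wrappers above scale to the same tick
 * count, so these calls are equivalent for a TMCLOCK_VIRTUAL timer:
 * @code
 *      TMTimerSetMillies(pVM, hTimer, 10);
 *      TMTimerSetMicro(pVM, hTimer, 10000);
 *      TMTimerSetNano(pVM, hTimer, 10000000);
 * @endcode
 * For a TMCLOCK_REAL timer the micro/nano variants divide down to whole
 * milliseconds, so sub-millisecond amounts truncate (999999 ns becomes 0
 * ticks).
 */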
2348
2349
2350/**
2351 * Get the current clock time as nanoseconds.
2352 *
2353 * @returns The timer clock as nanoseconds.
2354 * @param pVM The cross context VM structure.
2355 * @param hTimer Timer handle as returned by one of the create functions.
2356 */
2357VMMDECL(uint64_t) TMTimerGetNano(PVMCC pVM, TMTIMERHANDLE hTimer)
2358{
2359 return TMTimerToNano(pVM, hTimer, TMTimerGet(pVM, hTimer));
2360}
2361
2362
2363/**
2364 * Get the current clock time as microseconds.
2365 *
2366 * @returns The timer clock as microseconds.
2367 * @param pVM The cross context VM structure.
2368 * @param hTimer Timer handle as returned by one of the create functions.
2369 */
2370VMMDECL(uint64_t) TMTimerGetMicro(PVMCC pVM, TMTIMERHANDLE hTimer)
2371{
2372 return TMTimerToMicro(pVM, hTimer, TMTimerGet(pVM, hTimer));
2373}
2374
2375
2376/**
2377 * Get the current clock time as milliseconds.
2378 *
2379 * @returns The timer clock as milliseconds.
2380 * @param pVM The cross context VM structure.
2381 * @param hTimer Timer handle as returned by one of the create functions.
2382 */
2383VMMDECL(uint64_t) TMTimerGetMilli(PVMCC pVM, TMTIMERHANDLE hTimer)
2384{
2385 return TMTimerToMilli(pVM, hTimer, TMTimerGet(pVM, hTimer));
2386}
2387
2388
2389/**
2390 * Converts the specified timer clock time to nanoseconds.
2391 *
2392 * @returns nanoseconds.
2393 * @param pVM The cross context VM structure.
2394 * @param hTimer Timer handle as returned by one of the create functions.
2395 * @param cTicks The clock ticks.
2396 * @remark There could be rounding errors here. We just do a simple integer divide
2397 * without any adjustments.
2398 */
2399VMMDECL(uint64_t) TMTimerToNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2400{
2401 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2402 switch (pQueue->enmClock)
2403 {
2404 case TMCLOCK_VIRTUAL:
2405 case TMCLOCK_VIRTUAL_SYNC:
2406 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2407 return cTicks;
2408
2409 case TMCLOCK_REAL:
2410 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2411 return cTicks * 1000000;
2412
2413 default:
2414 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2415 return 0;
2416 }
2417}
2418
2419
2420/**
2421 * Converts the specified timer clock time to microseconds.
2422 *
2423 * @returns microseconds.
2424 * @param pVM The cross context VM structure.
2425 * @param hTimer Timer handle as returned by one of the create functions.
2426 * @param cTicks The clock ticks.
2427 * @remark There could be rounding errors here. We just do a simple integer divide
2428 * without any adjustments.
2429 */
2430VMMDECL(uint64_t) TMTimerToMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2431{
2432 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2433 switch (pQueue->enmClock)
2434 {
2435 case TMCLOCK_VIRTUAL:
2436 case TMCLOCK_VIRTUAL_SYNC:
2437 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2438 return cTicks / 1000;
2439
2440 case TMCLOCK_REAL:
2441 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2442 return cTicks * 1000;
2443
2444 default:
2445 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2446 return 0;
2447 }
2448}
2449
2450
2451/**
2452 * Converts the specified timer clock time to milliseconds.
2453 *
2454 * @returns milliseconds.
2455 * @param pVM The cross context VM structure.
2456 * @param hTimer Timer handle as returned by one of the create functions.
2457 * @param cTicks The clock ticks.
2458 * @remark There could be rounding errors here. We just do a simple integer divide
2459 * without any adjustments.
2460 */
2461VMMDECL(uint64_t) TMTimerToMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2462{
2463 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2464 switch (pQueue->enmClock)
2465 {
2466 case TMCLOCK_VIRTUAL:
2467 case TMCLOCK_VIRTUAL_SYNC:
2468 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2469 return cTicks / 1000000;
2470
2471 case TMCLOCK_REAL:
2472 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2473 return cTicks;
2474
2475 default:
2476 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2477 return 0;
2478 }
2479}
2480
2481
2482/**
2483 * Converts the specified nanosecond timestamp to timer clock ticks.
2484 *
2485 * @returns timer clock ticks.
2486 * @param pVM The cross context VM structure.
2487 * @param hTimer Timer handle as returned by one of the create functions.
2488 * @param cNanoSecs The nanosecond value to convert.
2489 * @remark There could be rounding and overflow errors here.
2490 */
2491VMMDECL(uint64_t) TMTimerFromNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanoSecs)
2492{
2493 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2494 switch (pQueue->enmClock)
2495 {
2496 case TMCLOCK_VIRTUAL:
2497 case TMCLOCK_VIRTUAL_SYNC:
2498 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2499 return cNanoSecs;
2500
2501 case TMCLOCK_REAL:
2502 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2503 return cNanoSecs / 1000000;
2504
2505 default:
2506 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2507 return 0;
2508 }
2509}
2510
2511
2512/**
2513 * Converts the specified microsecond timestamp to timer clock ticks.
2514 *
2515 * @returns timer clock ticks.
2516 * @param pVM The cross context VM structure.
2517 * @param hTimer Timer handle as returned by one of the create functions.
2518 * @param cMicroSecs The microsecond value to convert.
2519 * @remark There could be rounding and overflow errors here.
2520 */
2521VMMDECL(uint64_t) TMTimerFromMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicroSecs)
2522{
2523 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2524 switch (pQueue->enmClock)
2525 {
2526 case TMCLOCK_VIRTUAL:
2527 case TMCLOCK_VIRTUAL_SYNC:
2528 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2529 return cMicroSecs * 1000;
2530
2531 case TMCLOCK_REAL:
2532 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2533 return cMicroSecs / 1000;
2534
2535 default:
2536 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2537 return 0;
2538 }
2539}
2540
2541
2542/**
2543 * Converts the specified millisecond timestamp to timer clock ticks.
2544 *
2545 * @returns timer clock ticks.
2546 * @param pVM The cross context VM structure.
2547 * @param hTimer Timer handle as returned by one of the create functions.
2548 * @param cMilliSecs The millisecond value to convert.
2549 * @remark There could be rounding and overflow errors here.
2550 */
2551VMMDECL(uint64_t) TMTimerFromMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMilliSecs)
2552{
2553 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2554 switch (pQueue->enmClock)
2555 {
2556 case TMCLOCK_VIRTUAL:
2557 case TMCLOCK_VIRTUAL_SYNC:
2558 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2559 return cMilliSecs * 1000000;
2560
2561 case TMCLOCK_REAL:
2562 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2563 return cMilliSecs;
2564
2565 default:
2566 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2567 return 0;
2568 }
2569}
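
/** Note (illustrative addition, not part of the original source): the To/From
 * helpers are plain integer scalings, so a round-trip only preserves values
 * representable in the clock's resolution:
 * @code
 *      uint64_t cTicks = TMTimerFromMicro(pVM, hTimer, 1500);
 *      uint64_t cUs    = TMTimerToMicro(pVM, hTimer, cTicks);
 * @endcode
 * For TMCLOCK_VIRTUAL (1 tick = 1 ns) cUs ends up as 1500 again, while for
 * TMCLOCK_REAL (1 tick = 1 ms) the 1500 microseconds first truncate to 1 tick
 * and convert back as 1000.
 */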
2570
2571
2572/**
2573 * Convert state to string.
2574 *
2575 * @returns Readonly status name.
2576 * @param enmState State.
2577 */
2578const char *tmTimerState(TMTIMERSTATE enmState)
2579{
2580 switch (enmState)
2581 {
2582#define CASE(num, state) \
2583 case TMTIMERSTATE_##state: \
2584 AssertCompile(TMTIMERSTATE_##state == (num)); \
2585 return #num "-" #state
2586 CASE( 0,INVALID);
2587 CASE( 1,STOPPED);
2588 CASE( 2,ACTIVE);
2589 CASE( 3,EXPIRED_GET_UNLINK);
2590 CASE( 4,EXPIRED_DELIVER);
2591 CASE( 5,PENDING_STOP);
2592 CASE( 6,PENDING_STOP_SCHEDULE);
2593 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2594 CASE( 8,PENDING_SCHEDULE);
2595 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2596 CASE(10,PENDING_RESCHEDULE);
2597 CASE(11,DESTROY);
2598 CASE(12,FREE);
2599 default:
2600 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2601 return "Invalid state!";
2602#undef CASE
2603 }
2604}
2605
2606
2607#if defined(IN_RING0) || defined(IN_RING3)
2608/**
2609 * Copies over the old timers and initializes the newly allocated ones.
2610 *
2611 * Helper for TMR0TimerQueueGrow and tmR3TimerQueueGrow.
2612 *
2613 * @param paTimers The new timer allocation.
2614 * @param paOldTimers The old timers.
2615 * @param cNewTimers Number of new timers.
2616 * @param cOldTimers Number of old timers.
2617 */
2618void tmHCTimerQueueGrowInit(PTMTIMER paTimers, TMTIMER const *paOldTimers, uint32_t cNewTimers, uint32_t cOldTimers)
2619{
2620 Assert(cOldTimers < cNewTimers);
2621
2622 /*
2623 * Copy over the old info and initialize the new handles.
2624 */
2625 if (cOldTimers > 0)
2626 memcpy(paTimers, paOldTimers, sizeof(TMTIMER) * cOldTimers);
2627
2628 size_t i = cNewTimers;
2629 while (i-- > cOldTimers)
2630 {
2631 paTimers[i].u64Expire = UINT64_MAX;
2632 paTimers[i].enmType = TMTIMERTYPE_INVALID;
2633 paTimers[i].enmState = TMTIMERSTATE_FREE;
2634 paTimers[i].idxScheduleNext = UINT32_MAX;
2635 paTimers[i].idxNext = UINT32_MAX;
2636 paTimers[i].idxPrev = UINT32_MAX;
2637 paTimers[i].hSelf = NIL_TMTIMERHANDLE;
2638 }
2639
2640 /*
2641 * Mark the zero'th entry as allocated but invalid if we just allocated it.
2642 */
2643 if (cOldTimers == 0)
2644 {
2645 paTimers[0].enmState = TMTIMERSTATE_INVALID;
2646 paTimers[0].szName[0] = 'n';
2647 paTimers[0].szName[1] = 'i';
2648 paTimers[0].szName[2] = 'l';
2649 paTimers[0].szName[3] = '\0';
2650 }
2651}
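
/** Usage sketch (illustrative addition, not part of the original source):
 * growing a queue from 32 to 64 entries copies the 32 existing timers
 * verbatim and marks entries 32..63 as TMTIMERSTATE_FREE with all links
 * reset:
 * @code
 *      tmHCTimerQueueGrowInit(paNewTimers, paOldTimers, 64, 32);
 * @endcode
 */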
2652#endif /* IN_RING0 || IN_RING3 */
2653
2654
2655/**
2656 * The slow path of tmGetFrequencyHint() where we try to recalculate the value.
2657 *
2658 * @returns The highest frequency. 0 if no timers care.
2659 * @param pVM The cross context VM structure.
2660 * @param uOldMaxHzHint The old global hint.
2661 */
2662DECL_NO_INLINE(static, uint32_t) tmGetFrequencyHintSlow(PVMCC pVM, uint32_t uOldMaxHzHint)
2663{
2664 /* Set two bits, though not entirely sure it's needed (too exhausted to think clearly);
2665 but it should force other callers thru the slow path and help us detect
2666 changes while we're recalculating. */
2667 AssertCompile(RT_ELEMENTS(pVM->tm.s.aTimerQueues) <= 16);
2668
2669 /*
2670 * The "right" highest frequency value isn't so important that we'll block
2671 * waiting on the timer semaphores.
2672 */
2673 uint32_t uMaxHzHint = 0;
2674 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
2675 {
2676 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
2677
2678 /* Get the max Hz hint for the queue. */
2679 uint32_t uMaxHzHintQueue;
2680 if ( !(ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2681 || RT_FAILURE_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock)))
2682 uMaxHzHintQueue = ASMAtomicReadU32(&pQueue->uMaxHzHint);
2683 else
2684 {
2685 /* Is it still necessary to do updating? */
2686 if (ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2687 {
2688 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue + 16)); /* clear one flag up front */
2689
2690 PTMTIMERQUEUECC pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
2691 uMaxHzHintQueue = 0;
2692 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
2693 pCur;
2694 pCur = tmTimerGetNext(pQueueCC, pCur))
2695 {
2696 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2697 if (uHzHint > uMaxHzHintQueue)
2698 {
2699 TMTIMERSTATE enmState = pCur->enmState;
2700 switch (enmState)
2701 {
2702 case TMTIMERSTATE_ACTIVE:
2703 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2704 case TMTIMERSTATE_EXPIRED_DELIVER:
2705 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2706 case TMTIMERSTATE_PENDING_SCHEDULE:
2707 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2708 case TMTIMERSTATE_PENDING_RESCHEDULE:
2709 uMaxHzHintQueue = uHzHint;
2710 break;
2711
2712 case TMTIMERSTATE_STOPPED:
2713 case TMTIMERSTATE_PENDING_STOP:
2714 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2715 case TMTIMERSTATE_DESTROY:
2716 case TMTIMERSTATE_FREE:
2717 case TMTIMERSTATE_INVALID:
2718 break;
2719 /* no default, want gcc warnings when adding more states. */
2720 }
2721 }
2722 }
2723
2724 /* Write the new Hz hint for the queue and clear the other update flag. */
2725 ASMAtomicUoWriteU32(&pQueue->uMaxHzHint, uMaxHzHintQueue);
2726 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue));
2727 }
2728 else
2729 uMaxHzHintQueue = ASMAtomicUoReadU32(&pQueue->uMaxHzHint);
2730
2731 PDMCritSectLeave(pVM, &pQueue->TimerLock);
2732 }
2733
2734 /* Update the global max Hz hint. */
2735 if (uMaxHzHint < uMaxHzHintQueue)
2736 uMaxHzHint = uMaxHzHintQueue;
2737 }
2738
2739 /*
2740 * Update the frequency hint if no pending frequency changes and we didn't race anyone thru here.
2741 */
2742 uint64_t u64Actual = RT_MAKE_U64(0 /*no pending updates*/, uOldMaxHzHint);
2743 if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2744 Log(("tmGetFrequencyHintSlow: New value %u Hz\n", uMaxHzHint));
2745 else
2746 for (uint32_t iTry = 1;; iTry++)
2747 {
2748 if (RT_LO_U32(u64Actual) != 0)
2749 Log(("tmGetFrequencyHintSlow: Outdated value %u Hz (%#x, try %u)\n", uMaxHzHint, RT_LO_U32(u64Actual), iTry));
2750 else if (iTry >= 4)
2751 Log(("tmGetFrequencyHintSlow: Unable to set %u Hz (try %u)\n", uMaxHzHint, iTry));
2752 else if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2753 Log(("tmGetFrequencyHintSlow: New value %u Hz (try %u)\n", uMaxHzHint, iTry));
2754 else
2755 continue;
2756 break;
2757 }
2758 return uMaxHzHint;
2759}
2760
2761
2762/**
2763 * Gets the highest frequency hint for all the important timers.
2764 *
2765 * @returns The highest frequency. 0 if no timers care.
2766 * @param pVM The cross context VM structure.
2767 */
2768DECLINLINE(uint32_t) tmGetFrequencyHint(PVMCC pVM)
2769{
2770 /*
2771 * Query the value, recalculate it if necessary.
2772 */
2773 uint64_t u64Combined = ASMAtomicReadU64(&pVM->tm.s.HzHint.u64Combined);
2774 if (RT_HI_U32(u64Combined) == 0)
2775 return RT_LO_U32(u64Combined); /* hopefully somewhat likely */
2776 return tmGetFrequencyHintSlow(pVM, RT_LO_U32(u64Combined));
2777}
2778
2779
2780/**
2781 * Calculates a host timer frequency that would be suitable for the current
2782 * timer load.
2783 *
2784 * This will take the highest timer frequency, adjust for catch-up and warp
2785 * driver, and finally add a little fudge factor. The caller (VMM) will use
2786 * the result to adjust the per-cpu preemption timer.
2787 *
2788 * @returns The highest frequency. 0 if no important timers around.
2789 * @param pVM The cross context VM structure.
2790 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2791 */
2792VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
2793{
2794 uint32_t uHz = tmGetFrequencyHint(pVM);
2795
2796 /* Catch up: we have to be more aggressive than the percentage indicates at
2797 the beginning of the effort. */
2798 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2799 {
2800 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2801 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2802 {
2803 if (u32Pct <= 100)
2804 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2805 else if (u32Pct <= 200)
2806 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2807 else if (u32Pct <= 400)
2808 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2809 uHz *= u32Pct + 100;
2810 uHz /= 100;
2811 }
2812 }
2813
2814 /* Warp drive. */
2815 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2816 {
2817 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2818 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2819 {
2820 uHz *= u32Pct;
2821 uHz /= 100;
2822 }
2823 }
2824
2825 /* Fudge factor. */
2826 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2827 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2828 else
2829 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2830 uHz /= 100;
2831
2832 /* Make sure it isn't too high. */
2833 if (uHz > pVM->tm.s.cHostHzMax)
2834 uHz = pVM->tm.s.cHostHzMax;
2835
2836 return uHz;
2837}
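
/** Worked example (illustrative addition with assumed values, not the
 * configured defaults): a 100 Hz hint during a 50% catch-up with
 * cPctHostHzFudgeFactorCatchUp100=300 gives u32Pct = 50 * 300 / 100 = 150 and
 * uHz = 100 * (150 + 100) / 100 = 250 Hz; a 200% warp drive doubles that to
 * 500 Hz, and a 100% per-CPU fudge factor leaves it unchanged, subject to the
 * final cHostHzMax clamp.
 */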
2838
2839
2840/**
2841 * Whether the guest virtual clock is ticking.
2842 *
2843 * @returns true if ticking, false otherwise.
2844 * @param pVM The cross context VM structure.
2845 */
2846VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2847{
2848 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2849}
2850