VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 104956

Last change on this file since 104956 was 104788, checked in by vboxsync, 6 months ago

VMM/tmTimerQueuesSanityChecks: Corrected free count check. bugref:10688

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 108.4 KB
Line 
1/* $Id: TMAll.cpp 104788 2024-05-27 10:20:07Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_TM
33#ifdef DEBUG_bird
34# define DBGFTRACE_DISABLED /* annoying */
35#endif
36#include <VBox/vmm/tm.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/dbgftrace.h>
39#ifdef IN_RING3
40#endif
41#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
42#include "TMInternal.h"
43#include <VBox/vmm/vmcc.h>
44
45#include <VBox/param.h>
46#include <VBox/err.h>
47#include <VBox/log.h>
48#include <VBox/sup.h>
49#include <iprt/time.h>
50#include <iprt/assert.h>
51#include <iprt/asm.h>
52#include <iprt/asm-math.h>
53#include <iprt/string.h>
54#ifdef IN_RING3
55# include <iprt/thread.h>
56#endif
57
58#include "TMInline.h"
59
60
61/*********************************************************************************************************************************
62* Defined Constants And Macros *
63*********************************************************************************************************************************/
#ifdef VBOX_STRICT
/** @def TMTIMER_GET_CRITSECT
 * Helper for safely resolving the critical section for a timer belonging to a
 * device instance.
 *
 * In ring-3 the timer's pCritSect pointer can be used directly.  In ring-0 it
 * is a ring-3 pointer and must be translated via the owning device instance
 * (see tmRZTimerGetCritSect below).
 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
# ifdef IN_RING3
#  define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer)  ((a_pTimer)->pCritSect)
# else
#  define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer)  tmRZTimerGetCritSect(a_pVM, a_pTimer)
# endif
#endif
75
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer.
 *
 * The check is waived while the VM is being created, reset or live-reset
 * (VMSTATE_CREATING / VMSTATE_RESETTING / VMSTATE_RESETTING_LS), as timers may
 * legitimately be manipulated without the device lock in those states. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(a_pVM, a_pTimer) \
    do { \
        if ((a_pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(a_pVM, a_pTimer); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner((a_pVM), pCritSect) \
                          || (enmState = (a_pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", a_pTimer, (a_pTimer)->szName, \
                       (a_pTimer)->pCritSect, R3STRING(PDMR3CritSectName((a_pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pVM, pTimer) do { } while (0)
#endif
98
/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
 * Checks for lock order trouble between the timer critsect and the critical
 * section critsect.  The virtual sync critsect must always be entered before
 * the one associated with the timer (see TMR3TimerQueuesDo).  It is OK if there
 * isn't any critical section associated with the timer or if the calling thread
 * doesn't own it, ASSUMING of course that the thread using this macro is going
 * to enter the virtual sync critical section anyway.
 *
 * Like TMTIMER_ASSERT_CRITSECT, the check is waived during VM creation and
 * (live) reset.
 *
 * @remarks This is a slightly relaxed timer locking attitude compared to
 *          TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
 *          should know what it's doing if it's stopping or starting a timer
 *          without taking the device lock.
 */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pVM, pTimer); \
            AssertMsg(   pCritSect \
                      && (   !PDMCritSectIsOwner((pVM), pCritSect) \
                          || PDMCritSectIsOwner((pVM), &(pVM)->tm.s.VirtualSyncLock) \
                          || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, pTimer->szName, \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
#endif
132
133
#if defined(VBOX_STRICT) && defined(IN_RING0)
/**
 * Helper for TMTIMER_GET_CRITSECT
 *
 * Resolves a device timer's ring-3 critical section pointer into a usable
 * ring-0 pointer, either by matching the device's default critsect or by
 * translating its offset within the shared instance data.
 *
 * @returns Ring-0 critical section pointer, or NULL if the timer has no
 *          resolvable critical section.
 * @param   pVM     The cross context VM structure (only referenced in strict
 *                  builds via RT_NOREF).
 * @param   pTimer  The timer to resolve the critical section for.
 * @todo This needs a redo!
 */
DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PVMCC pVM, PTMTIMER pTimer)
{
    if (pTimer->enmType == TMTIMERTYPE_DEV)
    {
        /* Temporarily set EFLAGS.AC so the deliberate ring-3 pointer read below
           doesn't trip SMAP; restored immediately afterwards. */
        RTCCUINTREG fSavedFlags = ASMAddFlags(X86_EFL_AC); /** @todo fix ring-3 pointer use */
        PPDMDEVINSR0 pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
        ASMSetFlags(fSavedFlags);
        struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
        /* The common case: the timer uses the device's default critical section. */
        if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
            return pDevInsR0->pCritSectRoR0;
        /* Otherwise check whether the critsect lives inside the shared instance
           data and translate the offset into the ring-0 mapping. */
        uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
        if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
            return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
    }
    RT_NOREF(pVM);
    Assert(pTimer->pCritSect == NULL);
    return NULL;
}
#endif /* VBOX_STRICT && IN_RING0 */
158
159
/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only ticks when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Sample the TSC first; TMNotifyEndOfExecution subtracts this from its own
       sample to compute the executed-tick delta. */
    pVCpu->tm.s.uTscStartExecuting = SUPReadTsc();
    pVCpu->tm.s.fExecuting         = true;
#endif
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}
180
181
/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uTsc    TSC value when exiting guest context.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTsc)
{
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVCpu); /** @todo use uTsc here if we can. */

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /*
     * Calculate the elapsed tick count and convert it to nanoseconds.
     */
# ifdef IN_RING3
    PSUPGLOBALINFOPAGE const pGip = g_pSUPGlobalInfoPage;
    uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDelta(pGip);
    uint64_t const uCpuHz = pGip ? SUPGetCpuHzFromGip(pGip) : pVM->tm.s.cTSCTicksPerSecondHost;
# else
    uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
    uint64_t const uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
# endif
    /* Clamp to a sane maximum in case of TSC weirdness (migration between
       sockets, host suspend, ...). */
    AssertStmt(cTicks <= uCpuHz << 2, cTicks = uCpuHz << 2); /* max 4 sec */

    /* Scale ticks and frequency down as needed so the 32-bit divisor of
       ASMMultU64ByU32DivByU32 can represent uCpuHz. */
    uint64_t cNsExecutingDelta;
    if (uCpuHz < _4G)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks, RT_NS_1SEC, uCpuHz);
    else if (uCpuHz < 16*_1G64)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 2, RT_NS_1SEC, uCpuHz >> 2);
    else
    {
        Assert(uCpuHz < 64 * _1G64);
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 4, RT_NS_1SEC, uCpuHz >> 4);
    }

    /*
     * Update the data.
     *
     * Note! We're not using strict memory ordering here to speed things up.
     *       The data is in a single cache line and this thread is the only
     *       one writing to that line, so I cannot quite imagine why we would
     *       need any strict ordering here.
     */
    uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    /* Generation counter goes odd while the fields below are being updated,
       letting readers detect a torn snapshot (writer side of a seqlock-style
       scheme; the compiler barriers keep the stores inside the odd window). */
    uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    ASMCompilerBarrier();
    pVCpu->tm.s.fExecuting   = false;
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMCompilerBarrier();
    /* Back to an even value: the snapshot is consistent again. */
    ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

    /*
     * Update stats.
     */
# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
# endif

    /* The timer triggers occasional updating of the others and total stats: */
    if (RT_LIKELY(!pVCpu->tm.s.fUpdateStats))
    { /*likely*/ }
    else
    {
        pVCpu->tm.s.fUpdateStats = false;

        uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
        uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
        int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
        if (cNsOtherNewDelta > 0)
            STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif

        pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
        pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
    }

#endif
}
277
278
/**
 * Notification that the cpu is entering the halt state
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Record when halting started; TMNotifyEndOfHalt computes the halted
       duration from this. */
    pVCpu->tm.s.nsStartHalting = RTTimeNanoTS();
    pVCpu->tm.s.fHalting       = true;
#endif

    /* Resume the TSC only if it is tied to execution AND configured to also
       tick while halted. */
    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}
302
303
/**
 * Notification that the cpu is leaving the halt state
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Compute the new halted/total/other tallies from a single timestamp. */
    uint64_t const u64NsTs        = RTTimeNanoTS();
    uint64_t const cNsTotalNew    = u64NsTs - pVCpu->tm.s.nsStartTotal;
    uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.nsStartHalting;
    uint64_t const cNsHaltedNew   = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
    uint64_t const cNsOtherNew    = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

    /* Generation counter goes odd while the fields below are updated so
       readers can detect torn snapshots (same scheme as
       TMNotifyEndOfExecution). */
    uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    ASMCompilerBarrier();
    pVCpu->tm.s.fHalting     = false;
    pVCpu->tm.s.fUpdateStats = false;
    pVCpu->tm.s.cNsHalted    = cNsHaltedNew;
    pVCpu->tm.s.cPeriodsHalted++;
    ASMCompilerBarrier();
    /* Back to even: snapshot consistent again. */
    ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
    if (cNsOtherNewDelta > 0)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif
    pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
    pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
#endif
}
349
350
351/**
352 * Raise the timer force action flag and notify the dedicated timer EMT.
353 *
354 * @param pVM The cross context VM structure.
355 */
356DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
357{
358 VMCPUID idCpu = pVM->tm.s.idTimerCpu;
359 AssertReturnVoid(idCpu < pVM->cCpus);
360 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);
361
362 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
363 {
364 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
365 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
366#ifdef IN_RING3
367 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
368#endif
369 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
370 }
371}
372
373
374/**
375 * Schedule the queue which was changed.
376 */
377DECLINLINE(void) tmSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
378{
379 int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
380 if (RT_SUCCESS_NP(rc))
381 {
382 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
383 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
384 tmTimerQueueSchedule(pVM, pQueueCC, pQueue);
385#ifdef VBOX_STRICT
386 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
387#endif
388 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
389 PDMCritSectLeave(pVM, &pQueue->TimerLock);
390 return;
391 }
392
393 TMTIMERSTATE enmState = pTimer->enmState;
394 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
395 tmScheduleNotify(pVM);
396}
397
398
399/**
400 * Try change the state to enmStateNew from enmStateOld
401 * and link the timer into the scheduling queue.
402 *
403 * @returns Success indicator.
404 * @param pTimer Timer in question.
405 * @param enmStateNew The new timer state.
406 * @param enmStateOld The old timer state.
407 */
408DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
409{
410 /*
411 * Attempt state change.
412 */
413 bool fRc;
414 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
415 return fRc;
416}
417
418
/**
 * Links the timer onto the scheduling queue.
 *
 * The scheduling list is a lockless LIFO singly linked list (linked through
 * idxScheduleNext); the timer is pushed onto the head with a compare-and-swap
 * loop so concurrent pushers are safe.
 *
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pTimer->idxScheduleNext == UINT32_MAX); /* must not already be on the list */
    const uint32_t idxHeadNew = pTimer - &pQueueCC->paTimers[0]; /* our own array index becomes the new head */
    AssertReturnVoid(idxHeadNew < pQueueCC->cTimersAlloc);

    uint32_t idxHead;
    do
    {
        idxHead = pQueue->idxSchedule;
        Assert(idxHead == UINT32_MAX || idxHead < pQueueCC->cTimersAlloc);
        pTimer->idxScheduleNext = idxHead;
    } while (!ASMAtomicCmpXchgU32(&pQueue->idxSchedule, idxHeadNew, idxHead));
}
444
445
446/**
447 * Try change the state to enmStateNew from enmStateOld
448 * and link the timer into the scheduling queue.
449 *
450 * @returns Success indicator.
451 * @param pQueueCC The current context queue (same as @a pQueue for
452 * ring-3).
453 * @param pQueue The shared queue data.
454 * @param pTimer Timer in question.
455 * @param enmStateNew The new timer state.
456 * @param enmStateOld The old timer state.
457 */
458DECLINLINE(bool) tmTimerTryWithLink(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer,
459 TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
460{
461 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
462 {
463 tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
464 return true;
465 }
466 return false;
467}
468
469
/**
 * Links a timer into the active list of a timer queue.
 *
 * The active list is a doubly linked list kept sorted by expiration time in
 * ascending order.  The queue's u64Expire mirrors the head timer's expire time
 * and is updated atomically whenever the head changes, since pollers read it
 * without holding the queue lock.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer.
 * @param   u64Expire   The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue,
                                               PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */
    RT_NOREF(pVM);

    PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
    if (pCur)
    {
        /* Walk forward to the first timer expiring after us. */
        for (;; pCur = tmTimerGetNext(pQueueCC, pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                /* Insert before pCur. */
                const PTMTIMER pPrev = tmTimerGetPrev(pQueueCC, pCur);
                tmTimerSetNext(pQueueCC, pTimer, pCur);
                tmTimerSetPrev(pQueueCC, pTimer, pPrev);
                if (pPrev)
                    tmTimerSetNext(pQueueCC, pPrev, pTimer);
                else
                {
                    /* New head: publish the new earliest expiration time. */
                    tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive head", pTimer->szName);
                }
                tmTimerSetPrev(pQueueCC, pCur, pTimer);
                return;
            }
            if (pCur->idxNext == UINT32_MAX)
            {
                /* Reached the last timer: append at the tail. */
                tmTimerSetNext(pQueueCC, pCur, pTimer);
                tmTimerSetPrev(pQueueCC, pTimer, pCur);
                DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive tail", pTimer->szName);
                return;
            }
        }
    }
    else
    {
        /* Empty list: the new timer becomes head and sole element. */
        tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive empty", pTimer->szName);
    }
}
527
528
529
/**
 * Schedules the given timer on the given queue.
 *
 * Processes a single timer's pending state change (schedule / reschedule /
 * stop), retrying a couple of times if a concurrent state change interferes
 * with the atomic transition.  Note the deliberate fallthroughs: a reschedule
 * degenerates into a schedule once unlinked, and a pending stop degenerates
 * into a stop-schedule once unlinked.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
    RT_NOREF(pVM);

    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
                STAM_COUNTER_INC(&pVM->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
623
624
/**
 * Schedules the specified timer queue.
 *
 * Atomically detaches the queue's whole lockless scheduling list and processes
 * each timer on it via tmTimerQueueScheduleOne.  Timers pushed concurrently
 * after the exchange simply start a new list for a later run.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3) data of the queue to schedule.
 * @param   pQueue      The shared queue data of the queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
{
    Assert(PDMCritSectIsOwner(pVM, &pQueue->TimerLock));

    /*
     * Dequeue the scheduling list and iterate it.
     */
    uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, idxNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, idxNext, pQueue->u64Expire));
    while (idxNext != UINT32_MAX)
    {
        AssertBreak(idxNext < pQueueCC->cTimersAlloc);

        /*
         * Unlink the head timer and take down the index of the next one.
         */
        PTMTIMER pTimer = &pQueueCC->paTimers[idxNext];
        idxNext = pTimer->idxScheduleNext;
        pTimer->idxScheduleNext = UINT32_MAX;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .szName=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pQueue->enmClock, pTimer->enmType, pTimer->szName));
        tmTimerQueueScheduleOne(pVM, pQueueCC, pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    }
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}
665
666
#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * For each queue whose lock(s) can be taken without blocking, verifies the
 * prev/next linking of the active list, that every timer's state is
 * consistent with its active-list membership, the free-entry count, and that
 * each timer's handle encodes its own queue and array index.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszWhere    Caller location clue.
 */
void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere)
{
    for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
    {
        PTMTIMERQUEUE const   pQueue   = &pVM->tm.s.aTimerQueues[idxQueue];
        PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
        Assert(pQueue->enmClock == (TMCLOCK)idxQueue);

        int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
        if (RT_SUCCESS(rc))
        {
            /* The virtual sync queue additionally requires the virtual sync
               lock; skip it if we cannot get that without blocking. */
            if (   pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC
                || PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
            {
                /* Check the linking of the active lists. */
                PTMTIMER pPrev = NULL;
                for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
                     pCur;
                     pPrev = pCur, pCur = tmTimerGetNext(pQueueCC, pCur))
                {
                    AssertMsg(tmTimerGetPrev(pQueueCC, pCur) == pPrev, ("%s: %p != %p\n", pszWhere, tmTimerGetPrev(pQueueCC, pCur), pPrev));
                    TMTIMERSTATE enmState = pCur->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_ACTIVE:
                            /* enmState is re-read here on purpose: the state may
                               change concurrently, which excuses a non-empty
                               idxScheduleNext. */
                            AssertMsg(   pCur->idxScheduleNext == UINT32_MAX
                                      || pCur->enmState != TMTIMERSTATE_ACTIVE,
                                      ("%s: %RI32\n", pszWhere, pCur->idxScheduleNext));
                            break;
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            break;
                        default:
                            AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                            break;
                    }
                }

# ifdef IN_RING3
                /* Go thru all the timers and check that the active ones all are in the active lists. */
                int const rcAllocLock = PDMCritSectRwTryEnterShared(pVM, &pQueue->AllocLock);
                uint32_t idxTimer = pQueue->cTimersAlloc;
                uint32_t cFree    = 0;
                while (idxTimer-- > 0)
                {
                    PTMTIMER const     pTimer   = &pQueue->paTimers[idxTimer];
                    TMTIMERSTATE const enmState = pTimer->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_FREE:
                            cFree++;
                            break;

                        case TMTIMERSTATE_ACTIVE:
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                        {
                            /* These must be findable on the active list. */
                            PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                            Assert(pTimer->idxPrev != UINT32_MAX || pTimer == pCurAct);
                            while (pCurAct && pCurAct != pTimer)
                                pCurAct = tmTimerGetNext(pQueueCC, pCurAct);
                            Assert(pCurAct == pTimer);
                            break;
                        }

                        case TMTIMERSTATE_PENDING_SCHEDULE:
                        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                        case TMTIMERSTATE_STOPPED:
                        case TMTIMERSTATE_EXPIRED_DELIVER:
                        {
                            /* These must NOT be linked into the active list. */
                            Assert(pTimer->idxNext == UINT32_MAX);
                            Assert(pTimer->idxPrev == UINT32_MAX);
                            for (PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                                 pCurAct;
                                 pCurAct = tmTimerGetNext(pQueueCC, pCurAct))
                            {
                                Assert(pCurAct != pTimer);
                                Assert(tmTimerGetNext(pQueueCC, pCurAct) != pTimer);
                                Assert(tmTimerGetPrev(pQueueCC, pCurAct) != pTimer);
                            }
                            break;
                        }

                        /* ignore */
                        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            break;

                        case TMTIMERSTATE_INVALID:
                            Assert(idxTimer == 0); /* only the zero'th (reserved) entry may be invalid */
                            break;

                        /* shouldn't get here! */
                        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                        case TMTIMERSTATE_DESTROY:
                        default:
                            AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                            break;
                    }

                    /* Check the handle value. */
                    if (enmState > TMTIMERSTATE_INVALID && enmState < TMTIMERSTATE_DESTROY)
                    {
                        Assert((pTimer->hSelf & TMTIMERHANDLE_TIMER_IDX_MASK) == idxTimer);
                        Assert(((pTimer->hSelf >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) & TMTIMERHANDLE_QUEUE_IDX_SMASK) == idxQueue);
                    }
                }
                if (RT_SUCCESS(rcAllocLock))
                {
                    Assert(cFree == pQueue->cTimersFree);
                    PDMCritSectRwLeaveShared(pVM, &pQueue->AllocLock);
                }
                else
                    Assert(cFree >= pQueue->cTimersFree); /* cTimersFree can be lower than our count as tmr3TimerCreate may run concurrently. */

# endif /* IN_RING3 */

                if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
                    PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
            }
            PDMCritSectLeave(pVM, &pQueue->TimerLock);
        }
    }
}
#endif /* VBOX_STRICT */
800
801#ifdef VBOX_HIGH_RES_TIMERS_HACK
802
803/**
804 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
805 * EMT is polling.
806 *
807 * @returns See tmTimerPollInternal.
808 * @param pVM The cross context VM structure.
809 * @param u64Now Current virtual clock timestamp.
810 * @param u64Delta The delta to the next even in ticks of the
811 * virtual clock.
812 * @param pu64Delta Where to return the delta.
813 */
814DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
815{
816 Assert(!(u64Delta & RT_BIT_64(63)));
817
818 if (!pVM->tm.s.fVirtualWarpDrive)
819 {
820 *pu64Delta = u64Delta;
821 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
822 }
823
824 /*
825 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
826 */
827 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
828 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
829
830 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
831 u64GipTime -= u64Start; /* the start is GIP time. */
832 if (u64GipTime >= u64Delta)
833 {
834 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
835 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
836 }
837 else
838 {
839 u64Delta -= u64GipTime;
840 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
841 u64Delta += u64GipTime;
842 }
843 *pu64Delta = u64Delta;
844 u64GipTime += u64Start;
845 return u64GipTime;
846}
847
848
849/**
850 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
851 * than the one dedicated to timer work.
852 *
853 * @returns See tmTimerPollInternal.
854 * @param pVM The cross context VM structure.
855 * @param u64Now Current virtual clock timestamp.
856 * @param pu64Delta Where to return the delta.
857 */
858DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
859{
860 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
861 *pu64Delta = s_u64OtherRet;
862 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
863}
864
865
866/**
867 * Worker for tmTimerPollInternal.
868 *
869 * @returns See tmTimerPollInternal.
870 * @param pVM The cross context VM structure.
871 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
872 * @param pVCpuDst The cross context virtual CPU structure of the dedicated
873 * timer EMT.
874 * @param u64Now Current virtual clock timestamp.
875 * @param pu64Delta Where to return the delta.
876 * @param pCounter The statistics counter to update.
877 */
878DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
879 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
880{
881 STAM_COUNTER_INC(pCounter); NOREF(pCounter);
882 if (pVCpuDst != pVCpu)
883 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
884 *pu64Delta = 0;
885 return 0;
886}
887
888
/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * Checks both the TMCLOCK_VIRTUAL and TMCLOCK_VIRTUAL_SYNC queues for an
 * expired head timer, setting VMCPU_FF_TIMER on the dedicated timer EMT if
 * one is found.  The virtual-sync check is done locklessly: first with an
 * optimistic read-validate-reread pass, then with a bounded retry loop that
 * also compensates for catch-up mode.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pu64Delta   Where to store the delta.
 * @param   pu64Now     Where to store the current time. Optional.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta, uint64_t *pu64Now)
{
    /* Only the dedicated timer EMT (idTimerCpu) receives real deltas; other
       EMTs get the fixed "other CPU" delta from tmTimerPollReturnOtherCpu. */
    VMCPUID idCpu = pVM->tm.s.idTimerCpu;
    AssertReturn(idCpu < pVM->cCpus, 0);
    PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);

    const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
    if (pu64Now)
        *pu64Now = u64Now;

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t i64Delta1 = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        /* Head of the virtual queue has expired: raise the FF (once). */
        if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straight forward if in a catch-up, not only do
     * we have to adjust the 'now' but we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     *
     * Read offset + expire, then re-read everything to make sure nothing
     * changed under us; only then is the (now, delta) pair trustworthy.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                /* Consistent snapshot: convert offset into the sync "now". */
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (   !pVM->tm.s.fRunningQueues
                    && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        /* Virtual sync clock stopped: treat as a hit so the EMT re-evaluates. */
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     *
     * Catch-up mode (or a race in the simple path) requires a consistent set
     * of {prev, given-up offset, percentage, offset, expire} values; retry up
     * to 42 times and take the last (possibly inconsistent) set if exceeded.
     */
    uint64_t off;
    uint32_t u32Pct = 0;
    bool fCatchUp;
    int cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                || cOuterTries <= 0)
            {
                /* Shrink the offset by the catch-up percentage of elapsed time. */
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (   !pVM->tm.s.fRunningQueues
            && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        /* In catch-up the sync clock runs faster; stretch the delta back to
           virtual (GIP) time accordingly. */
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}
1100
1101
1102/**
1103 * Set FF if we've passed the next virtual event.
1104 *
1105 * This function is called before FFs are checked in the inner execution EM loops.
1106 *
1107 * @returns true if timers are pending, false if not.
1108 *
1109 * @param pVM The cross context VM structure.
1110 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1111 * @thread The emulation thread.
1112 */
1113VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
1114{
1115 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1116 uint64_t off = 0;
1117 tmTimerPollInternal(pVM, pVCpu, &off, NULL);
1118 return off == 0;
1119}
1120
1121
1122/**
1123 * Set FF if we've passed the next virtual event and return virtual time as MS.
1124 *
1125 * This function is called before FFs are checked in the inner execution EM loops.
1126 *
1127 * This is used by the IEM recompiler for polling timers while also providing a
1128 * free time source for recent use tracking and such.
1129 *
1130 * @returns true if timers are pending, false if not.
1131 *
1132 * @param pVM The cross context VM structure.
1133 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1134 * @param pmsNow Where to return the current virtual time in
1135 * milliseconds.
1136 * @thread The emulation thread.
1137 */
1138VMMDECL(bool) TMTimerPollBoolWith32BitMilliTS(PVMCC pVM, PVMCPUCC pVCpu, uint32_t *pmsNow)
1139{
1140 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1141 uint64_t off = 0;
1142 uint64_t u64Now = 0;
1143 tmTimerPollInternal(pVM, pVCpu, &off, &u64Now);
1144 *pmsNow = (uint32_t)(u64Now / RT_NS_1MS);
1145 return off == 0;
1146}
1147
1148
1149/**
1150 * Set FF if we've passed the next virtual event.
1151 *
1152 * This function is called before FFs are checked in the inner execution EM loops.
1153 *
1154 * @param pVM The cross context VM structure.
1155 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1156 * @thread The emulation thread.
1157 */
1158VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
1159{
1160 uint64_t off;
1161 tmTimerPollInternal(pVM, pVCpu, &off, NULL);
1162}
1163
1164
1165/**
1166 * Set FF if we've passed the next virtual event.
1167 *
1168 * This function is called before FFs are checked in the inner execution EM loops.
1169 *
1170 * @returns The GIP timestamp of the next event.
1171 * 0 if the next event has already expired.
1172 * @param pVM The cross context VM structure.
1173 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1174 * @param pu64Delta Where to store the delta.
1175 * @thread The emulation thread.
1176 */
1177VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
1178{
1179 return tmTimerPollInternal(pVM, pVCpu, pu64Delta, NULL);
1180}
1181
1182#endif /* VBOX_HIGH_RES_TIMERS_HACK */
1183
/**
 * Locks the timer clock.
 *
 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
 *          if the clock does not have a lock.
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 * @param   rcBusy      What to return in ring-0 and raw-mode context if the
 *                      lock is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section thru a ring-3 call if necessary.
 *
 * @remarks Currently only supported on timers using the virtual sync clock.
 */
VMMDECL(int) TMTimerLock(PVMCC pVM, TMTIMERHANDLE hTimer, int rcBusy)
{
    /* Resolves hTimer into pTimer/pQueue/idxQueue locals, returning on a bad handle. */
    TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    /* Only the virtual sync queue has a clock lock (VirtualSyncLock). */
    AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
    return PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, rcBusy);
}
1203
1204
/**
 * Unlocks a timer clock locked by TMTimerLock.
 *
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(void) TMTimerUnlock(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    /* Resolves hTimer into pTimer/pQueue/idxQueue locals, returning on a bad handle. */
    TMTIMER_HANDLE_TO_VARS_RETURN_VOID(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    /* Mirror of TMTimerLock: only the virtual sync clock has a lock. */
    AssertReturnVoid(idxQueue == TMCLOCK_VIRTUAL_SYNC);
    PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
}
1217
1218
/**
 * Checks if the current thread owns the timer clock lock.
 *
 * @returns @c true if its the owner, @c false if not.
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(bool) TMTimerIsLockOwner(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    /* Resolves hTimer; returns false (not an error code) on a bad handle. */
    TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    /* Only the virtual sync clock has a lock, so other clocks never "own" it. */
    AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, false);
    return PDMCritSectIsOwner(pVM, &pVM->tm.s.VirtualSyncLock);
}
1232
1233
/**
 * Optimized TMTimerSet code path for starting an inactive timer.
 *
 * Caller holds the queue's TimerLock and has already transitioned the timer
 * to ACTIVE (see TMTimerSet), so the timer must not be linked yet.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 * @param   u64Expire       The new expire time.
 * @param   pQueue          Pointer to the shared timer queue data.
 * @param   idxQueue        The queue index.
 */
static int tmTimerSetOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire, PTMTIMERQUEUE pQueue, uint32_t idxQueue)
{
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
    {
        /* Never arm a virtual-sync timer in the past: clamp to the last
           virtual sync timestamp (asserting, since callers shouldn't do it). */
        uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
        AssertMsgStmt(u64Expire >= u64Last,
                      ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
                      u64Expire = u64Last);
    }
    ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, pTimer->szName, u64Expire));

    /*
     * Link the timer into the active list.
     */
    tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue), pQueue, pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    return VINF_SUCCESS;
}
1272
1273
/**
 * TMTimerSet for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock (VirtualSyncLock) and bypassing the scheduling list: timers go
 * directly between STOPPED/EXPIRED_DELIVER/ACTIVE and the active list.
 *
 * @returns VBox status code
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 * @param   u64Expire       The expiration time.
 */
static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
    PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
    TMTIMERSTATE const enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);

            /* Arming in the past is a caller bug for the sync clock. */
            AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
                      ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, pTimer->szName));
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            /* Re-arm: unlink, update expire, relink at the right position. */
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
            tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        /* The PENDING_* scheduling states are never used for virtual sync
           timers (this path bypasses the scheduling list), so they're invalid. */
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
    return rc;
}
1344
1345
/**
 * Arm a timer with a (new) expire time.
 *
 * Virtual sync timers take a locked fast path; all other clocks go through
 * the lock-free timer state machine, first trying an optimized start under
 * the queue lock and otherwise retrying atomic state transitions.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   hTimer          Timer handle as returned by one of the create functions.
 * @param   u64Expire       New expire time.
 */
VMMDECL(int) TMTimerSet(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t u64Expire)
{
    TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    STAM_COUNTER_INC(&pTimer->StatSetAbsolute);

    /* Treat virtual sync timers specially. */
    if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pVM, pTimer);

    DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", pTimer->szName);

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

#if 1
    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (   enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        || (   enmState1 == TMTIMERSTATE_STOPPED
            && pTimer->pCritSect))
    {
        /* Try take the TM lock and check the state again. */
        int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
        if (RT_SUCCESS_NP(rc))
        {
            /* Re-check under the lock via a CAS; on success we can link directly. */
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire, pQueue, idxQueue);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                PDMCritSectLeave(pVM, &pQueue->TimerLock);
                return VINF_SUCCESS;
            }
            PDMCritSectLeave(pVM, &pQueue->TimerLock);
        }
    }
#endif

    /*
     * Unoptimized code path.
     *
     * Lock-free: loop attempting atomic transitions into a *_SET_EXPIRE
     * state, write the expire time, then advance to (RE)SCHEDULE and let
     * tmSchedule do the queue work.  Bounded to 1000 retries.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), pTimer->szName, cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(pTimer->idxPrev == UINT32_MAX);
                    Assert(pTimer->idxNext == UINT32_MAX);
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            /* Transient states owned by another thread: back off and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}
1506
1507
1508/**
1509 * Return the current time for the specified clock, setting pu64Now if not NULL.
1510 *
1511 * @returns Current time.
1512 * @param pVM The cross context VM structure.
1513 * @param enmClock The clock to query.
1514 * @param pu64Now Optional pointer where to store the return time
1515 */
1516DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1517{
1518 uint64_t u64Now;
1519 switch (enmClock)
1520 {
1521 case TMCLOCK_VIRTUAL_SYNC:
1522 u64Now = TMVirtualSyncGet(pVM);
1523 break;
1524 case TMCLOCK_VIRTUAL:
1525 u64Now = TMVirtualGet(pVM);
1526 break;
1527 case TMCLOCK_REAL:
1528 u64Now = TMRealGet(pVM);
1529 break;
1530 default:
1531 AssertFatalMsgFailed(("%d\n", enmClock));
1532 }
1533
1534 if (pu64Now)
1535 *pu64Now = u64Now;
1536 return u64Now;
1537}
1538
1539
/**
 * Optimized TMTimerSetRelative code path.
 *
 * Caller holds the queue's TimerLock and has already transitioned the timer
 * to ACTIVE (see tmTimerSetRelative), so the timer must not be linked yet.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 * @param   pQueueCC        The context specific queue data (same as @a pQueue
 *                          for ring-3).
 * @param   pQueue          The shared queue data.
 */
static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
                                            PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
{
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
    pTimer->u64Expire = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, pTimer->szName, u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", pTimer->szName);
    tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    return VINF_SUCCESS;
}
1577
1578
1579/**
1580 * TMTimerSetRelative for the virtual sync timer queue.
1581 *
1582 * This employs a greatly simplified state machine by always acquiring the
1583 * queue lock and bypassing the scheduling list.
1584 *
1585 * @returns VBox status code
1586 * @param pVM The cross context VM structure.
1587 * @param pTimer The timer to (re-)arm.
1588 * @param cTicksToNext Clock ticks until the next time expiration.
1589 * @param pu64Now Where to return the current time stamp used.
1590 * Optional.
1591 */
1592static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1593{
1594 STAM_PROFILE_START(pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1595 VM_ASSERT_EMT(pVM);
1596 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1597 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1598 AssertRCReturn(rc, rc);
1599
1600 /* Calculate the expiration tick. */
1601 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1602 if (pu64Now)
1603 *pu64Now = u64Expire;
1604 u64Expire += cTicksToNext;
1605
1606 /* Update the timer. */
1607 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1608 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
1609 TMTIMERSTATE const enmState = pTimer->enmState;
1610 switch (enmState)
1611 {
1612 case TMTIMERSTATE_EXPIRED_DELIVER:
1613 case TMTIMERSTATE_STOPPED:
1614 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1615 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1616 else
1617 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1618 pTimer->u64Expire = u64Expire;
1619 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1620 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1621 rc = VINF_SUCCESS;
1622 break;
1623
1624 case TMTIMERSTATE_ACTIVE:
1625 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1626 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
1627 pTimer->u64Expire = u64Expire;
1628 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1629 rc = VINF_SUCCESS;
1630 break;
1631
1632 case TMTIMERSTATE_PENDING_RESCHEDULE:
1633 case TMTIMERSTATE_PENDING_STOP:
1634 case TMTIMERSTATE_PENDING_SCHEDULE:
1635 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1636 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1637 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1638 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1639 case TMTIMERSTATE_DESTROY:
1640 case TMTIMERSTATE_FREE:
1641 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1642 rc = VERR_TM_INVALID_STATE;
1643 break;
1644
1645 default:
1646 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1647 rc = VERR_TM_UNKNOWN_STATE;
1648 break;
1649 }
1650
1651 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1652 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1653 return rc;
1654}
1655
1656
1657/**
1658 * Arm a timer with a expire time relative to the current time.
1659 *
1660 * @returns VBox status code.
1661 * @param pVM The cross context VM structure.
1662 * @param pTimer The timer to arm.
1663 * @param cTicksToNext Clock ticks until the next time expiration.
1664 * @param pu64Now Where to return the current time stamp used.
1665 * Optional.
1666 * @param pQueueCC The context specific queue data (same as @a pQueue
1667 * for ring-3).
1668 * @param pQueue The shared queue data.
1669 */
1670static int tmTimerSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1671 PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
1672{
1673 STAM_COUNTER_INC(&pTimer->StatSetRelative);
1674
1675 /* Treat virtual sync timers specially. */
1676 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1677 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
1678
1679 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1680 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1681
1682 DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", pTimer->szName);
1683
1684#ifdef VBOX_WITH_STATISTICS
1685 /*
1686 * Gather optimization info.
1687 */
1688 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1689 TMTIMERSTATE enmOrgState = pTimer->enmState;
1690 switch (enmOrgState)
1691 {
1692 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1693 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1694 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1695 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1696 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1697 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1698 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1699 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1700 }
1701#endif
1702
1703 /*
1704 * Try to take the TM lock and optimize the common cases.
1705 *
1706 * With the TM lock we can safely make optimizations like immediate
1707 * scheduling and we can also be 100% sure that we're not racing the
1708 * running of the timer queues. As an additional restraint we require the
1709 * timer to have a critical section associated with to be 100% there aren't
1710 * concurrent operations on the timer. (This latter isn't necessary any
1711 * longer as this isn't supported for any timers, critsect or not.)
1712 *
1713 * Note! Lock ordering doesn't apply when we only _try_ to
1714 * get the innermost locks.
1715 */
1716 bool fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1717#if 1
1718 if ( fOwnTMLock
1719 && pTimer->pCritSect)
1720 {
1721 TMTIMERSTATE enmState = pTimer->enmState;
1722 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1723 || enmState == TMTIMERSTATE_STOPPED)
1724 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1725 {
1726 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1727 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1728 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1729 return VINF_SUCCESS;
1730 }
1731
1732 /* Optimize other states when it becomes necessary. */
1733 }
1734#endif
1735
1736 /*
1737 * Unoptimized path.
1738 */
1739 int rc;
1740 for (int cRetries = 1000; ; cRetries--)
1741 {
1742 /*
1743 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1744 */
1745 TMTIMERSTATE enmState = pTimer->enmState;
1746 switch (enmState)
1747 {
1748 case TMTIMERSTATE_STOPPED:
1749 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1750 {
1751 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1752 * Figure a safe way of activating this timer while the queue is
1753 * being run.
1754 * (99.9% sure this that the assertion is caused by DevAPIC.cpp
1755 * re-starting the timer in response to a initial_count write.) */
1756 }
1757 RT_FALL_THRU();
1758 case TMTIMERSTATE_EXPIRED_DELIVER:
1759 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1760 {
1761 Assert(pTimer->idxPrev == UINT32_MAX);
1762 Assert(pTimer->idxNext == UINT32_MAX);
1763 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1764 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1765 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1766 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1767 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1768 rc = VINF_SUCCESS;
1769 break;
1770 }
1771 rc = VERR_TRY_AGAIN;
1772 break;
1773
1774 case TMTIMERSTATE_PENDING_SCHEDULE:
1775 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1776 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1777 {
1778 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1779 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1780 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1781 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1782 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1783 rc = VINF_SUCCESS;
1784 break;
1785 }
1786 rc = VERR_TRY_AGAIN;
1787 break;
1788
1789
1790 case TMTIMERSTATE_ACTIVE:
1791 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1792 {
1793 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1794 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1795 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1796 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1797 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1798 rc = VINF_SUCCESS;
1799 break;
1800 }
1801 rc = VERR_TRY_AGAIN;
1802 break;
1803
1804 case TMTIMERSTATE_PENDING_RESCHEDULE:
1805 case TMTIMERSTATE_PENDING_STOP:
1806 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1807 {
1808 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1809 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1810 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1811 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1812 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1813 rc = VINF_SUCCESS;
1814 break;
1815 }
1816 rc = VERR_TRY_AGAIN;
1817 break;
1818
1819
1820 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1821 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1822 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1823#ifdef IN_RING3
1824 if (!RTThreadYield())
1825 RTThreadSleep(1);
1826#else
1827/** @todo call host context and yield after a couple of iterations */
1828#endif
1829 rc = VERR_TRY_AGAIN;
1830 break;
1831
1832 /*
1833 * Invalid states.
1834 */
1835 case TMTIMERSTATE_DESTROY:
1836 case TMTIMERSTATE_FREE:
1837 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1838 rc = VERR_TM_INVALID_STATE;
1839 break;
1840
1841 default:
1842 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1843 rc = VERR_TM_UNKNOWN_STATE;
1844 break;
1845 }
1846
1847 /* switch + loop is tedious to break out of. */
1848 if (rc == VINF_SUCCESS)
1849 break;
1850
1851 if (rc != VERR_TRY_AGAIN)
1852 {
1853 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1854 break;
1855 }
1856 if (cRetries <= 0)
1857 {
1858 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1859 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1860 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1861 break;
1862 }
1863
1864 /*
1865 * Retry to gain locks.
1866 */
1867 if (!fOwnTMLock)
1868 fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1869
1870 } /* for (;;) */
1871
1872 /*
1873 * Clean up and return.
1874 */
1875 if (fOwnTMLock)
1876 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1877
1878 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1879 return rc;
1880}
1881
1882
1883/**
1884 * Arm a timer with a expire time relative to the current time.
1885 *
1886 * @returns VBox status code.
1887 * @param pVM The cross context VM structure.
1888 * @param hTimer Timer handle as returned by one of the create functions.
1889 * @param cTicksToNext Clock ticks until the next time expiration.
1890 * @param pu64Now Where to return the current time stamp used.
1891 * Optional.
1892 */
1893VMMDECL(int) TMTimerSetRelative(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1894{
1895 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1896 return tmTimerSetRelative(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1897}
1898
1899
1900/**
1901 * Drops a hint about the frequency of the timer.
1902 *
1903 * This is used by TM and the VMM to calculate how often guest execution needs
1904 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1905 *
1906 * @returns VBox status code.
1907 * @param pVM The cross context VM structure.
1908 * @param hTimer Timer handle as returned by one of the create functions.
1909 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1910 *
1911 * @remarks We're using an integer hertz value here since anything above 1 HZ
1912 * is not going to be any trouble satisfying scheduling wise. The
1913 * range where it makes sense is >= 100 HZ.
1914 */
1915VMMDECL(int) TMTimerSetFrequencyHint(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t uHzHint)
1916{
1917 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1918 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1919
1920 uint32_t const uHzOldHint = pTimer->uHzHint;
1921 pTimer->uHzHint = uHzHint;
1922
1923 uint32_t const uMaxHzHint = pQueue->uMaxHzHint;
1924 if ( uHzHint > uMaxHzHint
1925 || uHzOldHint >= uMaxHzHint)
1926 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1927
1928 return VINF_SUCCESS;
1929}
1930
1931
1932/**
1933 * TMTimerStop for the virtual sync timer queue.
1934 *
1935 * This employs a greatly simplified state machine by always acquiring the
1936 * queue lock and bypassing the scheduling list.
1937 *
1938 * @returns VBox status code
1939 * @param pVM The cross context VM structure.
1940 * @param pTimer The timer handle.
1941 */
1942static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
1943{
1944 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1945 VM_ASSERT_EMT(pVM);
1946 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1947 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1948 AssertRCReturn(rc, rc);
1949
1950 /* Reset the HZ hint. */
1951 uint32_t uOldHzHint = pTimer->uHzHint;
1952 if (uOldHzHint)
1953 {
1954 if (uOldHzHint >= pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].uMaxHzHint)
1955 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(TMCLOCK_VIRTUAL_SYNC) | RT_BIT_32(TMCLOCK_VIRTUAL_SYNC + 16));
1956 pTimer->uHzHint = 0;
1957 }
1958
1959 /* Update the timer state. */
1960 TMTIMERSTATE const enmState = pTimer->enmState;
1961 switch (enmState)
1962 {
1963 case TMTIMERSTATE_ACTIVE:
1964 {
1965 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1966 tmTimerQueueUnlinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue), pQueue, pTimer);
1967 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1968 rc = VINF_SUCCESS;
1969 break;
1970 }
1971
1972 case TMTIMERSTATE_EXPIRED_DELIVER:
1973 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1974 rc = VINF_SUCCESS;
1975 break;
1976
1977 case TMTIMERSTATE_STOPPED:
1978 rc = VINF_SUCCESS;
1979 break;
1980
1981 case TMTIMERSTATE_PENDING_RESCHEDULE:
1982 case TMTIMERSTATE_PENDING_STOP:
1983 case TMTIMERSTATE_PENDING_SCHEDULE:
1984 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1985 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1986 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1987 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1988 case TMTIMERSTATE_DESTROY:
1989 case TMTIMERSTATE_FREE:
1990 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1991 rc = VERR_TM_INVALID_STATE;
1992 break;
1993
1994 default:
1995 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1996 rc = VERR_TM_UNKNOWN_STATE;
1997 break;
1998 }
1999
2000 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
2001 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
2002 return rc;
2003}
2004
2005
2006/**
2007 * Stop the timer.
2008 * Use TMR3TimerArm() to "un-stop" the timer.
2009 *
2010 * @returns VBox status code.
2011 * @param pVM The cross context VM structure.
2012 * @param hTimer Timer handle as returned by one of the create functions.
2013 */
2014VMMDECL(int) TMTimerStop(PVMCC pVM, TMTIMERHANDLE hTimer)
2015{
2016 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2017 STAM_COUNTER_INC(&pTimer->StatStop);
2018
2019 /* Treat virtual sync timers specially. */
2020 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
2021 return tmTimerVirtualSyncStop(pVM, pTimer);
2022
2023 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2024 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
2025
2026 /*
2027 * Reset the HZ hint.
2028 */
2029 uint32_t const uOldHzHint = pTimer->uHzHint;
2030 if (uOldHzHint)
2031 {
2032 if (uOldHzHint >= pQueue->uMaxHzHint)
2033 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
2034 pTimer->uHzHint = 0;
2035 }
2036
2037 /** @todo see if this function needs optimizing. */
2038 int cRetries = 1000;
2039 do
2040 {
2041 /*
2042 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
2043 */
2044 TMTIMERSTATE enmState = pTimer->enmState;
2045 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
2046 pTimer, tmTimerState(enmState), pTimer->szName, cRetries));
2047 switch (enmState)
2048 {
2049 case TMTIMERSTATE_EXPIRED_DELIVER:
2050 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
2051 return VERR_INVALID_PARAMETER;
2052
2053 case TMTIMERSTATE_STOPPED:
2054 case TMTIMERSTATE_PENDING_STOP:
2055 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2056 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2057 return VINF_SUCCESS;
2058
2059 case TMTIMERSTATE_PENDING_SCHEDULE:
2060 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
2061 {
2062 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2063 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2064 return VINF_SUCCESS;
2065 }
2066 break;
2067
2068 case TMTIMERSTATE_PENDING_RESCHEDULE:
2069 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2070 {
2071 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2072 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2073 return VINF_SUCCESS;
2074 }
2075 break;
2076
2077 case TMTIMERSTATE_ACTIVE:
2078 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2079 {
2080 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2081 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2082 return VINF_SUCCESS;
2083 }
2084 break;
2085
2086 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2087 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2088 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2089#ifdef IN_RING3
2090 if (!RTThreadYield())
2091 RTThreadSleep(1);
2092#else
2093/** @todo call host and yield cpu after a while. */
2094#endif
2095 break;
2096
2097 /*
2098 * Invalid states.
2099 */
2100 case TMTIMERSTATE_DESTROY:
2101 case TMTIMERSTATE_FREE:
2102 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2103 return VERR_TM_INVALID_STATE;
2104 default:
2105 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2106 return VERR_TM_UNKNOWN_STATE;
2107 }
2108 } while (cRetries-- > 0);
2109
2110 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2111 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2112 return VERR_TM_TIMER_UNSTABLE_STATE;
2113}
2114
2115
2116/**
2117 * Get the current clock time.
2118 * Handy for calculating the new expire time.
2119 *
2120 * @returns Current clock time.
2121 * @param pVM The cross context VM structure.
2122 * @param hTimer Timer handle as returned by one of the create functions.
2123 */
2124VMMDECL(uint64_t) TMTimerGet(PVMCC pVM, TMTIMERHANDLE hTimer)
2125{
2126 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2127 STAM_COUNTER_INC(&pTimer->StatGet);
2128
2129 uint64_t u64;
2130 switch (pQueue->enmClock)
2131 {
2132 case TMCLOCK_VIRTUAL:
2133 u64 = TMVirtualGet(pVM);
2134 break;
2135 case TMCLOCK_VIRTUAL_SYNC:
2136 u64 = TMVirtualSyncGet(pVM);
2137 break;
2138 case TMCLOCK_REAL:
2139 u64 = TMRealGet(pVM);
2140 break;
2141 default:
2142 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2143 return UINT64_MAX;
2144 }
2145 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2146 // u64, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2147 return u64;
2148}
2149
2150
2151/**
2152 * Get the frequency of the timer clock.
2153 *
2154 * @returns Clock frequency (as Hz of course).
2155 * @param pVM The cross context VM structure.
2156 * @param hTimer Timer handle as returned by one of the create functions.
2157 */
2158VMMDECL(uint64_t) TMTimerGetFreq(PVMCC pVM, TMTIMERHANDLE hTimer)
2159{
2160 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2161 switch (pQueue->enmClock)
2162 {
2163 case TMCLOCK_VIRTUAL:
2164 case TMCLOCK_VIRTUAL_SYNC:
2165 return TMCLOCK_FREQ_VIRTUAL;
2166
2167 case TMCLOCK_REAL:
2168 return TMCLOCK_FREQ_REAL;
2169
2170 default:
2171 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2172 return 0;
2173 }
2174}
2175
2176
2177/**
2178 * Get the expire time of the timer.
2179 * Only valid for active timers.
2180 *
2181 * @returns Expire time of the timer.
2182 * @param pVM The cross context VM structure.
2183 * @param hTimer Timer handle as returned by one of the create functions.
2184 */
2185VMMDECL(uint64_t) TMTimerGetExpire(PVMCC pVM, TMTIMERHANDLE hTimer)
2186{
2187 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, UINT64_MAX); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2188 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
2189 int cRetries = 1000;
2190 do
2191 {
2192 TMTIMERSTATE enmState = pTimer->enmState;
2193 switch (enmState)
2194 {
2195 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2196 case TMTIMERSTATE_EXPIRED_DELIVER:
2197 case TMTIMERSTATE_STOPPED:
2198 case TMTIMERSTATE_PENDING_STOP:
2199 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2200 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2201 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2202 return UINT64_MAX;
2203
2204 case TMTIMERSTATE_ACTIVE:
2205 case TMTIMERSTATE_PENDING_RESCHEDULE:
2206 case TMTIMERSTATE_PENDING_SCHEDULE:
2207 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2208 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2209 return pTimer->u64Expire;
2210
2211 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2212 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2213#ifdef IN_RING3
2214 if (!RTThreadYield())
2215 RTThreadSleep(1);
2216#endif
2217 break;
2218
2219 /*
2220 * Invalid states.
2221 */
2222 case TMTIMERSTATE_DESTROY:
2223 case TMTIMERSTATE_FREE:
2224 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2225 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2226 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2227 return UINT64_MAX;
2228 default:
2229 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2230 return UINT64_MAX;
2231 }
2232 } while (cRetries-- > 0);
2233
2234 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2235 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2236 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2237 return UINT64_MAX;
2238}
2239
2240
2241/**
2242 * Checks if a timer is active or not.
2243 *
2244 * @returns True if active.
2245 * @returns False if not active.
2246 * @param pVM The cross context VM structure.
2247 * @param hTimer Timer handle as returned by one of the create functions.
2248 */
2249VMMDECL(bool) TMTimerIsActive(PVMCC pVM, TMTIMERHANDLE hTimer)
2250{
2251 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2252 TMTIMERSTATE enmState = pTimer->enmState;
2253 switch (enmState)
2254 {
2255 case TMTIMERSTATE_STOPPED:
2256 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2257 case TMTIMERSTATE_EXPIRED_DELIVER:
2258 case TMTIMERSTATE_PENDING_STOP:
2259 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2260 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2261 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2262 return false;
2263
2264 case TMTIMERSTATE_ACTIVE:
2265 case TMTIMERSTATE_PENDING_RESCHEDULE:
2266 case TMTIMERSTATE_PENDING_SCHEDULE:
2267 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2268 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2269 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2270 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2271 return true;
2272
2273 /*
2274 * Invalid states.
2275 */
2276 case TMTIMERSTATE_DESTROY:
2277 case TMTIMERSTATE_FREE:
2278 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), pTimer->szName));
2279 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2280 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2281 return false;
2282 default:
2283 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2284 return false;
2285 }
2286}
2287
2288
2289/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2290
2291
2292/**
2293 * Arm a timer with a (new) expire time relative to current time.
2294 *
2295 * @returns VBox status code.
2296 * @param pVM The cross context VM structure.
2297 * @param hTimer Timer handle as returned by one of the create functions.
2298 * @param cMilliesToNext Number of milliseconds to the next tick.
2299 */
2300VMMDECL(int) TMTimerSetMillies(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t cMilliesToNext)
2301{
2302 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2303 switch (pQueue->enmClock)
2304 {
2305 case TMCLOCK_VIRTUAL:
2306 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2307 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2308
2309 case TMCLOCK_VIRTUAL_SYNC:
2310 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2311 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2312
2313 case TMCLOCK_REAL:
2314 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2315 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext, NULL, pQueueCC, pQueue);
2316
2317 default:
2318 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2319 return VERR_TM_TIMER_BAD_CLOCK;
2320 }
2321}
2322
2323
2324/**
2325 * Arm a timer with a (new) expire time relative to current time.
2326 *
2327 * @returns VBox status code.
2328 * @param pVM The cross context VM structure.
2329 * @param hTimer Timer handle as returned by one of the create functions.
2330 * @param cMicrosToNext Number of microseconds to the next tick.
2331 */
2332VMMDECL(int) TMTimerSetMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicrosToNext)
2333{
2334 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2335 switch (pQueue->enmClock)
2336 {
2337 case TMCLOCK_VIRTUAL:
2338 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2339 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2340
2341 case TMCLOCK_VIRTUAL_SYNC:
2342 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2343 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2344
2345 case TMCLOCK_REAL:
2346 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2347 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext / 1000, NULL, pQueueCC, pQueue);
2348
2349 default:
2350 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2351 return VERR_TM_TIMER_BAD_CLOCK;
2352 }
2353}
2354
2355
2356/**
2357 * Arm a timer with a (new) expire time relative to current time.
2358 *
2359 * @returns VBox status code.
2360 * @param pVM The cross context VM structure.
2361 * @param hTimer Timer handle as returned by one of the create functions.
2362 * @param cNanosToNext Number of nanoseconds to the next tick.
2363 */
2364VMMDECL(int) TMTimerSetNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanosToNext)
2365{
2366 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2367 switch (pQueue->enmClock)
2368 {
2369 case TMCLOCK_VIRTUAL:
2370 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2371 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2372
2373 case TMCLOCK_VIRTUAL_SYNC:
2374 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2375 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2376
2377 case TMCLOCK_REAL:
2378 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2379 return tmTimerSetRelative(pVM, pTimer, cNanosToNext / 1000000, NULL, pQueueCC, pQueue);
2380
2381 default:
2382 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2383 return VERR_TM_TIMER_BAD_CLOCK;
2384 }
2385}
2386
2387
2388/**
2389 * Get the current clock time as nanoseconds.
2390 *
2391 * @returns The timer clock as nanoseconds.
2392 * @param pVM The cross context VM structure.
2393 * @param hTimer Timer handle as returned by one of the create functions.
2394 */
2395VMMDECL(uint64_t) TMTimerGetNano(PVMCC pVM, TMTIMERHANDLE hTimer)
2396{
2397 return TMTimerToNano(pVM, hTimer, TMTimerGet(pVM, hTimer));
2398}
2399
2400
2401/**
2402 * Get the current clock time as microseconds.
2403 *
2404 * @returns The timer clock as microseconds.
2405 * @param pVM The cross context VM structure.
2406 * @param hTimer Timer handle as returned by one of the create functions.
2407 */
2408VMMDECL(uint64_t) TMTimerGetMicro(PVMCC pVM, TMTIMERHANDLE hTimer)
2409{
2410 return TMTimerToMicro(pVM, hTimer, TMTimerGet(pVM, hTimer));
2411}
2412
2413
2414/**
2415 * Get the current clock time as milliseconds.
2416 *
2417 * @returns The timer clock as milliseconds.
2418 * @param pVM The cross context VM structure.
2419 * @param hTimer Timer handle as returned by one of the create functions.
2420 */
2421VMMDECL(uint64_t) TMTimerGetMilli(PVMCC pVM, TMTIMERHANDLE hTimer)
2422{
2423 return TMTimerToMilli(pVM, hTimer, TMTimerGet(pVM, hTimer));
2424}
2425
2426
2427/**
2428 * Converts the specified timer clock time to nanoseconds.
2429 *
2430 * @returns nanoseconds.
2431 * @param pVM The cross context VM structure.
2432 * @param hTimer Timer handle as returned by one of the create functions.
2433 * @param cTicks The clock ticks.
2434 * @remark There could be rounding errors here. We just do a simple integer divide
2435 * without any adjustments.
2436 */
2437VMMDECL(uint64_t) TMTimerToNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2438{
2439 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2440 switch (pQueue->enmClock)
2441 {
2442 case TMCLOCK_VIRTUAL:
2443 case TMCLOCK_VIRTUAL_SYNC:
2444 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2445 return cTicks;
2446
2447 case TMCLOCK_REAL:
2448 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2449 return cTicks * 1000000;
2450
2451 default:
2452 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2453 return 0;
2454 }
2455}
2456
2457
2458/**
2459 * Converts the specified timer clock time to microseconds.
2460 *
2461 * @returns microseconds.
2462 * @param pVM The cross context VM structure.
2463 * @param hTimer Timer handle as returned by one of the create functions.
2464 * @param cTicks The clock ticks.
2465 * @remark There could be rounding errors here. We just do a simple integer divide
2466 * without any adjustments.
2467 */
2468VMMDECL(uint64_t) TMTimerToMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2469{
2470 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2471 switch (pQueue->enmClock)
2472 {
2473 case TMCLOCK_VIRTUAL:
2474 case TMCLOCK_VIRTUAL_SYNC:
2475 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2476 return cTicks / 1000;
2477
2478 case TMCLOCK_REAL:
2479 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2480 return cTicks * 1000;
2481
2482 default:
2483 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2484 return 0;
2485 }
2486}
2487
2488
2489/**
2490 * Converts the specified timer clock time to milliseconds.
2491 *
2492 * @returns milliseconds.
2493 * @param pVM The cross context VM structure.
2494 * @param hTimer Timer handle as returned by one of the create functions.
2495 * @param cTicks The clock ticks.
2496 * @remark There could be rounding errors here. We just do a simple integer divide
2497 * without any adjustments.
2498 */
2499VMMDECL(uint64_t) TMTimerToMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2500{
2501 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2502 switch (pQueue->enmClock)
2503 {
2504 case TMCLOCK_VIRTUAL:
2505 case TMCLOCK_VIRTUAL_SYNC:
2506 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2507 return cTicks / 1000000;
2508
2509 case TMCLOCK_REAL:
2510 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2511 return cTicks;
2512
2513 default:
2514 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2515 return 0;
2516 }
2517}
2518
2519
2520/**
2521 * Converts the specified nanosecond timestamp to timer clock ticks.
2522 *
2523 * @returns timer clock ticks.
2524 * @param pVM The cross context VM structure.
2525 * @param hTimer Timer handle as returned by one of the create functions.
2526 * @param cNanoSecs The nanosecond value ticks to convert.
2527 * @remark There could be rounding and overflow errors here.
2528 */
2529VMMDECL(uint64_t) TMTimerFromNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanoSecs)
2530{
2531 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2532 switch (pQueue->enmClock)
2533 {
2534 case TMCLOCK_VIRTUAL:
2535 case TMCLOCK_VIRTUAL_SYNC:
2536 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2537 return cNanoSecs;
2538
2539 case TMCLOCK_REAL:
2540 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2541 return cNanoSecs / 1000000;
2542
2543 default:
2544 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2545 return 0;
2546 }
2547}
2548
2549
2550/**
2551 * Converts the specified microsecond timestamp to timer clock ticks.
2552 *
2553 * @returns timer clock ticks.
2554 * @param pVM The cross context VM structure.
2555 * @param hTimer Timer handle as returned by one of the create functions.
2556 * @param cMicroSecs The microsecond value ticks to convert.
2557 * @remark There could be rounding and overflow errors here.
2558 */
2559VMMDECL(uint64_t) TMTimerFromMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicroSecs)
2560{
2561 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2562 switch (pQueue->enmClock)
2563 {
2564 case TMCLOCK_VIRTUAL:
2565 case TMCLOCK_VIRTUAL_SYNC:
2566 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2567 return cMicroSecs * 1000;
2568
2569 case TMCLOCK_REAL:
2570 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2571 return cMicroSecs / 1000;
2572
2573 default:
2574 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2575 return 0;
2576 }
2577}
2578
2579
2580/**
2581 * Converts the specified millisecond timestamp to timer clock ticks.
2582 *
2583 * @returns timer clock ticks.
2584 * @param pVM The cross context VM structure.
2585 * @param hTimer Timer handle as returned by one of the create functions.
2586 * @param cMilliSecs The millisecond value ticks to convert.
2587 * @remark There could be rounding and overflow errors here.
2588 */
2589VMMDECL(uint64_t) TMTimerFromMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMilliSecs)
2590{
2591 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2592 switch (pQueue->enmClock)
2593 {
2594 case TMCLOCK_VIRTUAL:
2595 case TMCLOCK_VIRTUAL_SYNC:
2596 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2597 return cMilliSecs * 1000000;
2598
2599 case TMCLOCK_REAL:
2600 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2601 return cMilliSecs;
2602
2603 default:
2604 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2605 return 0;
2606 }
2607}
2608
2609
2610/**
2611 * Convert state to string.
2612 *
2613 * @returns Readonly status name.
2614 * @param enmState State.
2615 */
2616const char *tmTimerState(TMTIMERSTATE enmState)
2617{
2618 switch (enmState)
2619 {
2620#define CASE(num, state) \
2621 case TMTIMERSTATE_##state: \
2622 AssertCompile(TMTIMERSTATE_##state == (num)); \
2623 return #num "-" #state
2624 CASE( 0,INVALID);
2625 CASE( 1,STOPPED);
2626 CASE( 2,ACTIVE);
2627 CASE( 3,EXPIRED_GET_UNLINK);
2628 CASE( 4,EXPIRED_DELIVER);
2629 CASE( 5,PENDING_STOP);
2630 CASE( 6,PENDING_STOP_SCHEDULE);
2631 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2632 CASE( 8,PENDING_SCHEDULE);
2633 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2634 CASE(10,PENDING_RESCHEDULE);
2635 CASE(11,DESTROY);
2636 CASE(12,FREE);
2637 default:
2638 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2639 return "Invalid state!";
2640#undef CASE
2641 }
2642}
2643
2644
2645#if defined(IN_RING0) || defined(IN_RING3)
2646/**
2647 * Copies over old timers and initialized newly allocted ones.
2648 *
2649 * Helper for TMR0TimerQueueGrow an tmR3TimerQueueGrow.
2650 *
2651 * @param paTimers The new timer allocation.
2652 * @param paOldTimers The old timers.
2653 * @param cNewTimers Number of new timers.
2654 * @param cOldTimers Number of old timers.
2655 */
2656void tmHCTimerQueueGrowInit(PTMTIMER paTimers, TMTIMER const *paOldTimers, uint32_t cNewTimers, uint32_t cOldTimers)
2657{
2658 Assert(cOldTimers < cNewTimers);
2659
2660 /*
2661 * Copy over the old info and initialize the new handles.
2662 */
2663 if (cOldTimers > 0)
2664 memcpy(paTimers, paOldTimers, sizeof(TMTIMER) * cOldTimers);
2665
2666 size_t i = cNewTimers;
2667 while (i-- > cOldTimers)
2668 {
2669 paTimers[i].u64Expire = UINT64_MAX;
2670 paTimers[i].enmType = TMTIMERTYPE_INVALID;
2671 paTimers[i].enmState = TMTIMERSTATE_FREE;
2672 paTimers[i].idxScheduleNext = UINT32_MAX;
2673 paTimers[i].idxNext = UINT32_MAX;
2674 paTimers[i].idxPrev = UINT32_MAX;
2675 paTimers[i].hSelf = NIL_TMTIMERHANDLE;
2676 }
2677
2678 /*
2679 * Mark the zero'th entry as allocated but invalid if we just allocated it.
2680 */
2681 if (cOldTimers == 0)
2682 {
2683 paTimers[0].enmState = TMTIMERSTATE_INVALID;
2684 paTimers[0].szName[0] = 'n';
2685 paTimers[0].szName[1] = 'i';
2686 paTimers[0].szName[2] = 'l';
2687 paTimers[0].szName[3] = '\0';
2688 }
2689}
2690#endif /* IN_RING0 || IN_RING3 */
2691
2692
/**
 * The slow path of tmGetFrequencyHint() where we try to recalculate the value.
 *
 * Walks all timer queues that are flagged as needing an update, recomputes
 * each queue's max Hz hint from its timers, and finally tries to publish the
 * new global maximum in HzHint.u64Combined.
 *
 * @returns The highest frequency.  0 if no timers care.
 * @param   pVM             The cross context VM structure.
 * @param   uOldMaxHzHint   The old global hint (as sampled by the caller from
 *                          the low dword of HzHint.u64Combined).
 */
DECL_NO_INLINE(static, uint32_t) tmGetFrequencyHintSlow(PVMCC pVM, uint32_t uOldMaxHzHint)
{
    /* Writers set two bits per queue (idxQueue and idxQueue + 16) — though not
       entirely sure it's needed (original author: "too exhausted to think
       clearly") — but it should force other callers thru the slow path while
       we're recalculating and help us detect changes while we're
       recalculating.  The two-bits-per-queue scheme only works with <= 16
       queues, hence the compile-time assertion. */
    AssertCompile(RT_ELEMENTS(pVM->tm.s.aTimerQueues) <= 16);

    /*
     * The "right" highest frequency value isn't so important that we'll block
     * waiting on the timer semaphores.
     */
    uint32_t uMaxHzHint = 0;
    for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];

        /* Get the max Hz hint for the queue.  Use the cached value when the
           queue's update bits are clear or when the lock cannot be taken
           without blocking. */
        uint32_t uMaxHzHintQueue;
        if (   !(ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
            || RT_FAILURE_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock)))
            uMaxHzHintQueue = ASMAtomicReadU32(&pQueue->uMaxHzHint);
        else
        {
            /* Is it still necessary to do updating?  (Another EMT may have
               refreshed the queue while we were acquiring the lock.) */
            if (ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
            {
                ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue + 16)); /* clear one flag up front */

                /* Scan the queue for the highest hint on a timer that is in a
                   state that can (or is about to) expire; stopped and
                   destroyed timers don't count. */
                PTMTIMERQUEUECC pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
                uMaxHzHintQueue = 0;
                for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
                     pCur;
                     pCur = tmTimerGetNext(pQueueCC, pCur))
                {
                    uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
                    if (uHzHint > uMaxHzHintQueue)
                    {
                        TMTIMERSTATE enmState = pCur->enmState;
                        switch (enmState)
                        {
                            case TMTIMERSTATE_ACTIVE:
                            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                            case TMTIMERSTATE_EXPIRED_DELIVER:
                            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_SCHEDULE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE:
                                uMaxHzHintQueue = uHzHint;
                                break;

                            case TMTIMERSTATE_STOPPED:
                            case TMTIMERSTATE_PENDING_STOP:
                            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                            case TMTIMERSTATE_DESTROY:
                            case TMTIMERSTATE_FREE:
                            case TMTIMERSTATE_INVALID:
                                break;
                            /* no default, want gcc warnings when adding more states. */
                        }
                    }
                }

                /* Write the new Hz hint for the queue and clear the other update flag. */
                ASMAtomicUoWriteU32(&pQueue->uMaxHzHint, uMaxHzHintQueue);
                ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue));
            }
            else
                uMaxHzHintQueue = ASMAtomicUoReadU32(&pQueue->uMaxHzHint);

            PDMCritSectLeave(pVM, &pQueue->TimerLock);
        }

        /* Update the global max Hz hint. */
        if (uMaxHzHint < uMaxHzHintQueue)
            uMaxHzHint = uMaxHzHintQueue;
    }

    /*
     * Update the frequency hint if no pending frequency changes and we didn't race anyone thru here.
     * On CmpXchg failure u64Actual receives the current value, which drives the retry loop below.
     */
    uint64_t u64Actual = RT_MAKE_U64(0 /*no pending updates*/, uOldMaxHzHint);
    if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
        Log(("tmGetFrequencyHintSlow: New value %u Hz\n", uMaxHzHint));
    else
        for (uint32_t iTry = 1;; iTry++)
        {
            /* Give up if new update flags appeared (our result is outdated and a
               fresh recalculation will follow) or after a few failed attempts. */
            if (RT_LO_U32(u64Actual) != 0)
                Log(("tmGetFrequencyHintSlow: Outdated value %u Hz (%#x, try %u)\n", uMaxHzHint, RT_LO_U32(u64Actual), iTry));
            else if (iTry >= 4)
                Log(("tmGetFrequencyHintSlow: Unable to set %u Hz (try %u)\n", uMaxHzHint, iTry));
            else if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
                Log(("tmGetFrequencyHintSlow: New value %u Hz (try %u)\n", uMaxHzHint, iTry));
            else
                continue;
            break;
        }
    return uMaxHzHint;
}
2798
2799
2800/**
2801 * Gets the highest frequency hint for all the important timers.
2802 *
2803 * @returns The highest frequency. 0 if no timers care.
2804 * @param pVM The cross context VM structure.
2805 */
2806DECLINLINE(uint32_t) tmGetFrequencyHint(PVMCC pVM)
2807{
2808 /*
2809 * Query the value, recalculate it if necessary.
2810 */
2811 uint64_t u64Combined = ASMAtomicReadU64(&pVM->tm.s.HzHint.u64Combined);
2812 if (RT_HI_U32(u64Combined) == 0)
2813 return RT_LO_U32(u64Combined); /* hopefully somewhat likely */
2814 return tmGetFrequencyHintSlow(pVM, RT_LO_U32(u64Combined));
2815}
2816
2817
2818/**
2819 * Calculates a host timer frequency that would be suitable for the current
2820 * timer load.
2821 *
2822 * This will take the highest timer frequency, adjust for catch-up and warp
2823 * driver, and finally add a little fudge factor. The caller (VMM) will use
2824 * the result to adjust the per-cpu preemption timer.
2825 *
2826 * @returns The highest frequency. 0 if no important timers around.
2827 * @param pVM The cross context VM structure.
2828 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2829 */
2830VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
2831{
2832 uint32_t uHz = tmGetFrequencyHint(pVM);
2833
2834 /* Catch up, we have to be more aggressive than the % indicates at the
2835 beginning of the effort. */
2836 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2837 {
2838 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2839 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2840 {
2841 if (u32Pct <= 100)
2842 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2843 else if (u32Pct <= 200)
2844 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2845 else if (u32Pct <= 400)
2846 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2847 uHz *= u32Pct + 100;
2848 uHz /= 100;
2849 }
2850 }
2851
2852 /* Warp drive. */
2853 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2854 {
2855 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2856 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2857 {
2858 uHz *= u32Pct;
2859 uHz /= 100;
2860 }
2861 }
2862
2863 /* Fudge factor. */
2864 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2865 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2866 else
2867 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2868 uHz /= 100;
2869
2870 /* Make sure it isn't too high. */
2871 if (uHz > pVM->tm.s.cHostHzMax)
2872 uHz = pVM->tm.s.cHostHzMax;
2873
2874 return uHz;
2875}
2876
2877
2878/**
2879 * Whether the guest virtual clock is ticking.
2880 *
2881 * @returns true if ticking, false otherwise.
2882 * @param pVM The cross context VM structure.
2883 */
2884VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2885{
2886 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2887}
2888
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette