VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp

Last change on this file was 106368, checked in by vboxsync, 6 weeks ago

VMM/TMAll: Disable annoying assertion on ARMv8, bugref:1038

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 108.4 KB
1/* $Id: TMAll.cpp 106368 2024-10-16 13:16:40Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_TM
33#ifdef DEBUG_bird
34# define DBGFTRACE_DISABLED /* annoying */
35#endif
36#include <VBox/vmm/tm.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/dbgftrace.h>
39#ifdef IN_RING3
40#endif
41#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
42#include "TMInternal.h"
43#include <VBox/vmm/vmcc.h>
44
45#include <VBox/param.h>
46#include <VBox/err.h>
47#include <VBox/log.h>
48#include <VBox/sup.h>
49#include <iprt/time.h>
50#include <iprt/assert.h>
51#include <iprt/asm.h>
52#include <iprt/asm-math.h>
53#include <iprt/string.h>
54#ifdef IN_RING3
55# include <iprt/thread.h>
56#endif
57
58#include "TMInline.h"
59
60
61/*********************************************************************************************************************************
62* Defined Constants And Macros *
63*********************************************************************************************************************************/
64#ifdef VBOX_STRICT
65/** @def TMTIMER_GET_CRITSECT
66 * Helper for safely resolving the critical section for a timer belonging to a
67 * device instance.
68 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
69# ifdef IN_RING3
70# define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) ((a_pTimer)->pCritSect)
71# else
72# define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) tmRZTimerGetCritSect(a_pVM, a_pTimer)
73# endif
74#endif
75
76/** @def TMTIMER_ASSERT_CRITSECT
77 * Checks that the caller owns the critical section if one is associated with
78 * the timer. */
79#ifdef VBOX_STRICT
80# define TMTIMER_ASSERT_CRITSECT(a_pVM, a_pTimer) \
81 do { \
82 if ((a_pTimer)->pCritSect) \
83 { \
84 VMSTATE enmState; \
85 PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(a_pVM, a_pTimer); \
86 AssertMsg( pCritSect \
87 && ( PDMCritSectIsOwner((a_pVM), pCritSect) \
88 || (enmState = (a_pVM)->enmVMState) == VMSTATE_CREATING \
89 || enmState == VMSTATE_RESETTING \
90 || enmState == VMSTATE_RESETTING_LS ),\
91 ("pTimer=%p (%s) pCritSect=%p (%s)\n", a_pTimer, (a_pTimer)->szName, \
92 (a_pTimer)->pCritSect, R3STRING(PDMR3CritSectName((a_pTimer)->pCritSect)) )); \
93 } \
94 } while (0)
95#else
96# define TMTIMER_ASSERT_CRITSECT(pVM, pTimer) do { } while (0)
97#endif
98
99/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
100 * Checks for lock order trouble between the timer critsect and the virtual
101 * sync critsect. The virtual sync critsect must always be entered before
102 * the one associated with the timer (see TMR3TimerQueuesDo). It is OK if there
103 * isn't any critical section associated with the timer or if the calling thread
104 * doesn't own it, ASSUMING of course that the thread using this macro is going
105 * to enter the virtual sync critical section anyway.
106 *
107 * @remarks This is a slightly relaxed timer locking attitude compared to
108 * TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
109 * should know what it's doing if it's stopping or starting a timer
110 * without taking the device lock.
111 */
112#ifdef VBOX_STRICT
113# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
114 do { \
115 if ((pTimer)->pCritSect) \
116 { \
117 VMSTATE enmState; \
118 PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pVM, pTimer); \
119 AssertMsg( pCritSect \
120 && ( !PDMCritSectIsOwner((pVM), pCritSect) \
121 || PDMCritSectIsOwner((pVM), &(pVM)->tm.s.VirtualSyncLock) \
122 || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
123 || enmState == VMSTATE_RESETTING \
124 || enmState == VMSTATE_RESETTING_LS ),\
125 ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, pTimer->szName, \
126 (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
127 } \
128 } while (0)
129#else
130# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
131#endif
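
/*
 * Editorial sketch of the lock order the two assertion macros above enforce
 * (illustrative only, not part of the original source): code that needs both
 * the virtual sync lock and a timer's critical section must take them in the
 * same order as TMR3TimerQueuesDo, i.e.:
 *
 * @code
 *      PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VERR_IGNORED);
 *      PDMCritSectEnter(pVM, pTimer->pCritSect, VERR_IGNORED);
 *      ...
 *      PDMCritSectLeave(pVM, pTimer->pCritSect);
 *      PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
 * @endcode
 */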
132
133
134#if defined(VBOX_STRICT) && defined(IN_RING0)
135/**
136 * Helper for TMTIMER_GET_CRITSECT
137 * @todo This needs a redo!
138 */
139DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PVMCC pVM, PTMTIMER pTimer)
140{
141 if (pTimer->enmType == TMTIMERTYPE_DEV)
142 {
143 RTCCUINTREG fSavedFlags = ASMAddFlags(X86_EFL_AC); /** @todo fix ring-3 pointer use */
144 PPDMDEVINSR0 pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
145 ASMSetFlags(fSavedFlags);
146 struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
147 if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
148 return pDevInsR0->pCritSectRoR0;
149 uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
150 if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
151 return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
152 }
153 RT_NOREF(pVM);
154 Assert(pTimer->pCritSect == NULL);
155 return NULL;
156}
157#endif /* VBOX_STRICT && IN_RING0 */
158
159
160/**
161 * Notification that execution is about to start.
162 *
163 * This call must always be paired with a TMNotifyEndOfExecution call.
164 *
165 * The function may, depending on the configuration, resume the TSC and future
166 * clocks that only tick when we're executing guest code.
167 *
168 * @param pVM The cross context VM structure.
169 * @param pVCpu The cross context virtual CPU structure.
170 */
171VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
172{
173#ifndef VBOX_WITHOUT_NS_ACCOUNTING
174 pVCpu->tm.s.uTscStartExecuting = SUPReadTsc();
175 pVCpu->tm.s.fExecuting = true;
176#endif
177 if (pVM->tm.s.fTSCTiedToExecution)
178 tmCpuTickResume(pVM, pVCpu);
179}
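
/*
 * Illustrative pairing sketch (editorial, not part of the original source):
 * an execution loop brackets every guest run with the start/end notifications
 * so the TSC and the nanosecond accounting stay consistent. RunGuestCode is a
 * hypothetical stand-in for the real inner loop:
 *
 * @code
 *      TMNotifyStartOfExecution(pVM, pVCpu);
 *      VBOXSTRICTRC rcStrict = RunGuestCode(pVM, pVCpu);
 *      TMNotifyEndOfExecution(pVM, pVCpu, SUPReadTsc());
 * @endcode
 */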
180
181
182/**
183 * Notification that execution has ended.
184 *
185 * This call must always be paired with a TMNotifyStartOfExecution call.
186 *
187 * The function may, depending on the configuration, suspend the TSC and future
188 * clocks that only tick when we're executing guest code.
189 *
190 * @param pVM The cross context VM structure.
191 * @param pVCpu The cross context virtual CPU structure.
192 * @param uTsc TSC value when exiting guest context.
193 */
194VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTsc)
195{
196 if (pVM->tm.s.fTSCTiedToExecution)
197 tmCpuTickPause(pVCpu); /** @todo use uTsc here if we can. */
198
199#ifndef VBOX_WITHOUT_NS_ACCOUNTING
200 /*
201 * Calculate the elapsed tick count and convert it to nanoseconds.
202 */
203# ifdef IN_RING3
204 PSUPGLOBALINFOPAGE const pGip = g_pSUPGlobalInfoPage;
205 uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDelta(pGip);
206 uint64_t const uCpuHz = pGip ? SUPGetCpuHzFromGip(pGip) : pVM->tm.s.cTSCTicksPerSecondHost;
207# else
208 uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
209 uint64_t const uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
210# endif
211# ifndef VBOX_VMM_TARGET_ARMV8 /* This is perfectly valid on ARM if the guest is halting in the hypervisor. */
212 AssertStmt(cTicks <= uCpuHz << 2, cTicks = uCpuHz << 2); /* max 4 sec */
213# endif
214
215 uint64_t cNsExecutingDelta;
216 if (uCpuHz < _4G)
217 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks, RT_NS_1SEC, uCpuHz);
218 else if (uCpuHz < 16*_1G64)
219 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 2, RT_NS_1SEC, uCpuHz >> 2);
220 else
221 {
222 Assert(uCpuHz < 64 * _1G64);
223 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 4, RT_NS_1SEC, uCpuHz >> 4);
224 }
225
226 /*
227 * Update the data.
228 *
229 * Note! We're not using strict memory ordering here to speed things up.
230 * The data is in a single cache line and this thread is the only
231 * one writing to that line, so I cannot quite imagine why we would
232 * need any strict ordering here.
233 */
234 uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
235 uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
236 ASMCompilerBarrier();
237 pVCpu->tm.s.fExecuting = false;
238 pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
239 pVCpu->tm.s.cPeriodsExecuting++;
240 ASMCompilerBarrier();
241 ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
242
243 /*
244 * Update stats.
245 */
246# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
247 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
248 if (cNsExecutingDelta < 5000)
249 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
250 else if (cNsExecutingDelta < 50000)
251 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
252 else
253 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
254# endif
255
256 /* The timer triggers occasional updating of the other and total stats: */
257 if (RT_LIKELY(!pVCpu->tm.s.fUpdateStats))
258 { /*likely*/ }
259 else
260 {
261 pVCpu->tm.s.fUpdateStats = false;
262
263 uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
264 uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;
265
266# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
267 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
268 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
269 if (cNsOtherNewDelta > 0)
270 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
271# endif
272
273 pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
274 pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
275 }
276
277#endif
278}
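
/*
 * Editorial note: the uTimesGen updates above implement the writer half of a
 * seqlock: the generation is odd while an update is in flight and even when
 * the data is stable. A hypothetical reader sketch (assumed, not from the
 * original source) retries until an even, unchanged generation brackets its
 * reads:
 *
 * @code
 *      uint32_t uGen;
 *      uint64_t cNsExecuting;
 *      do
 *      {
 *          uGen = ASMAtomicUoReadU32(&pVCpu->tm.s.uTimesGen);
 *          ASMCompilerBarrier();
 *          cNsExecuting = pVCpu->tm.s.cNsExecuting;
 *          ASMCompilerBarrier();
 *      } while ((uGen & 1) || uGen != ASMAtomicUoReadU32(&pVCpu->tm.s.uTimesGen));
 * @endcode
 */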
279
280
281/**
282 * Notification that the CPU is entering the halt state.
283 *
284 * This call must always be paired with a TMNotifyEndOfHalt call.
285 *
286 * The function may, depending on the configuration, resume the TSC and future
287 * clocks that only tick when we're halted.
288 *
289 * @param pVCpu The cross context virtual CPU structure.
290 */
291VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
292{
293 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
294
295#ifndef VBOX_WITHOUT_NS_ACCOUNTING
296 pVCpu->tm.s.nsStartHalting = RTTimeNanoTS();
297 pVCpu->tm.s.fHalting = true;
298#endif
299
300 if ( pVM->tm.s.fTSCTiedToExecution
301 && !pVM->tm.s.fTSCNotTiedToHalt)
302 tmCpuTickResume(pVM, pVCpu);
303}
304
305
306/**
307 * Notification that the CPU is leaving the halt state.
308 *
309 * This call must always be paired with a TMNotifyStartOfHalt call.
310 *
311 * The function may, depending on the configuration, suspend the TSC and future
312 * clocks that only tick when we're halted.
313 *
314 * @param pVCpu The cross context virtual CPU structure.
315 */
316VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
317{
318 PVM pVM = pVCpu->CTX_SUFF(pVM);
319
320 if ( pVM->tm.s.fTSCTiedToExecution
321 && !pVM->tm.s.fTSCNotTiedToHalt)
322 tmCpuTickPause(pVCpu);
323
324#ifndef VBOX_WITHOUT_NS_ACCOUNTING
325 uint64_t const u64NsTs = RTTimeNanoTS();
326 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.nsStartTotal;
327 uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.nsStartHalting;
328 uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
329 uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;
330
331 uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
332 ASMCompilerBarrier();
333 pVCpu->tm.s.fHalting = false;
334 pVCpu->tm.s.fUpdateStats = false;
335 pVCpu->tm.s.cNsHalted = cNsHaltedNew;
336 pVCpu->tm.s.cPeriodsHalted++;
337 ASMCompilerBarrier();
338 ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
339
340# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
341 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
342 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
343 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
344 if (cNsOtherNewDelta > 0)
345 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
346# endif
347 pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
348 pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
349#endif
350}
351
352
353/**
354 * Raise the timer force action flag and notify the dedicated timer EMT.
355 *
356 * @param pVM The cross context VM structure.
357 */
358DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
359{
360 VMCPUID idCpu = pVM->tm.s.idTimerCpu;
361 AssertReturnVoid(idCpu < pVM->cCpus);
362 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);
363
364 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
365 {
366 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
367 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
368#ifdef IN_RING3
369 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
370#endif
371 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
372 }
373}
374
375
376/**
377 * Schedule the queue which was changed.
378 */
379DECLINLINE(void) tmSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
380{
381 int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
382 if (RT_SUCCESS_NP(rc))
383 {
384 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
385 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
386 tmTimerQueueSchedule(pVM, pQueueCC, pQueue);
387#ifdef VBOX_STRICT
388 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
389#endif
390 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
391 PDMCritSectLeave(pVM, &pQueue->TimerLock);
392 return;
393 }
394
395 TMTIMERSTATE enmState = pTimer->enmState;
396 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
397 tmScheduleNotify(pVM);
398}
399
400
401/**
402 * Try to change the state to enmStateNew from enmStateOld
403 * (unlike tmTimerTryWithLink, this does not touch the scheduling queue).
404 *
405 * @returns Success indicator.
406 * @param pTimer Timer in question.
407 * @param enmStateNew The new timer state.
408 * @param enmStateOld The old timer state.
409 */
410DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
411{
412 /*
413 * Attempt state change.
414 */
415 bool fRc;
416 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
417 return fRc;
418}
419
420
421/**
422 * Links the timer onto the scheduling queue.
423 *
424 * @param pQueueCC The current context queue (same as @a pQueue for
425 * ring-3).
426 * @param pQueue The shared queue data.
427 * @param pTimer The timer.
428 *
429 * @todo FIXME: Look into potential race with the thread running the queues
430 * and stuff.
431 */
432DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
433{
434 Assert(pTimer->idxScheduleNext == UINT32_MAX);
435 const uint32_t idxHeadNew = pTimer - &pQueueCC->paTimers[0];
436 AssertReturnVoid(idxHeadNew < pQueueCC->cTimersAlloc);
437
438 uint32_t idxHead;
439 do
440 {
441 idxHead = pQueue->idxSchedule;
442 Assert(idxHead == UINT32_MAX || idxHead < pQueueCC->cTimersAlloc);
443 pTimer->idxScheduleNext = idxHead;
444 } while (!ASMAtomicCmpXchgU32(&pQueue->idxSchedule, idxHeadNew, idxHead));
445}
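
/*
 * Editorial note: the CAS loop above is a lock-free LIFO push onto
 * pQueue->idxSchedule. The consumer side detaches the whole list with a
 * single atomic exchange, as tmTimerQueueSchedule does further down:
 *
 * @code
 *      uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX);
 *      while (idxNext != UINT32_MAX)
 *      {
 *          PTMTIMER pTimer = &pQueueCC->paTimers[idxNext];
 *          idxNext = pTimer->idxScheduleNext;
 *          pTimer->idxScheduleNext = UINT32_MAX;
 *          // ... schedule pTimer ...
 *      }
 * @endcode
 */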
446
447
448/**
449 * Try to change the state to enmStateNew from enmStateOld
450 * and link the timer into the scheduling queue.
451 *
452 * @returns Success indicator.
453 * @param pQueueCC The current context queue (same as @a pQueue for
454 * ring-3).
455 * @param pQueue The shared queue data.
456 * @param pTimer Timer in question.
457 * @param enmStateNew The new timer state.
458 * @param enmStateOld The old timer state.
459 */
460DECLINLINE(bool) tmTimerTryWithLink(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer,
461 TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
462{
463 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
464 {
465 tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
466 return true;
467 }
468 return false;
469}
470
471
472/**
473 * Links a timer into the active list of a timer queue.
474 *
475 * @param pVM The cross context VM structure.
476 * @param pQueueCC The current context queue (same as @a pQueue for
477 * ring-3).
478 * @param pQueue The shared queue data.
479 * @param pTimer The timer.
480 * @param u64Expire The timer expiration time.
481 *
482 * @remarks Called while owning the relevant queue lock.
483 */
484DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue,
485 PTMTIMER pTimer, uint64_t u64Expire)
486{
487 Assert(pTimer->idxNext == UINT32_MAX);
488 Assert(pTimer->idxPrev == UINT32_MAX);
489 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */
490 RT_NOREF(pVM);
491
492 PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
493 if (pCur)
494 {
495 for (;; pCur = tmTimerGetNext(pQueueCC, pCur))
496 {
497 if (pCur->u64Expire > u64Expire)
498 {
499 const PTMTIMER pPrev = tmTimerGetPrev(pQueueCC, pCur);
500 tmTimerSetNext(pQueueCC, pTimer, pCur);
501 tmTimerSetPrev(pQueueCC, pTimer, pPrev);
502 if (pPrev)
503 tmTimerSetNext(pQueueCC, pPrev, pTimer);
504 else
505 {
506 tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
507 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
508 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive head", pTimer->szName);
509 }
510 tmTimerSetPrev(pQueueCC, pCur, pTimer);
511 return;
512 }
513 if (pCur->idxNext == UINT32_MAX)
514 {
515 tmTimerSetNext(pQueueCC, pCur, pTimer);
516 tmTimerSetPrev(pQueueCC, pTimer, pCur);
517 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive tail", pTimer->szName);
518 return;
519 }
520 }
521 }
522 else
523 {
524 tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
525 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
526 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive empty", pTimer->szName);
527 }
528}
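
/*
 * Editorial note: tmTimerQueueLinkActive keeps the active list sorted by
 * ascending u64Expire and mirrors the head element's expiration time into
 * pQueue->u64Expire, so pollers can pick up the next deadline with a single
 * atomic read (see tmTimerPollInternal).
 */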
529
530
531
532/**
533 * Schedules the given timer on the given queue.
534 *
535 * @param pVM The cross context VM structure.
536 * @param pQueueCC The current context queue (same as @a pQueue for
537 * ring-3).
538 * @param pQueue The shared queue data.
539 * @param pTimer The timer that needs scheduling.
540 *
541 * @remarks Called while owning the lock.
542 */
543DECLINLINE(void) tmTimerQueueScheduleOne(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
544{
545 Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
546 RT_NOREF(pVM);
547
548 /*
549 * Processing.
550 */
551 unsigned cRetries = 2;
552 do
553 {
554 TMTIMERSTATE enmState = pTimer->enmState;
555 switch (enmState)
556 {
557 /*
558 * Reschedule timer (in the active list).
559 */
560 case TMTIMERSTATE_PENDING_RESCHEDULE:
561 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
562 break; /* retry */
563 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
564 RT_FALL_THRU();
565
566 /*
567 * Schedule timer (insert into the active list).
568 */
569 case TMTIMERSTATE_PENDING_SCHEDULE:
570 Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
571 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
572 break; /* retry */
573 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, pTimer->u64Expire);
574 return;
575
576 /*
577 * Stop the timer in active list.
578 */
579 case TMTIMERSTATE_PENDING_STOP:
580 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
581 break; /* retry */
582 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
583 RT_FALL_THRU();
584
585 /*
586 * Stop the timer (not on the active list).
587 */
588 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
589 Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
590 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
591 break;
592 return;
593
594 /*
595 * The timer is pending destruction by TMR3TimerDestroy, our caller.
596 * Nothing to do here.
597 */
598 case TMTIMERSTATE_DESTROY:
599 break;
600
601 /*
602 * Postpone these until they get into the right state.
603 */
604 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
605 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
606 tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
607 STAM_COUNTER_INC(&pVM->tm.s.CTX_SUFF_Z(StatPostponed));
608 return;
609
610 /*
611 * None of these can be in the schedule.
612 */
613 case TMTIMERSTATE_FREE:
614 case TMTIMERSTATE_STOPPED:
615 case TMTIMERSTATE_ACTIVE:
616 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
617 case TMTIMERSTATE_EXPIRED_DELIVER:
618 default:
619 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
620 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
621 return;
622 }
623 } while (cRetries-- > 0);
624}
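
/*
 * Editorial summary of the state transitions handled above:
 *      PENDING_RESCHEDULE    -> PENDING_SCHEDULE      (unlink from active)
 *      PENDING_SCHEDULE      -> ACTIVE                (link into active)
 *      PENDING_STOP          -> PENDING_STOP_SCHEDULE (unlink from active)
 *      PENDING_STOP_SCHEDULE -> STOPPED
 * The *_SET_EXPIRE states are pushed back onto the scheduling list until the
 * expiration time has been set, and DESTROY is left to TMR3TimerDestroy.
 */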
625
626
627/**
628 * Schedules the specified timer queue.
629 *
630 * @param pVM The cross context VM structure.
631 * @param pQueueCC The current context queue (same as @a pQueue for
632 * ring-3) data of the queue to schedule.
633 * @param pQueue The shared queue data of the queue to schedule.
634 *
635 * @remarks Called while owning the lock.
636 */
637void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
638{
639 Assert(PDMCritSectIsOwner(pVM, &pQueue->TimerLock));
640
641 /*
642 * Dequeue the scheduling list and iterate it.
643 */
644 uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX);
645 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, idxNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, idxNext, pQueue->u64Expire));
646 while (idxNext != UINT32_MAX)
647 {
648 AssertBreak(idxNext < pQueueCC->cTimersAlloc);
649
650 /*
651 * Unlink the head timer and take down the index of the next one.
652 */
653 PTMTIMER pTimer = &pQueueCC->paTimers[idxNext];
654 idxNext = pTimer->idxScheduleNext;
655 pTimer->idxScheduleNext = UINT32_MAX;
656
657 /*
658 * Do the scheduling.
659 */
660 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .szName=%s}\n",
661 pTimer, tmTimerState(pTimer->enmState), pQueue->enmClock, pTimer->enmType, pTimer->szName));
662 tmTimerQueueScheduleOne(pVM, pQueueCC, pQueue, pTimer);
663 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
664 }
665 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
666}
667
668
669#ifdef VBOX_STRICT
670/**
671 * Checks that the timer queues are sane.
672 *
673 * @param pVM The cross context VM structure.
674 * @param pszWhere Caller location clue.
675 */
676void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere)
677{
678 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
679 {
680 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
681 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
682 Assert(pQueue->enmClock == (TMCLOCK)idxQueue);
683
684 int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
685 if (RT_SUCCESS(rc))
686 {
687 if ( pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC
688 || PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
689 {
690 /* Check the linking of the active lists. */
691 PTMTIMER pPrev = NULL;
692 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
693 pCur;
694 pPrev = pCur, pCur = tmTimerGetNext(pQueueCC, pCur))
695 {
696 AssertMsg(tmTimerGetPrev(pQueueCC, pCur) == pPrev, ("%s: %p != %p\n", pszWhere, tmTimerGetPrev(pQueueCC, pCur), pPrev));
697 TMTIMERSTATE enmState = pCur->enmState;
698 switch (enmState)
699 {
700 case TMTIMERSTATE_ACTIVE:
701 AssertMsg( pCur->idxScheduleNext == UINT32_MAX
702 || pCur->enmState != TMTIMERSTATE_ACTIVE,
703 ("%s: %RI32\n", pszWhere, pCur->idxScheduleNext));
704 break;
705 case TMTIMERSTATE_PENDING_STOP:
706 case TMTIMERSTATE_PENDING_RESCHEDULE:
707 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
708 break;
709 default:
710 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
711 break;
712 }
713 }
714
715# ifdef IN_RING3
716 /* Go thru all the timers and check that the active ones are all in the active lists. */
717 int const rcAllocLock = PDMCritSectRwTryEnterShared(pVM, &pQueue->AllocLock);
718 uint32_t idxTimer = pQueue->cTimersAlloc;
719 uint32_t cFree = 0;
720 while (idxTimer-- > 0)
721 {
722 PTMTIMER const pTimer = &pQueue->paTimers[idxTimer];
723 TMTIMERSTATE const enmState = pTimer->enmState;
724 switch (enmState)
725 {
726 case TMTIMERSTATE_FREE:
727 cFree++;
728 break;
729
730 case TMTIMERSTATE_ACTIVE:
731 case TMTIMERSTATE_PENDING_STOP:
732 case TMTIMERSTATE_PENDING_RESCHEDULE:
733 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
734 {
735 PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
736 Assert(pTimer->idxPrev != UINT32_MAX || pTimer == pCurAct);
737 while (pCurAct && pCurAct != pTimer)
738 pCurAct = tmTimerGetNext(pQueueCC, pCurAct);
739 Assert(pCurAct == pTimer);
740 break;
741 }
742
743 case TMTIMERSTATE_PENDING_SCHEDULE:
744 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
745 case TMTIMERSTATE_STOPPED:
746 case TMTIMERSTATE_EXPIRED_DELIVER:
747 {
748 Assert(pTimer->idxNext == UINT32_MAX);
749 Assert(pTimer->idxPrev == UINT32_MAX);
750 for (PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
751 pCurAct;
752 pCurAct = tmTimerGetNext(pQueueCC, pCurAct))
753 {
754 Assert(pCurAct != pTimer);
755 Assert(tmTimerGetNext(pQueueCC, pCurAct) != pTimer);
756 Assert(tmTimerGetPrev(pQueueCC, pCurAct) != pTimer);
757 }
758 break;
759 }
760
761 /* ignore */
762 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
763 break;
764
765 case TMTIMERSTATE_INVALID:
766 Assert(idxTimer == 0);
767 break;
768
769 /* shouldn't get here! */
770 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
771 case TMTIMERSTATE_DESTROY:
772 default:
773 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
774 break;
775 }
776
777 /* Check the handle value. */
778 if (enmState > TMTIMERSTATE_INVALID && enmState < TMTIMERSTATE_DESTROY)
779 {
780 Assert((pTimer->hSelf & TMTIMERHANDLE_TIMER_IDX_MASK) == idxTimer);
781 Assert(((pTimer->hSelf >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) & TMTIMERHANDLE_QUEUE_IDX_SMASK) == idxQueue);
782 }
783 }
784 if (RT_SUCCESS(rcAllocLock))
785 {
786 Assert(cFree == pQueue->cTimersFree);
787 PDMCritSectRwLeaveShared(pVM, &pQueue->AllocLock);
788 }
789 else
790 Assert(cFree >= pQueue->cTimersFree); /* Can be lower as tmr3TimerCreate may run concurrently. */
791
792# endif /* IN_RING3 */
793
794 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
795 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
796 }
797 PDMCritSectLeave(pVM, &pQueue->TimerLock);
798 }
799 }
800}
801#endif /* VBOX_STRICT */
802
803#ifdef VBOX_HIGH_RES_TIMERS_HACK
804
805/**
806 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
807 * EMT is polling.
808 *
809 * @returns See tmTimerPollInternal.
810 * @param pVM The cross context VM structure.
811 * @param u64Now Current virtual clock timestamp.
812 * @param u64Delta The delta to the next event in ticks of the
813 * virtual clock.
814 * @param pu64Delta Where to return the delta.
815 */
816DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
817{
818 Assert(!(u64Delta & RT_BIT_64(63)));
819
820 if (!pVM->tm.s.fVirtualWarpDrive)
821 {
822 *pu64Delta = u64Delta;
823 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
824 }
825
826 /*
827 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
828 */
829 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
830 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
831
832 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
833 u64GipTime -= u64Start; /* the start is GIP time. */
834 if (u64GipTime >= u64Delta)
835 {
836 u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
837 u64Delta = ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
838 }
839 else
840 {
841 u64Delta -= u64GipTime;
842 u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
843 u64Delta += u64GipTime;
844 }
845 *pu64Delta = u64Delta;
846 u64GipTime += u64Start;
847 return u64GipTime;
848}
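
/*
 * Worked example for the warp adjustment above (editorial): with a 200% warp
 * drive (u32Pct = 200) the virtual clock runs twice as fast as the GIP clock,
 * so a 10ms virtual delta elapses after only 10 * 100 / 200 = 5ms of GIP
 * time. The 100/u32Pct scaling is applied to both the absolute deadline and
 * the returned delta, splitting the delta at the warp start when necessary.
 */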
849
850
851/**
852 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
853 * than the one dedicated to timer work.
854 *
855 * @returns See tmTimerPollInternal.
856 * @param pVM The cross context VM structure.
857 * @param u64Now Current virtual clock timestamp.
858 * @param pu64Delta Where to return the delta.
859 */
860DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
861{
862 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
863 *pu64Delta = s_u64OtherRet;
864 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
865}
866
867
868/**
869 * Worker for tmTimerPollInternal.
870 *
871 * @returns See tmTimerPollInternal.
872 * @param pVM The cross context VM structure.
873 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
874 * @param pVCpuDst The cross context virtual CPU structure of the dedicated
875 * timer EMT.
876 * @param u64Now Current virtual clock timestamp.
877 * @param pu64Delta Where to return the delta.
878 * @param pCounter The statistics counter to update.
879 */
880DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
881 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
882{
883 STAM_COUNTER_INC(pCounter); NOREF(pCounter);
884 if (pVCpuDst != pVCpu)
885 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
886 *pu64Delta = 0;
887 return 0;
888}
889
890
891/**
892 * Common worker for TMTimerPollGIP and TMTimerPoll.
893 *
894 * This function is called before FFs are checked in the inner execution EM loops.
895 *
896 * @returns The GIP timestamp of the next event.
897 * 0 if the next event has already expired.
898 *
899 * @param pVM The cross context VM structure.
900 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
901 * @param pu64Delta Where to store the delta.
902 * @param pu64Now Where to store the current time. Optional.
903 *
904 * @thread The emulation thread.
905 *
906 * @remarks GIP uses ns ticks.
907 */
908DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta, uint64_t *pu64Now)
909{
910 VMCPUID idCpu = pVM->tm.s.idTimerCpu;
911 AssertReturn(idCpu < pVM->cCpus, 0);
912 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);
913
914 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
915 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
916 if (pu64Now)
917 *pu64Now = u64Now;
918
919 /*
920 * Return straight away if the timer FF is already set ...
921 */
922 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
923 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
924
925 /*
926 * ... or if timers are being run.
927 */
928 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
929 {
930 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
931 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
932 }
933
934 /*
935 * Check for TMCLOCK_VIRTUAL expiration.
936 */
937 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire);
938 const int64_t i64Delta1 = u64Expire1 - u64Now;
939 if (i64Delta1 <= 0)
940 {
941 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
942 {
943 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
944 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
945 }
946 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
947 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
948 }
949
950 /*
951 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
952 * This isn't quite as straightforward if we're in a catch-up; not only do
953 * we have to adjust the 'now' but we have to adjust the delta as well.
954 */
955
956 /*
957 * Optimistic lockless approach.
958 */
959 uint64_t u64VirtualSyncNow;
960 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
961 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
962 {
963 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
964 {
965 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
966 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
967 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
968 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
969 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
970 {
971 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
972 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
973 if (i64Delta2 > 0)
974 {
975 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
976 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
977
978 if (pVCpu == pVCpuDst)
979 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
980 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
981 }
982
983 if ( !pVM->tm.s.fRunningQueues
984 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
985 {
986 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
987 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
988 }
989
990 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
991 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
992 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
993 }
994 }
995 }
996 else
997 {
998 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
999 LogFlow(("TMTimerPoll: stopped\n"));
1000 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1001 }
1002
1003 /*
1004 * Complicated lockless approach.
1005 */
1006 uint64_t off;
1007 uint32_t u32Pct = 0;
1008 bool fCatchUp;
1009 int cOuterTries = 42;
1010 for (;; cOuterTries--)
1011 {
1012 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
1013 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
1014 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
1015 if (fCatchUp)
1016 {
1017 /* No changes allowed, try to get a consistent set of parameters. */
1018 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
1019 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
1020 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
1021 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
1022 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
1023 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
1024 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
1025 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
1026 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
1027 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
1028 || cOuterTries <= 0)
1029 {
1030 uint64_t u64Delta = u64Now - u64Prev;
1031 if (RT_LIKELY(!(u64Delta >> 32)))
1032 {
1033 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
1034 if (off > u64Sub + offGivenUp)
1035 off -= u64Sub;
1036 else /* we've completely caught up. */
1037 off = offGivenUp;
1038 }
1039 else
1040 /* More than 4 seconds since last time (or negative), ignore it. */
1041 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
1042
1043 /* Check that we're still running and in catch up. */
1044 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
1045 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
1046 break;
1047 }
1048 }
1049 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
1050 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
1051 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
1052 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
1053 break; /* Got a consistent offset */
1054
1055 /* Repeat the initial checks before iterating. */
1056 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
1057 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
1058 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
1059 {
1060 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
1061 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1062 }
1063 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
1064 {
1065 LogFlow(("TMTimerPoll: stopped\n"));
1066 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1067 }
1068 if (cOuterTries <= 0)
1069 break; /* that's enough */
1070 }
1071 if (cOuterTries <= 0)
1072 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
1073 u64VirtualSyncNow = u64Now - off;
1074
1075 /* Calc delta and see if we've got a virtual sync hit. */
1076 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
1077 if (i64Delta2 <= 0)
1078 {
1079 if ( !pVM->tm.s.fRunningQueues
1080 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
1081 {
1082 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
1083 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
1084 }
1085 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
1086 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
1087 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1088 }
1089
1090 /*
1091 * Return the time left to the next event.
1092 */
1093 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
1094 if (pVCpu == pVCpuDst)
1095 {
1096 if (fCatchUp)
1097 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
1098 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
1099 }
1100 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1101}
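
/*
 * Worked example for the catch-up scaling at the end of the function above
 * (editorial): while catching up at u32Pct = 100 the virtual sync clock runs
 * at 200% of the virtual clock, so a 10ms virtual sync delta elapses after
 * 10 * 100 / (100 + 100) = 5ms on the virtual clock, which is the delta the
 * caller actually has to wait.
 */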
1102
1103
1104/**
1105 * Set FF if we've passed the next virtual event.
1106 *
1107 * This function is called before FFs are checked in the inner execution EM loops.
1108 *
1109 * @returns true if timers are pending, false if not.
1110 *
1111 * @param pVM The cross context VM structure.
1112 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1113 * @thread The emulation thread.
1114 */
1115VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
1116{
1117 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1118 uint64_t off = 0;
1119 tmTimerPollInternal(pVM, pVCpu, &off, NULL);
1120 return off == 0;
1121}
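
/*
 * Usage sketch (editorial, hypothetical caller): the inner EM loop typically
 * polls right before acting on the force flags:
 *
 * @code
 *      if (TMTimerPollBool(pVM, pVCpu))
 *          break; /* VMCPU_FF_TIMER is set; leave the loop and service timers. */
 * @endcode
 */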
1122
1123
1124/**
1125 * Set FF if we've passed the next virtual event and return the virtual time in nanoseconds.
1126 *
1127 * This function is called before FFs are checked in the inner execution EM loops.
1128 *
1129 * This is used by the IEM recompiler for polling timers while also providing a
1130 * free time source for recent use tracking and such.
1131 *
1132 * @returns Nanoseconds till the next event, 0 if event already pending.
1133 *
1134 * @param pVM The cross context VM structure.
1135 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1136 * @param pnsNow Where to return the current virtual time in nanoseconds.
1137 * @thread The emulation thread.
1138 */
1139VMM_INT_DECL(uint64_t) TMTimerPollBoolWithNanoTS(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pnsNow)
1140{
1141 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1142 uint64_t offDelta = 0;
1143 tmTimerPollInternal(pVM, pVCpu, &offDelta, pnsNow);
1144 return offDelta;
1145}
1146
1147
1148/**
1149 * Set FF if we've passed the next virtual event.
1150 *
1151 * This function is called before FFs are checked in the inner execution EM loops.
1152 *
1153 * @param pVM The cross context VM structure.
1154 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1155 * @thread The emulation thread.
1156 */
1157VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
1158{
1159 uint64_t off;
1160 tmTimerPollInternal(pVM, pVCpu, &off, NULL);
1161}
1162
1163
1164/**
1165 * Set FF if we've passed the next virtual event.
1166 *
1167 * This function is called before FFs are checked in the inner execution EM loops.
1168 *
1169 * @returns The GIP timestamp of the next event.
1170 * 0 if the next event has already expired.
1171 * @param pVM The cross context VM structure.
1172 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1173 * @param pu64Delta Where to store the delta.
1174 * @thread The emulation thread.
1175 */
1176VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
1177{
1178 return tmTimerPollInternal(pVM, pVCpu, pu64Delta, NULL);
1179}
1180
1181#endif /* VBOX_HIGH_RES_TIMERS_HACK */
1182
1183/**
1184 * Locks the timer clock.
1185 *
1186 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
1187 * if the clock does not have a lock.
1188 * @param pVM The cross context VM structure.
1189 * @param hTimer Timer handle as returned by one of the create functions.
1190 * @param rcBusy What to return in ring-0 and raw-mode context if the
1191 * lock is busy. Pass VINF_SUCCESS to acquired the
1192 * critical section thru a ring-3 call if necessary.
1193 *
1194 * @remarks Currently only supported on timers using the virtual sync clock.
1195 */
1196VMMDECL(int) TMTimerLock(PVMCC pVM, TMTIMERHANDLE hTimer, int rcBusy)
1197{
1198 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1199 AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
1200 return PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, rcBusy);
1201}
1202
1203
1204/**
1205 * Unlocks a timer clock locked by TMTimerLock.
1206 *
1207 * @param pVM The cross context VM structure.
1208 * @param hTimer Timer handle as returned by one of the create functions.
1209 */
1210VMMDECL(void) TMTimerUnlock(PVMCC pVM, TMTIMERHANDLE hTimer)
1211{
1212 TMTIMER_HANDLE_TO_VARS_RETURN_VOID(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1213 AssertReturnVoid(idxQueue == TMCLOCK_VIRTUAL_SYNC);
1214 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1215}
1216
1217
1218/**
1219 * Checks if the current thread owns the timer clock lock.
1220 *
1221 * @returns @c true if it's the owner, @c false if not.
1222 * @param pVM The cross context VM structure.
1223 * @param hTimer Timer handle as returned by one of the create functions.
1224 */
1225VMMDECL(bool) TMTimerIsLockOwner(PVMCC pVM, TMTIMERHANDLE hTimer)
1226{
1227 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1228 AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, false);
1229 return PDMCritSectIsOwner(pVM, &pVM->tm.s.VirtualSyncLock);
1230}
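
/*
 * Usage sketch (editorial, not part of the original source): a device
 * serializing a virtual sync timer update against the clock:
 *
 * @code
 *      int rc = TMTimerLock(pVM, hTimer, VERR_IGNORED);
 *      AssertRC(rc);
 *      TMTimerSet(pVM, hTimer, u64Expire);
 *      TMTimerUnlock(pVM, hTimer);
 * @endcode
 */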
1231
1232
1233/**
1234 * Optimized TMTimerSet code path for starting an inactive timer.
1235 *
1236 * @returns VBox status code.
1237 *
1238 * @param pVM The cross context VM structure.
1239 * @param pTimer The timer handle.
1240 * @param u64Expire The new expire time.
1241 * @param pQueue Pointer to the shared timer queue data.
1242 * @param idxQueue The queue index.
1243 */
1244static int tmTimerSetOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire, PTMTIMERQUEUE pQueue, uint32_t idxQueue)
1245{
1246 Assert(pTimer->idxPrev == UINT32_MAX);
1247 Assert(pTimer->idxNext == UINT32_MAX);
1248 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1249
1250 /*
1251 * Calculate and set the expiration time.
1252 */
1253 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
1254 {
1255 uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
1256 AssertMsgStmt(u64Expire >= u64Last,
1257 ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
1258 u64Expire = u64Last);
1259 }
1260 ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
1261 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, pTimer->szName, u64Expire));
1262
1263 /*
1264 * Link the timer into the active list.
1265 */
1266 tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue), pQueue, pTimer, u64Expire);
1267
1268 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
1269 return VINF_SUCCESS;
1270}
1271
1272
1273/**
1274 * TMTimerSet for the virtual sync timer queue.
1275 *
1276 * This employs a greatly simplified state machine by always acquiring the
1277 * queue lock and bypassing the scheduling list.
1278 *
1279 * @returns VBox status code
1280 * @param pVM The cross context VM structure.
1281 * @param pTimer The timer handle.
1282 * @param u64Expire The expiration time.
1283 */
1284static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
1285{
1286 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1287 VM_ASSERT_EMT(pVM);
1288 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1289 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1290 AssertRCReturn(rc, rc);
1291
1292 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1293 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
1294 TMTIMERSTATE const enmState = pTimer->enmState;
1295 switch (enmState)
1296 {
1297 case TMTIMERSTATE_EXPIRED_DELIVER:
1298 case TMTIMERSTATE_STOPPED:
1299 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1300 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
1301 else
1302 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);
1303
1304 AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
1305 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, pTimer->szName));
1306 pTimer->u64Expire = u64Expire;
1307 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1308 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1309 rc = VINF_SUCCESS;
1310 break;
1311
1312 case TMTIMERSTATE_ACTIVE:
1313 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
1314 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
1315 pTimer->u64Expire = u64Expire;
1316 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1317 rc = VINF_SUCCESS;
1318 break;
1319
1320 case TMTIMERSTATE_PENDING_RESCHEDULE:
1321 case TMTIMERSTATE_PENDING_STOP:
1322 case TMTIMERSTATE_PENDING_SCHEDULE:
1323 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1324 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1325 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1326 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1327 case TMTIMERSTATE_DESTROY:
1328 case TMTIMERSTATE_FREE:
1329 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1330 rc = VERR_TM_INVALID_STATE;
1331 break;
1332
1333 default:
1334 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1335 rc = VERR_TM_UNKNOWN_STATE;
1336 break;
1337 }
1338
1339 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1340 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1341 return rc;
1342}
1343
1344
1345/**
1346 * Arm a timer with a (new) expire time.
1347 *
1348 * @returns VBox status code.
1349 * @param pVM The cross context VM structure.
1350 * @param hTimer Timer handle as returned by one of the create functions.
1351 * @param u64Expire New expire time.
1352 */
1353VMMDECL(int) TMTimerSet(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t u64Expire)
1354{
1355 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1356 STAM_COUNTER_INC(&pTimer->StatSetAbsolute);
1357
1358 /* Treat virtual sync timers specially. */
1359 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
1360 return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);
1361
1362 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1363 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1364
1365 DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", pTimer->szName);
1366
1367#ifdef VBOX_WITH_STATISTICS
1368 /*
1369 * Gather optimization info.
1370 */
1371 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
1372 TMTIMERSTATE enmOrgState = pTimer->enmState;
1373 switch (enmOrgState)
1374 {
1375 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
1376 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
1377 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
1378 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
1379 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
1380 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
1381 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
1382 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
1383 }
1384#endif
1385
1386#if 1
1387 /*
1388 * The most common case is setting the timer again during the callback.
1389 * The second most common case is starting a timer at some other time.
1390 */
1391 TMTIMERSTATE enmState1 = pTimer->enmState;
1392 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
1393 || ( enmState1 == TMTIMERSTATE_STOPPED
1394 && pTimer->pCritSect))
1395 {
1396 /* Try to take the TM lock and check the state again. */
1397 int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
1398 if (RT_SUCCESS_NP(rc))
1399 {
1400 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
1401 {
1402 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire, pQueue, idxQueue);
1403 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1404 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1405 return VINF_SUCCESS;
1406 }
1407 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1408 }
1409 }
1410#endif
1411
1412 /*
1413 * Unoptimized code path.
1414 */
1415 int cRetries = 1000;
1416 do
1417 {
1418 /*
1419 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1420 */
1421 TMTIMERSTATE enmState = pTimer->enmState;
1422 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
1423 pTimer, tmTimerState(enmState), pTimer->szName, cRetries, u64Expire));
1424 switch (enmState)
1425 {
1426 case TMTIMERSTATE_EXPIRED_DELIVER:
1427 case TMTIMERSTATE_STOPPED:
1428 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1429 {
1430 Assert(pTimer->idxPrev == UINT32_MAX);
1431 Assert(pTimer->idxNext == UINT32_MAX);
1432 pTimer->u64Expire = u64Expire;
1433 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1434 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1435 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1436 return VINF_SUCCESS;
1437 }
1438 break;
1439
1440 case TMTIMERSTATE_PENDING_SCHEDULE:
1441 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1442 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1443 {
1444 pTimer->u64Expire = u64Expire;
1445 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1446 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1447 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1448 return VINF_SUCCESS;
1449 }
1450 break;
1451
1452
1453 case TMTIMERSTATE_ACTIVE:
1454 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1455 {
1456 pTimer->u64Expire = u64Expire;
1457 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1458 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1459 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1460 return VINF_SUCCESS;
1461 }
1462 break;
1463
1464 case TMTIMERSTATE_PENDING_RESCHEDULE:
1465 case TMTIMERSTATE_PENDING_STOP:
1466 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1467 {
1468 pTimer->u64Expire = u64Expire;
1469 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1470 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1471 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1472 return VINF_SUCCESS;
1473 }
1474 break;
1475
1476
1477 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1478 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1479 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1480#ifdef IN_RING3
1481 if (!RTThreadYield())
1482 RTThreadSleep(1);
1483#else
1484/** @todo call host context and yield after a couple of iterations */
1485#endif
1486 break;
1487
1488 /*
1489 * Invalid states.
1490 */
1491 case TMTIMERSTATE_DESTROY:
1492 case TMTIMERSTATE_FREE:
1493 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1494 return VERR_TM_INVALID_STATE;
1495 default:
1496 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1497 return VERR_TM_UNKNOWN_STATE;
1498 }
1499 } while (cRetries-- > 0);
1500
1501 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1502 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1503 return VERR_TM_TIMER_UNSTABLE_STATE;
1504}
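
/*
 * Re-arm sketch (editorial): the "most common case" noted above is a timer
 * callback arming itself again, e.g. one virtual millisecond ahead:
 *
 * @code
 *      TMTimerSet(pVM, hTimer, TMTimerGet(pVM, hTimer) + TMTimerFromMilli(pVM, hTimer, 1));
 * @endcode
 */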
1505
1506
1507/**
1508 * Return the current time for the specified clock, setting pu64Now if not NULL.
1509 *
1510 * @returns Current time.
1511 * @param pVM The cross context VM structure.
1512 * @param enmClock The clock to query.
1513 * @param pu64Now Optional pointer where to store the return time
1514 */
1515DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1516{
1517 uint64_t u64Now;
1518 switch (enmClock)
1519 {
1520 case TMCLOCK_VIRTUAL_SYNC:
1521 u64Now = TMVirtualSyncGet(pVM);
1522 break;
1523 case TMCLOCK_VIRTUAL:
1524 u64Now = TMVirtualGet(pVM);
1525 break;
1526 case TMCLOCK_REAL:
1527 u64Now = TMRealGet(pVM);
1528 break;
1529 default:
1530 AssertFatalMsgFailed(("%d\n", enmClock));
1531 }
1532
1533 if (pu64Now)
1534 *pu64Now = u64Now;
1535 return u64Now;
1536}
1537
1538
1539/**
1540 * Optimized TMTimerSetRelative code path.
1541 *
1542 * @returns VBox status code.
1543 *
1544 * @param pVM The cross context VM structure.
1545 * @param pTimer The timer handle.
1546 * @param cTicksToNext Clock ticks until the next time expiration.
1547 * @param pu64Now Where to return the current time stamp used.
1548 * Optional.
1549 * @param pQueueCC The context specific queue data (same as @a pQueue
1550 * for ring-3).
1551 * @param pQueue The shared queue data.
1552 */
1553static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1554 PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
1555{
1556 Assert(pTimer->idxPrev == UINT32_MAX);
1557 Assert(pTimer->idxNext == UINT32_MAX);
1558 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1559
1560 /*
1561 * Calculate and set the expiration time.
1562 */
1563 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1564 pTimer->u64Expire = u64Expire;
1565 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, pTimer->szName, u64Expire, cTicksToNext));
1566
1567 /*
1568 * Link the timer into the active list.
1569 */
1570 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", pTimer->szName);
1571 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1572
1573 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1574 return VINF_SUCCESS;
1575}
1576
1577
1578/**
1579 * TMTimerSetRelative for the virtual sync timer queue.
1580 *
1581 * This employs a greatly simplified state machine by always acquiring the
1582 * queue lock and bypassing the scheduling list.
1583 *
1584 * @returns VBox status code
1585 * @param pVM The cross context VM structure.
1586 * @param pTimer The timer to (re-)arm.
1587 * @param cTicksToNext Clock ticks until the next time expiration.
1588 * @param pu64Now Where to return the current time stamp used.
1589 * Optional.
1590 */
1591static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1592{
1593 STAM_PROFILE_START(pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1594 VM_ASSERT_EMT(pVM);
1595 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1596 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1597 AssertRCReturn(rc, rc);
1598
1599 /* Calculate the expiration tick. */
1600 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1601 if (pu64Now)
1602 *pu64Now = u64Expire;
1603 u64Expire += cTicksToNext;
1604
1605 /* Update the timer. */
1606 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1607 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
1608 TMTIMERSTATE const enmState = pTimer->enmState;
1609 switch (enmState)
1610 {
1611 case TMTIMERSTATE_EXPIRED_DELIVER:
1612 case TMTIMERSTATE_STOPPED:
1613 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1614 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1615 else
1616 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1617 pTimer->u64Expire = u64Expire;
1618 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1619 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1620 rc = VINF_SUCCESS;
1621 break;
1622
1623 case TMTIMERSTATE_ACTIVE:
1624 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1625 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
1626 pTimer->u64Expire = u64Expire;
1627 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1628 rc = VINF_SUCCESS;
1629 break;
1630
1631 case TMTIMERSTATE_PENDING_RESCHEDULE:
1632 case TMTIMERSTATE_PENDING_STOP:
1633 case TMTIMERSTATE_PENDING_SCHEDULE:
1634 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1635 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1636 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1637 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1638 case TMTIMERSTATE_DESTROY:
1639 case TMTIMERSTATE_FREE:
1640 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1641 rc = VERR_TM_INVALID_STATE;
1642 break;
1643
1644 default:
1645 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1646 rc = VERR_TM_UNKNOWN_STATE;
1647 break;
1648 }
1649
1650 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1651 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1652 return rc;
1653}
1654
1655
1656/**
1657 * Arm a timer with an expire time relative to the current time.
1658 *
1659 * @returns VBox status code.
1660 * @param pVM The cross context VM structure.
1661 * @param pTimer The timer to arm.
1662 * @param cTicksToNext Clock ticks until the next time expiration.
1663 * @param pu64Now Where to return the current time stamp used.
1664 * Optional.
1665 * @param pQueueCC The context specific queue data (same as @a pQueue
1666 * for ring-3).
1667 * @param pQueue The shared queue data.
1668 */
1669static int tmTimerSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1670 PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
1671{
1672 STAM_COUNTER_INC(&pTimer->StatSetRelative);
1673
1674 /* Treat virtual sync timers specially. */
1675 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1676 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
1677
1678 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1679 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1680
1681 DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", pTimer->szName);
1682
1683#ifdef VBOX_WITH_STATISTICS
1684 /*
1685 * Gather optimization info.
1686 */
1687 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1688 TMTIMERSTATE enmOrgState = pTimer->enmState;
1689 switch (enmOrgState)
1690 {
1691 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1692 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1693 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1694 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1695 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1696 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1697 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1698 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1699 }
1700#endif
1701
1702 /*
1703 * Try to take the TM lock and optimize the common cases.
1704 *
1705 * With the TM lock we can safely make optimizations like immediate
1706 * scheduling and we can also be 100% sure that we're not racing the
1707 * running of the timer queues. As an additional restraint we require the
1708 * timer to have a critical section associated with it to be 100% sure there
1709 * aren't concurrent operations on the timer. (This latter isn't necessary any
1710 * longer as this isn't supported for any timers, critsect or not.)
1711 *
1712 * Note! Lock ordering doesn't apply when we only _try_ to
1713 * get the innermost locks.
1714 */
1715 bool fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1716#if 1
1717 if ( fOwnTMLock
1718 && pTimer->pCritSect)
1719 {
1720 TMTIMERSTATE enmState = pTimer->enmState;
1721 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1722 || enmState == TMTIMERSTATE_STOPPED)
1723 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1724 {
1725 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1726 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1727 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1728 return VINF_SUCCESS;
1729 }
1730
1731 /* Optimize other states when it becomes necessary. */
1732 }
1733#endif
1734
1735 /*
1736 * Unoptimized path.
1737 */
1738 int rc;
1739 for (int cRetries = 1000; ; cRetries--)
1740 {
1741 /*
1742 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1743 */
1744 TMTIMERSTATE enmState = pTimer->enmState;
1745 switch (enmState)
1746 {
1747 case TMTIMERSTATE_STOPPED:
1748 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1749 {
1750 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1751 * Figure a safe way of activating this timer while the queue is
1752 * being run.
1753 * (99.9% sure that the assertion is caused by DevAPIC.cpp
1754 * re-starting the timer in response to an initial_count write.) */
1755 }
1756 RT_FALL_THRU();
1757 case TMTIMERSTATE_EXPIRED_DELIVER:
1758 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1759 {
1760 Assert(pTimer->idxPrev == UINT32_MAX);
1761 Assert(pTimer->idxNext == UINT32_MAX);
1762 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1763 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1764 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1765 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1766 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1767 rc = VINF_SUCCESS;
1768 break;
1769 }
1770 rc = VERR_TRY_AGAIN;
1771 break;
1772
1773 case TMTIMERSTATE_PENDING_SCHEDULE:
1774 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1775 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1776 {
1777 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1778 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1779 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1780 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1781 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1782 rc = VINF_SUCCESS;
1783 break;
1784 }
1785 rc = VERR_TRY_AGAIN;
1786 break;
1787
1788
1789 case TMTIMERSTATE_ACTIVE:
1790 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1791 {
1792 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1793 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1794 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1795 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1796 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1797 rc = VINF_SUCCESS;
1798 break;
1799 }
1800 rc = VERR_TRY_AGAIN;
1801 break;
1802
1803 case TMTIMERSTATE_PENDING_RESCHEDULE:
1804 case TMTIMERSTATE_PENDING_STOP:
1805 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1806 {
1807 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1808 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1809 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1810 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1811 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1812 rc = VINF_SUCCESS;
1813 break;
1814 }
1815 rc = VERR_TRY_AGAIN;
1816 break;
1817
1818
1819 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1820 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1821 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1822#ifdef IN_RING3
1823 if (!RTThreadYield())
1824 RTThreadSleep(1);
1825#else
1826/** @todo call host context and yield after a couple of iterations */
1827#endif
1828 rc = VERR_TRY_AGAIN;
1829 break;
1830
1831 /*
1832 * Invalid states.
1833 */
1834 case TMTIMERSTATE_DESTROY:
1835 case TMTIMERSTATE_FREE:
1836 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1837 rc = VERR_TM_INVALID_STATE;
1838 break;
1839
1840 default:
1841 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1842 rc = VERR_TM_UNKNOWN_STATE;
1843 break;
1844 }
1845
1846 /* switch + loop is tedious to break out of. */
1847 if (rc == VINF_SUCCESS)
1848 break;
1849
1850 if (rc != VERR_TRY_AGAIN)
1851 {
1852 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1853 break;
1854 }
1855 if (cRetries <= 0)
1856 {
1857 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1858 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1859 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1860 break;
1861 }
1862
1863 /*
1864 * Retry to gain locks.
1865 */
1866 if (!fOwnTMLock)
1867 fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1868
1869 } /* for (;;) */
1870
1871 /*
1872 * Clean up and return.
1873 */
1874 if (fOwnTMLock)
1875 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1876
1877 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1878 return rc;
1879}
1880
1881
1882/**
1883 * Arm a timer with an expire time relative to the current time.
1884 *
1885 * @returns VBox status code.
1886 * @param pVM The cross context VM structure.
1887 * @param hTimer Timer handle as returned by one of the create functions.
1888 * @param cTicksToNext Clock ticks until the next time expiration.
1889 * @param pu64Now Where to return the current time stamp used.
1890 * Optional.
1891 */
1892VMMDECL(int) TMTimerSetRelative(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1893{
1894 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1895 return tmTimerSetRelative(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1896}
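
/*
 * Illustrative usage sketch, not part of the original source: re-arming a
 * timer 10 milliseconds from now, converting to clock ticks with
 * TMTimerFromMilli (defined further down). The handle hMyTimer is
 * hypothetical; real callers get one from the TM timer create functions and
 * must own the timer's critical section, per TMTIMER_ASSERT_CRITSECT.
 *
 *     uint64_t u64Now = 0;
 *     int rc = TMTimerSetRelative(pVM, hMyTimer,
 *                                 TMTimerFromMilli(pVM, hMyTimer, 10),
 *                                 &u64Now);
 *     AssertRC(rc);
 */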
1897
1898
1899/**
1900 * Drops a hint about the frequency of the timer.
1901 *
1902 * This is used by TM and the VMM to calculate how often guest execution needs
1903 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1904 *
1905 * @returns VBox status code.
1906 * @param pVM The cross context VM structure.
1907 * @param hTimer Timer handle as returned by one of the create functions.
1908 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1909 *
1910 * @remarks We're using an integer hertz value here since anything above 1 Hz
1911 * is not going to be any trouble to satisfy scheduling-wise. The
1912 * range where it makes sense is >= 100 Hz.
1913 */
1914VMMDECL(int) TMTimerSetFrequencyHint(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t uHzHint)
1915{
1916 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1917 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1918
1919 uint32_t const uHzOldHint = pTimer->uHzHint;
1920 pTimer->uHzHint = uHzHint;
1921
1922 uint32_t const uMaxHzHint = pQueue->uMaxHzHint;
1923 if ( uHzHint > uMaxHzHint
1924 || uHzOldHint >= uMaxHzHint)
1925 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1926
1927 return VINF_SUCCESS;
1928}
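
/*
 * Hedged sketch with a hypothetical handle hDevTimer: a device running a
 * 1 kHz periodic timer would typically pair the frequency hint with the arm
 * call so TM can size the preemption timer accordingly; TMTimerStop clears
 * the hint again, as noted above.
 *
 *     TMTimerSetFrequencyHint(pVM, hDevTimer, 1000);
 *     TMTimerSetNano(pVM, hDevTimer, 1000000); // 1 ms to the next tick
 */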
1929
1930
1931/**
1932 * TMTimerStop for the virtual sync timer queue.
1933 *
1934 * This employs a greatly simplified state machine by always acquiring the
1935 * queue lock and bypassing the scheduling list.
1936 *
1937 * @returns VBox status code
1938 * @param pVM The cross context VM structure.
1939 * @param pTimer The timer handle.
1940 */
1941static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
1942{
1943 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1944 VM_ASSERT_EMT(pVM);
1945 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1946 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1947 AssertRCReturn(rc, rc);
1948
1949 /* Reset the HZ hint. */
1950 uint32_t uOldHzHint = pTimer->uHzHint;
1951 if (uOldHzHint)
1952 {
1953 if (uOldHzHint >= pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].uMaxHzHint)
1954 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(TMCLOCK_VIRTUAL_SYNC) | RT_BIT_32(TMCLOCK_VIRTUAL_SYNC + 16));
1955 pTimer->uHzHint = 0;
1956 }
1957
1958 /* Update the timer state. */
1959 TMTIMERSTATE const enmState = pTimer->enmState;
1960 switch (enmState)
1961 {
1962 case TMTIMERSTATE_ACTIVE:
1963 {
1964 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1965 tmTimerQueueUnlinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue), pQueue, pTimer);
1966 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1967 rc = VINF_SUCCESS;
1968 break;
1969 }
1970
1971 case TMTIMERSTATE_EXPIRED_DELIVER:
1972 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1973 rc = VINF_SUCCESS;
1974 break;
1975
1976 case TMTIMERSTATE_STOPPED:
1977 rc = VINF_SUCCESS;
1978 break;
1979
1980 case TMTIMERSTATE_PENDING_RESCHEDULE:
1981 case TMTIMERSTATE_PENDING_STOP:
1982 case TMTIMERSTATE_PENDING_SCHEDULE:
1983 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1984 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1985 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1986 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1987 case TMTIMERSTATE_DESTROY:
1988 case TMTIMERSTATE_FREE:
1989 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1990 rc = VERR_TM_INVALID_STATE;
1991 break;
1992
1993 default:
1994 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1995 rc = VERR_TM_UNKNOWN_STATE;
1996 break;
1997 }
1998
1999 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
2000 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
2001 return rc;
2002}
2003
2004
2005/**
2006 * Stop the timer.
2007 * Use TMTimerSet() or TMTimerSetRelative() to re-arm ("un-stop") the timer.
2008 *
2009 * @returns VBox status code.
2010 * @param pVM The cross context VM structure.
2011 * @param hTimer Timer handle as returned by one of the create functions.
2012 */
2013VMMDECL(int) TMTimerStop(PVMCC pVM, TMTIMERHANDLE hTimer)
2014{
2015 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2016 STAM_COUNTER_INC(&pTimer->StatStop);
2017
2018 /* Treat virtual sync timers specially. */
2019 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
2020 return tmTimerVirtualSyncStop(pVM, pTimer);
2021
2022 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2023 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
2024
2025 /*
2026 * Reset the HZ hint.
2027 */
2028 uint32_t const uOldHzHint = pTimer->uHzHint;
2029 if (uOldHzHint)
2030 {
2031 if (uOldHzHint >= pQueue->uMaxHzHint)
2032 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
2033 pTimer->uHzHint = 0;
2034 }
2035
2036 /** @todo see if this function needs optimizing. */
2037 int cRetries = 1000;
2038 do
2039 {
2040 /*
2041 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
2042 */
2043 TMTIMERSTATE enmState = pTimer->enmState;
2044 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
2045 pTimer, tmTimerState(enmState), pTimer->szName, cRetries));
2046 switch (enmState)
2047 {
2048 case TMTIMERSTATE_EXPIRED_DELIVER:
2049 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
2050 return VERR_INVALID_PARAMETER;
2051
2052 case TMTIMERSTATE_STOPPED:
2053 case TMTIMERSTATE_PENDING_STOP:
2054 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2055 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2056 return VINF_SUCCESS;
2057
2058 case TMTIMERSTATE_PENDING_SCHEDULE:
2059 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
2060 {
2061 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2062 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2063 return VINF_SUCCESS;
2064 }
2065 break;
2066
2067 case TMTIMERSTATE_PENDING_RESCHEDULE:
2068 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2069 {
2070 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2071 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2072 return VINF_SUCCESS;
2073 }
2074 break;
2075
2076 case TMTIMERSTATE_ACTIVE:
2077 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2078 {
2079 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2080 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2081 return VINF_SUCCESS;
2082 }
2083 break;
2084
2085 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2086 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2087 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2088#ifdef IN_RING3
2089 if (!RTThreadYield())
2090 RTThreadSleep(1);
2091#else
2092/** @todo call host and yield cpu after a while. */
2093#endif
2094 break;
2095
2096 /*
2097 * Invalid states.
2098 */
2099 case TMTIMERSTATE_DESTROY:
2100 case TMTIMERSTATE_FREE:
2101 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2102 return VERR_TM_INVALID_STATE;
2103 default:
2104 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2105 return VERR_TM_UNKNOWN_STATE;
2106 }
2107 } while (cRetries-- > 0);
2108
2109 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2110 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2111 return VERR_TM_TIMER_UNSTABLE_STATE;
2112}
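
/*
 * Hedged sketch with a hypothetical handle: a device disabling its periodic
 * interrupt stops the timer, which also clears any frequency hint as done
 * above; it can be re-armed later with one of the TMTimerSet* APIs.
 *
 *     int rc = TMTimerStop(pVM, hMyTimer);
 *     AssertRC(rc);
 */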
2113
2114
2115/**
2116 * Get the current clock time.
2117 * Handy for calculating the new expire time.
2118 *
2119 * @returns Current clock time.
2120 * @param pVM The cross context VM structure.
2121 * @param hTimer Timer handle as returned by one of the create functions.
2122 */
2123VMMDECL(uint64_t) TMTimerGet(PVMCC pVM, TMTIMERHANDLE hTimer)
2124{
2125 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2126 STAM_COUNTER_INC(&pTimer->StatGet);
2127
2128 uint64_t u64;
2129 switch (pQueue->enmClock)
2130 {
2131 case TMCLOCK_VIRTUAL:
2132 u64 = TMVirtualGet(pVM);
2133 break;
2134 case TMCLOCK_VIRTUAL_SYNC:
2135 u64 = TMVirtualSyncGet(pVM);
2136 break;
2137 case TMCLOCK_REAL:
2138 u64 = TMRealGet(pVM);
2139 break;
2140 default:
2141 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2142 return UINT64_MAX;
2143 }
2144 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2145 // u64, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2146 return u64;
2147}
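
/*
 * Hedged sketch with a hypothetical handle: TMTimerGet is handy for
 * computing an absolute deadline in the timer's own clock units, e.g. for
 * TMTimerSet (the absolute-time setter earlier in this file):
 *
 *     uint64_t const u64Deadline = TMTimerGet(pVM, hMyTimer)
 *                                + TMTimerFromMilli(pVM, hMyTimer, 5);
 *     int rc = TMTimerSet(pVM, hMyTimer, u64Deadline);
 */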
2148
2149
2150/**
2151 * Get the frequency of the timer clock.
2152 *
2153 * @returns Clock frequency (as Hz of course).
2154 * @param pVM The cross context VM structure.
2155 * @param hTimer Timer handle as returned by one of the create functions.
2156 */
2157VMMDECL(uint64_t) TMTimerGetFreq(PVMCC pVM, TMTIMERHANDLE hTimer)
2158{
2159 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2160 switch (pQueue->enmClock)
2161 {
2162 case TMCLOCK_VIRTUAL:
2163 case TMCLOCK_VIRTUAL_SYNC:
2164 return TMCLOCK_FREQ_VIRTUAL;
2165
2166 case TMCLOCK_REAL:
2167 return TMCLOCK_FREQ_REAL;
2168
2169 default:
2170 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2171 return 0;
2172 }
2173}
2174
2175
2176/**
2177 * Get the expire time of the timer.
2178 * Only valid for active timers.
2179 *
2180 * @returns Expire time of the timer.
2181 * @param pVM The cross context VM structure.
2182 * @param hTimer Timer handle as returned by one of the create functions.
2183 */
2184VMMDECL(uint64_t) TMTimerGetExpire(PVMCC pVM, TMTIMERHANDLE hTimer)
2185{
2186 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, UINT64_MAX); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2187 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
2188 int cRetries = 1000;
2189 do
2190 {
2191 TMTIMERSTATE enmState = pTimer->enmState;
2192 switch (enmState)
2193 {
2194 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2195 case TMTIMERSTATE_EXPIRED_DELIVER:
2196 case TMTIMERSTATE_STOPPED:
2197 case TMTIMERSTATE_PENDING_STOP:
2198 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2199 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2200 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2201 return UINT64_MAX;
2202
2203 case TMTIMERSTATE_ACTIVE:
2204 case TMTIMERSTATE_PENDING_RESCHEDULE:
2205 case TMTIMERSTATE_PENDING_SCHEDULE:
2206 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2207 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2208 return pTimer->u64Expire;
2209
2210 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2211 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2212#ifdef IN_RING3
2213 if (!RTThreadYield())
2214 RTThreadSleep(1);
2215#endif
2216 break;
2217
2218 /*
2219 * Invalid states.
2220 */
2221 case TMTIMERSTATE_DESTROY:
2222 case TMTIMERSTATE_FREE:
2223 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2224 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2225 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2226 return UINT64_MAX;
2227 default:
2228 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2229 return UINT64_MAX;
2230 }
2231 } while (cRetries-- > 0);
2232
2233 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2234 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2235 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2236 return UINT64_MAX;
2237}
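
/*
 * Hedged caller sketch with a hypothetical handle: computing the time
 * remaining on a timer. TMTimerGetExpire returns UINT64_MAX when the timer
 * isn't active, so check for that before subtracting.
 *
 *     uint64_t const u64Expire = TMTimerGetExpire(pVM, hMyTimer);
 *     uint64_t const u64Now    = TMTimerGet(pVM, hMyTimer);
 *     uint64_t const cNsLeft   = u64Expire != UINT64_MAX && u64Expire > u64Now
 *                              ? TMTimerToNano(pVM, hMyTimer, u64Expire - u64Now)
 *                              : 0;
 */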
2238
2239
2240/**
2241 * Checks if a timer is active or not.
2242 *
2243 * @returns True if active.
2244 * @returns False if not active.
2245 * @param pVM The cross context VM structure.
2246 * @param hTimer Timer handle as returned by one of the create functions.
2247 */
2248VMMDECL(bool) TMTimerIsActive(PVMCC pVM, TMTIMERHANDLE hTimer)
2249{
2250 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2251 TMTIMERSTATE enmState = pTimer->enmState;
2252 switch (enmState)
2253 {
2254 case TMTIMERSTATE_STOPPED:
2255 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2256 case TMTIMERSTATE_EXPIRED_DELIVER:
2257 case TMTIMERSTATE_PENDING_STOP:
2258 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2259 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2260 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2261 return false;
2262
2263 case TMTIMERSTATE_ACTIVE:
2264 case TMTIMERSTATE_PENDING_RESCHEDULE:
2265 case TMTIMERSTATE_PENDING_SCHEDULE:
2266 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2267 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2268 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2269 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2270 return true;
2271
2272 /*
2273 * Invalid states.
2274 */
2275 case TMTIMERSTATE_DESTROY:
2276 case TMTIMERSTATE_FREE:
2277 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), pTimer->szName));
2278 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2279 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2280 return false;
2281 default:
2282 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2283 return false;
2284 }
2285}
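
/*
 * Hedged sketch of a common caller pattern (hypothetical handle): only
 * re-arm the timer when it isn't already pending.
 *
 *     if (!TMTimerIsActive(pVM, hMyTimer))
 *         TMTimerSetMillies(pVM, hMyTimer, 100);
 */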
2286
2287
2288/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2289
2290
2291/**
2292 * Arm a timer with a (new) expire time relative to current time.
2293 *
2294 * @returns VBox status code.
2295 * @param pVM The cross context VM structure.
2296 * @param hTimer Timer handle as returned by one of the create functions.
2297 * @param cMilliesToNext Number of milliseconds to the next tick.
2298 */
2299VMMDECL(int) TMTimerSetMillies(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t cMilliesToNext)
2300{
2301 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2302 switch (pQueue->enmClock)
2303 {
2304 case TMCLOCK_VIRTUAL:
2305 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2306 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2307
2308 case TMCLOCK_VIRTUAL_SYNC:
2309 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2310 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2311
2312 case TMCLOCK_REAL:
2313 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2314 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext, NULL, pQueueCC, pQueue);
2315
2316 default:
2317 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2318 return VERR_TM_TIMER_BAD_CLOCK;
2319 }
2320}
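
/*
 * Worked example of the conversion above: the virtual clocks tick at
 * TMCLOCK_FREQ_VIRTUAL (1000000000 Hz), so 100 ms becomes
 * 100 * 1000000 = 100000000 ticks, while the real clock already ticks in
 * milliseconds (TMCLOCK_FREQ_REAL == 1000) and 100 is passed through
 * unscaled.
 */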
2321
2322
2323/**
2324 * Arm a timer with a (new) expire time relative to current time.
2325 *
2326 * @returns VBox status code.
2327 * @param pVM The cross context VM structure.
2328 * @param hTimer Timer handle as returned by one of the create functions.
2329 * @param cMicrosToNext Number of microseconds to the next tick.
2330 */
2331VMMDECL(int) TMTimerSetMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicrosToNext)
2332{
2333 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2334 switch (pQueue->enmClock)
2335 {
2336 case TMCLOCK_VIRTUAL:
2337 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2338 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2339
2340 case TMCLOCK_VIRTUAL_SYNC:
2341 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2342 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2343
2344 case TMCLOCK_REAL:
2345 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2346 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext / 1000, NULL, pQueueCC, pQueue);
2347
2348 default:
2349 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2350 return VERR_TM_TIMER_BAD_CLOCK;
2351 }
2352}
2353
2354
2355/**
2356 * Arm a timer with a (new) expire time relative to current time.
2357 *
2358 * @returns VBox status code.
2359 * @param pVM The cross context VM structure.
2360 * @param hTimer Timer handle as returned by one of the create functions.
2361 * @param cNanosToNext Number of nanoseconds to the next tick.
2362 */
2363VMMDECL(int) TMTimerSetNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanosToNext)
2364{
2365 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2366 switch (pQueue->enmClock)
2367 {
2368 case TMCLOCK_VIRTUAL:
2369 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2370 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2371
2372 case TMCLOCK_VIRTUAL_SYNC:
2373 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2374 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2375
2376 case TMCLOCK_REAL:
2377 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2378 return tmTimerSetRelative(pVM, pTimer, cNanosToNext / 1000000, NULL, pQueueCC, pQueue);
2379
2380 default:
2381 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2382 return VERR_TM_TIMER_BAD_CLOCK;
2383 }
2384}
2385
2386
2387/**
2388 * Get the current clock time as nanoseconds.
2389 *
2390 * @returns The timer clock as nanoseconds.
2391 * @param pVM The cross context VM structure.
2392 * @param hTimer Timer handle as returned by one of the create functions.
2393 */
2394VMMDECL(uint64_t) TMTimerGetNano(PVMCC pVM, TMTIMERHANDLE hTimer)
2395{
2396 return TMTimerToNano(pVM, hTimer, TMTimerGet(pVM, hTimer));
2397}
2398
2399
2400/**
2401 * Get the current clock time as microseconds.
2402 *
2403 * @returns The timer clock as microseconds.
2404 * @param pVM The cross context VM structure.
2405 * @param hTimer Timer handle as returned by one of the create functions.
2406 */
2407VMMDECL(uint64_t) TMTimerGetMicro(PVMCC pVM, TMTIMERHANDLE hTimer)
2408{
2409 return TMTimerToMicro(pVM, hTimer, TMTimerGet(pVM, hTimer));
2410}
2411
2412
2413/**
2414 * Get the current clock time as milliseconds.
2415 *
2416 * @returns The timer clock as milliseconds.
2417 * @param pVM The cross context VM structure.
2418 * @param hTimer Timer handle as returned by one of the create functions.
2419 */
2420VMMDECL(uint64_t) TMTimerGetMilli(PVMCC pVM, TMTIMERHANDLE hTimer)
2421{
2422 return TMTimerToMilli(pVM, hTimer, TMTimerGet(pVM, hTimer));
2423}
2424
2425
2426/**
2427 * Converts the specified timer clock time to nanoseconds.
2428 *
2429 * @returns nanoseconds.
2430 * @param pVM The cross context VM structure.
2431 * @param hTimer Timer handle as returned by one of the create functions.
2432 * @param cTicks The clock ticks.
2433 * @remark There could be rounding errors here. We just do a simple integer divide
2434 * without any adjustments.
2435 */
2436VMMDECL(uint64_t) TMTimerToNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2437{
2438 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2439 switch (pQueue->enmClock)
2440 {
2441 case TMCLOCK_VIRTUAL:
2442 case TMCLOCK_VIRTUAL_SYNC:
2443 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2444 return cTicks;
2445
2446 case TMCLOCK_REAL:
2447 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2448 return cTicks * 1000000;
2449
2450 default:
2451 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2452 return 0;
2453 }
2454}
2455
2456
2457/**
2458 * Converts the specified timer clock time to microseconds.
2459 *
2460 * @returns microseconds.
2461 * @param pVM The cross context VM structure.
2462 * @param hTimer Timer handle as returned by one of the create functions.
2463 * @param cTicks The clock ticks.
2464 * @remark There could be rounding errors here. We just do a simple integer divide
2465 * without any adjustments.
2466 */
2467VMMDECL(uint64_t) TMTimerToMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2468{
2469 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2470 switch (pQueue->enmClock)
2471 {
2472 case TMCLOCK_VIRTUAL:
2473 case TMCLOCK_VIRTUAL_SYNC:
2474 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2475 return cTicks / 1000;
2476
2477 case TMCLOCK_REAL:
2478 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2479 return cTicks * 1000;
2480
2481 default:
2482 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2483 return 0;
2484 }
2485}
2486
2487
2488/**
2489 * Converts the specified timer clock time to milliseconds.
2490 *
2491 * @returns milliseconds.
2492 * @param pVM The cross context VM structure.
2493 * @param hTimer Timer handle as returned by one of the create functions.
2494 * @param cTicks The clock ticks.
2495 * @remark There could be rounding errors here. We just do a simple integer divide
2496 * without any adjustments.
2497 */
2498VMMDECL(uint64_t) TMTimerToMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2499{
2500 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2501 switch (pQueue->enmClock)
2502 {
2503 case TMCLOCK_VIRTUAL:
2504 case TMCLOCK_VIRTUAL_SYNC:
2505 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2506 return cTicks / 1000000;
2507
2508 case TMCLOCK_REAL:
2509 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2510 return cTicks;
2511
2512 default:
2513 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2514 return 0;
2515 }
2516}
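
/*
 * Worked example of the rounding caveat in the remarks above: on a virtual
 * clock TMTimerToMilli truncates, so 1999999 ticks (just under 2 ms)
 * converts to 1 ms, and converting that back with TMTimerFromMilli yields
 * 1000000 ticks rather than the original value.
 */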
2517
2518
2519/**
2520 * Converts the specified nanosecond timestamp to timer clock ticks.
2521 *
2522 * @returns timer clock ticks.
2523 * @param pVM The cross context VM structure.
2524 * @param hTimer Timer handle as returned by one of the create functions.
2525 * @param cNanoSecs The nanosecond value to convert.
2526 * @remark There could be rounding and overflow errors here.
2527 */
2528VMMDECL(uint64_t) TMTimerFromNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanoSecs)
2529{
2530 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2531 switch (pQueue->enmClock)
2532 {
2533 case TMCLOCK_VIRTUAL:
2534 case TMCLOCK_VIRTUAL_SYNC:
2535 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2536 return cNanoSecs;
2537
2538 case TMCLOCK_REAL:
2539 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2540 return cNanoSecs / 1000000;
2541
2542 default:
2543 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2544 return 0;
2545 }
2546}
2547
2548
2549/**
2550 * Converts the specified microsecond timestamp to timer clock ticks.
2551 *
2552 * @returns timer clock ticks.
2553 * @param pVM The cross context VM structure.
2554 * @param hTimer Timer handle as returned by one of the create functions.
2555 * @param cMicroSecs The microsecond value to convert.
2556 * @remark There could be rounding and overflow errors here.
2557 */
2558VMMDECL(uint64_t) TMTimerFromMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicroSecs)
2559{
2560 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2561 switch (pQueue->enmClock)
2562 {
2563 case TMCLOCK_VIRTUAL:
2564 case TMCLOCK_VIRTUAL_SYNC:
2565 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2566 return cMicroSecs * 1000;
2567
2568 case TMCLOCK_REAL:
2569 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2570 return cMicroSecs / 1000;
2571
2572 default:
2573 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2574 return 0;
2575 }
2576}
2577
2578
2579/**
2580 * Converts the specified millisecond timestamp to timer clock ticks.
2581 *
2582 * @returns timer clock ticks.
2583 * @param pVM The cross context VM structure.
2584 * @param hTimer Timer handle as returned by one of the create functions.
2585 * @param cMilliSecs The millisecond value to convert.
2586 * @remark There could be rounding and overflow errors here.
2587 */
2588VMMDECL(uint64_t) TMTimerFromMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMilliSecs)
2589{
2590 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2591 switch (pQueue->enmClock)
2592 {
2593 case TMCLOCK_VIRTUAL:
2594 case TMCLOCK_VIRTUAL_SYNC:
2595 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2596 return cMilliSecs * 1000000;
2597
2598 case TMCLOCK_REAL:
2599 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2600 return cMilliSecs;
2601
2602 default:
2603 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2604 return 0;
2605 }
2606}
2607
2608
2609/**
2610 * Convert state to string.
2611 *
2612 * @returns Read-only state name.
2613 * @param enmState State.
2614 */
2615const char *tmTimerState(TMTIMERSTATE enmState)
2616{
2617 switch (enmState)
2618 {
2619#define CASE(num, state) \
2620 case TMTIMERSTATE_##state: \
2621 AssertCompile(TMTIMERSTATE_##state == (num)); \
2622 return #num "-" #state
2623 CASE( 0,INVALID);
2624 CASE( 1,STOPPED);
2625 CASE( 2,ACTIVE);
2626 CASE( 3,EXPIRED_GET_UNLINK);
2627 CASE( 4,EXPIRED_DELIVER);
2628 CASE( 5,PENDING_STOP);
2629 CASE( 6,PENDING_STOP_SCHEDULE);
2630 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2631 CASE( 8,PENDING_SCHEDULE);
2632 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2633 CASE(10,PENDING_RESCHEDULE);
2634 CASE(11,DESTROY);
2635 CASE(12,FREE);
2636 default:
2637 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2638 return "Invalid state!";
2639#undef CASE
2640 }
2641}
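
/*
 * Illustrative use, an assumption rather than original source: the returned
 * name is handy in logging, e.g.:
 *
 *     Log(("timer %s is in state %s\n", pTimer->szName, tmTimerState(pTimer->enmState)));
 */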
2642
2643
2644#if defined(IN_RING0) || defined(IN_RING3)
2645/**
2646 * Copies over old timers and initializes newly allocated ones.
2647 *
2648 * Helper for TMR0TimerQueueGrow and tmR3TimerQueueGrow.
2649 *
2650 * @param paTimers The new timer allocation.
2651 * @param paOldTimers The old timers.
2652 * @param cNewTimers Number of new timers.
2653 * @param cOldTimers Number of old timers.
2654 */
2655void tmHCTimerQueueGrowInit(PTMTIMER paTimers, TMTIMER const *paOldTimers, uint32_t cNewTimers, uint32_t cOldTimers)
2656{
2657 Assert(cOldTimers < cNewTimers);
2658
2659 /*
2660 * Copy over the old info and initialize the new handles.
2661 */
2662 if (cOldTimers > 0)
2663 memcpy(paTimers, paOldTimers, sizeof(TMTIMER) * cOldTimers);
2664
2665 size_t i = cNewTimers;
2666 while (i-- > cOldTimers)
2667 {
2668 paTimers[i].u64Expire = UINT64_MAX;
2669 paTimers[i].enmType = TMTIMERTYPE_INVALID;
2670 paTimers[i].enmState = TMTIMERSTATE_FREE;
2671 paTimers[i].idxScheduleNext = UINT32_MAX;
2672 paTimers[i].idxNext = UINT32_MAX;
2673 paTimers[i].idxPrev = UINT32_MAX;
2674 paTimers[i].hSelf = NIL_TMTIMERHANDLE;
2675 }
2676
2677 /*
2678 * Mark the zeroth entry as allocated but invalid if we just allocated it.
2679 */
2680 if (cOldTimers == 0)
2681 {
2682 paTimers[0].enmState = TMTIMERSTATE_INVALID;
2683 paTimers[0].szName[0] = 'n';
2684 paTimers[0].szName[1] = 'i';
2685 paTimers[0].szName[2] = 'l';
2686 paTimers[0].szName[3] = '\0';
2687 }
2688}
2689#endif /* IN_RING0 || IN_RING3 */
2690
2691
2692/**
2693 * The slow path of tmGetFrequencyHint() where we try to recalculate the value.
2694 *
2695 * @returns The highest frequency. 0 if no timers care.
2696 * @param pVM The cross context VM structure.
2697 * @param uOldMaxHzHint The old global hint.
2698 */
2699DECL_NO_INLINE(static, uint32_t) tmGetFrequencyHintSlow(PVMCC pVM, uint32_t uOldMaxHzHint)
2700{
2701 /* Set two bits, though not entirely sure it's needed (too exhausted to think clearly)
2702 but it should force other callers through the slow path while we're recalculating and
2703 help us detect changes while we're recalculating. */
2704 AssertCompile(RT_ELEMENTS(pVM->tm.s.aTimerQueues) <= 16);
2705
2706 /*
2707 * The "right" highest frequency value isn't so important that we'll block
2708 * waiting on the timer semaphores.
2709 */
2710 uint32_t uMaxHzHint = 0;
2711 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
2712 {
2713 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
2714
2715 /* Get the max Hz hint for the queue. */
2716 uint32_t uMaxHzHintQueue;
2717 if ( !(ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2718 || RT_FAILURE_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock)))
2719 uMaxHzHintQueue = ASMAtomicReadU32(&pQueue->uMaxHzHint);
2720 else
2721 {
2722 /* Is it still necessary to do the update? */
2723 if (ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2724 {
2725 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue + 16)); /* clear one flag up front */
2726
2727 PTMTIMERQUEUECC pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
2728 uMaxHzHintQueue = 0;
2729 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
2730 pCur;
2731 pCur = tmTimerGetNext(pQueueCC, pCur))
2732 {
2733 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2734 if (uHzHint > uMaxHzHintQueue)
2735 {
2736 TMTIMERSTATE enmState = pCur->enmState;
2737 switch (enmState)
2738 {
2739 case TMTIMERSTATE_ACTIVE:
2740 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2741 case TMTIMERSTATE_EXPIRED_DELIVER:
2742 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2743 case TMTIMERSTATE_PENDING_SCHEDULE:
2744 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2745 case TMTIMERSTATE_PENDING_RESCHEDULE:
2746 uMaxHzHintQueue = uHzHint;
2747 break;
2748
2749 case TMTIMERSTATE_STOPPED:
2750 case TMTIMERSTATE_PENDING_STOP:
2751 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2752 case TMTIMERSTATE_DESTROY:
2753 case TMTIMERSTATE_FREE:
2754 case TMTIMERSTATE_INVALID:
2755 break;
2756 /* no default, want gcc warnings when adding more states. */
2757 }
2758 }
2759 }
2760
2761 /* Write the new Hz hint for the queue and clear the other update flag. */
2762 ASMAtomicUoWriteU32(&pQueue->uMaxHzHint, uMaxHzHintQueue);
2763 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue));
2764 }
2765 else
2766 uMaxHzHintQueue = ASMAtomicUoReadU32(&pQueue->uMaxHzHint);
2767
2768 PDMCritSectLeave(pVM, &pQueue->TimerLock);
2769 }
2770
2771 /* Update the global max Hz hint. */
2772 if (uMaxHzHint < uMaxHzHintQueue)
2773 uMaxHzHint = uMaxHzHintQueue;
2774 }
2775
2776 /*
2777 * Update the frequency hint if there are no pending frequency changes and we didn't race anyone through here.
2778 */
2779 uint64_t u64Actual = RT_MAKE_U64(0 /*no pending updates*/, uOldMaxHzHint);
2780 if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2781 Log(("tmGetFrequencyHintSlow: New value %u Hz\n", uMaxHzHint));
2782 else
2783 for (uint32_t iTry = 1;; iTry++)
2784 {
2785 if (RT_LO_U32(u64Actual) != 0)
2786 Log(("tmGetFrequencyHintSlow: Outdated value %u Hz (%#x, try %u)\n", uMaxHzHint, RT_LO_U32(u64Actual), iTry));
2787 else if (iTry >= 4)
2788 Log(("tmGetFrequencyHintSlow: Unable to set %u Hz (try %u)\n", uMaxHzHint, iTry));
2789 else if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2790 Log(("tmGetFrequencyHintSlow: New value %u Hz (try %u)\n", uMaxHzHint, iTry));
2791 else
2792 continue;
2793 break;
2794 }
2795 return uMaxHzHint;
2796}
2797
2798
2799/**
2800 * Gets the highest frequency hint for all the important timers.
2801 *
2802 * @returns The highest frequency. 0 if no timers care.
2803 * @param pVM The cross context VM structure.
2804 */
2805DECLINLINE(uint32_t) tmGetFrequencyHint(PVMCC pVM)
2806{
2807 /*
2808 * Query the value, recalculate it if necessary.
2809 */
2810 uint64_t u64Combined = ASMAtomicReadU64(&pVM->tm.s.HzHint.u64Combined);
2811 if (RT_HI_U32(u64Combined) == 0)
2812 return RT_LO_U32(u64Combined); /* hopefully somewhat likely */
2813 return tmGetFrequencyHintSlow(pVM, RT_LO_U32(u64Combined));
2814}
2815
2816
2817/**
2818 * Calculates a host timer frequency that would be suitable for the current
2819 * timer load.
2820 *
2821 * This will take the highest timer frequency, adjust for catch-up and warp
2822 * driver, and finally add a little fudge factor. The caller (VMM) will use
2823 * the result to adjust the per-cpu preemption timer.
2824 *
2825 * @returns The highest frequency. 0 if no important timers around.
2826 * @param pVM The cross context VM structure.
2827 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2828 */
2829VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
2830{
2831 uint32_t uHz = tmGetFrequencyHint(pVM);
2832
2833 /* Catch up, we have to be more aggressive than the % indicates at the
2834 beginning of the effort. */
2835 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2836 {
2837 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2838 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2839 {
2840 if (u32Pct <= 100)
2841 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2842 else if (u32Pct <= 200)
2843 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2844 else if (u32Pct <= 400)
2845 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2846 uHz *= u32Pct + 100;
2847 uHz /= 100;
2848 }
2849 }
2850
2851 /* Warp drive. */
2852 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2853 {
2854 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2855 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2856 {
2857 uHz *= u32Pct;
2858 uHz /= 100;
2859 }
2860 }
2861
2862 /* Fudge factor. */
2863 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2864 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2865 else
2866 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2867 uHz /= 100;
2868
2869 /* Make sure it isn't too high. */
2870 if (uHz > pVM->tm.s.cHostHzMax)
2871 uHz = pVM->tm.s.cHostHzMax;
2872
2873 return uHz;
2874}
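
/*
 * Worked example with hypothetical numbers: a 1000 Hz hint, a 150% catch-up
 * percentage and a CatchUp200 fudge factor of 75 give
 * u32Pct = 150 * 75 / 100 = 112, so uHz = 1000 * (112 + 100) / 100 = 2120;
 * a per-CPU fudge factor of 110 then yields 2120 * 110 / 100 = 2332 Hz,
 * subject to the cHostHzMax cap.
 */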
2875
2876
2877/**
2878 * Whether the guest virtual clock is ticking.
2879 *
2880 * @returns true if ticking, false otherwise.
2881 * @param pVM The cross context VM structure.
2882 */
2883VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2884{
2885 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2886}
2887