VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 105904

Last change on this file since 105904 was 105715, checked in by vboxsync, 4 months ago

VMM/TM: Removed unused TMTimerPollBoolWith32BitMilliTS API. bugref:10656

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 108.3 KB
Line 
1/* $Id: TMAll.cpp 105715 2024-08-17 00:11:48Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_TM
33#ifdef DEBUG_bird
34# define DBGFTRACE_DISABLED /* annoying */
35#endif
36#include <VBox/vmm/tm.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/dbgftrace.h>
39#ifdef IN_RING3
40#endif
41#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
42#include "TMInternal.h"
43#include <VBox/vmm/vmcc.h>
44
45#include <VBox/param.h>
46#include <VBox/err.h>
47#include <VBox/log.h>
48#include <VBox/sup.h>
49#include <iprt/time.h>
50#include <iprt/assert.h>
51#include <iprt/asm.h>
52#include <iprt/asm-math.h>
53#include <iprt/string.h>
54#ifdef IN_RING3
55# include <iprt/thread.h>
56#endif
57
58#include "TMInline.h"
59
60
61/*********************************************************************************************************************************
62* Defined Constants And Macros *
63*********************************************************************************************************************************/
64#ifdef VBOX_STRICT
65/** @def TMTIMER_GET_CRITSECT
66 * Helper for safely resolving the critical section for a timer belonging to a
67 * device instance.
68 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
69# ifdef IN_RING3
70# define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) ((a_pTimer)->pCritSect)
71# else
72# define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) tmRZTimerGetCritSect(a_pVM, a_pTimer)
73# endif
74#endif
75
76/** @def TMTIMER_ASSERT_CRITSECT
77 * Checks that the caller owns the critical section if one is associated with
78 * the timer. */
79#ifdef VBOX_STRICT
80# define TMTIMER_ASSERT_CRITSECT(a_pVM, a_pTimer) \
81 do { \
82 if ((a_pTimer)->pCritSect) \
83 { \
84 VMSTATE enmState; \
85 PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(a_pVM, a_pTimer); \
86 AssertMsg( pCritSect \
87 && ( PDMCritSectIsOwner((a_pVM), pCritSect) \
88 || (enmState = (a_pVM)->enmVMState) == VMSTATE_CREATING \
89 || enmState == VMSTATE_RESETTING \
90 || enmState == VMSTATE_RESETTING_LS ),\
91 ("pTimer=%p (%s) pCritSect=%p (%s)\n", a_pTimer, (a_pTimer)->szName, \
92 (a_pTimer)->pCritSect, R3STRING(PDMR3CritSectName((a_pTimer)->pCritSect)) )); \
93 } \
94 } while (0)
95#else
96# define TMTIMER_ASSERT_CRITSECT(pVM, pTimer) do { } while (0)
97#endif
98
99/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
100 * Checks for lock order trouble between the timer critsect and the critical
101 * section critsect. The virtual sync critsect must always be entered before
102 * the one associated with the timer (see TMR3TimerQueuesDo). It is OK if there
103 * isn't any critical section associated with the timer or if the calling thread
104 * doesn't own it, ASSUMING of course that the thread using this macro is going
105 * to enter the virtual sync critical section anyway.
106 *
107 * @remarks This is a sligtly relaxed timer locking attitude compared to
108 * TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
109 * should know what it's doing if it's stopping or starting a timer
110 * without taking the device lock.
111 */
112#ifdef VBOX_STRICT
113# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
114 do { \
115 if ((pTimer)->pCritSect) \
116 { \
117 VMSTATE enmState; \
118 PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pVM, pTimer); \
119 AssertMsg( pCritSect \
120 && ( !PDMCritSectIsOwner((pVM), pCritSect) \
121 || PDMCritSectIsOwner((pVM), &(pVM)->tm.s.VirtualSyncLock) \
122 || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
123 || enmState == VMSTATE_RESETTING \
124 || enmState == VMSTATE_RESETTING_LS ),\
125 ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, pTimer->szName, \
126 (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
127 } \
128 } while (0)
129#else
130# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
131#endif
132
133
#if defined(VBOX_STRICT) && defined(IN_RING0)
/**
 * Helper for TMTIMER_GET_CRITSECT: resolves the ring-0 address of the
 * critical section associated with a timer.
 *
 * Only device timers (TMTIMERTYPE_DEV) are translated here; for anything else
 * reaching this function, pCritSect is asserted to be NULL.
 *
 * @returns Ring-0 critical section pointer, or NULL.
 * @param   pVM     The cross context VM structure (only referenced to quiet
 *                  unused-parameter warnings).
 * @param   pTimer  The timer whose critsect to resolve.
 * @todo This needs a redo!
 */
DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PVMCC pVM, PTMTIMER pTimer)
{
    if (pTimer->enmType == TMTIMERTYPE_DEV)
    {
        /* Temporarily set EFLAGS.AC so the ring-3 device instance pointer can
           be read from ring-0 without faulting (SMAP).
           @todo fix ring-3 pointer use */
        RTCCUINTREG fSavedFlags = ASMAddFlags(X86_EFL_AC);
        PPDMDEVINSR0 pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
        ASMSetFlags(fSavedFlags);
        struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;

        /* Common case: the timer uses the device's default critical section. */
        if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
            return pDevInsR0->pCritSectRoR0;

        /* Otherwise, if the critsect lives inside the shared instance data,
           translate its ring-3 offset into the ring-0 mapping. */
        uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
        if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
            return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
    }
    RT_NOREF(pVM);
    Assert(pTimer->pCritSect == NULL);
    return NULL;
}
#endif /* VBOX_STRICT && IN_RING0 */
158
159
160/**
161 * Notification that execution is about to start.
162 *
163 * This call must always be paired with a TMNotifyEndOfExecution call.
164 *
165 * The function may, depending on the configuration, resume the TSC and future
166 * clocks that only ticks when we're executing guest code.
167 *
168 * @param pVM The cross context VM structure.
169 * @param pVCpu The cross context virtual CPU structure.
170 */
171VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
172{
173#ifndef VBOX_WITHOUT_NS_ACCOUNTING
174 pVCpu->tm.s.uTscStartExecuting = SUPReadTsc();
175 pVCpu->tm.s.fExecuting = true;
176#endif
177 if (pVM->tm.s.fTSCTiedToExecution)
178 tmCpuTickResume(pVM, pVCpu);
179}
180
181
/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uTsc    TSC value when exiting guest context.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTsc)
{
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVCpu); /** @todo use uTsc here if we can. */

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /*
     * Calculate the elapsed tick count and convert it to nanoseconds.
     */
# ifdef IN_RING3
    PSUPGLOBALINFOPAGE const pGip = g_pSUPGlobalInfoPage;
    uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDelta(pGip);
    uint64_t const uCpuHz = pGip ? SUPGetCpuHzFromGip(pGip) : pVM->tm.s.cTSCTicksPerSecondHost;
# else
    uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
    uint64_t const uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
# endif
    /* Clamp to at most 4 seconds worth of ticks in case of bogus TSC input. */
    AssertStmt(cTicks <= uCpuHz << 2, cTicks = uCpuHz << 2); /* max 4 sec */

    /* ASMMultU64ByU32DivByU32 takes 32-bit multiplier/divisor, so pre-shift
       both the tick count and the frequency until uCpuHz fits in 32 bits. */
    uint64_t cNsExecutingDelta;
    if (uCpuHz < _4G)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks, RT_NS_1SEC, uCpuHz);
    else if (uCpuHz < 16*_1G64)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 2, RT_NS_1SEC, uCpuHz >> 2);
    else
    {
        Assert(uCpuHz < 64 * _1G64);
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 4, RT_NS_1SEC, uCpuHz >> 4);
    }

    /*
     * Update the data.
     *
     * Note! We're not using strict memory ordering here to speed things us.
     *       The data is in a single cache line and this thread is the only
     *       one writing to that line, so I cannot quite imagine why we would
     *       need any strict ordering here.
     *
     * The odd/even uTimesGen counter brackets the update: readers retry while
     * the counter is odd (update in flight) or changed across their read.
     */
    uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    ASMCompilerBarrier();
    pVCpu->tm.s.fExecuting = false;
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMCompilerBarrier();
    ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

    /*
     * Update stats.
     */
# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
# endif

    /* The timer triggers occational updating of the others and total stats: */
    if (RT_LIKELY(!pVCpu->tm.s.fUpdateStats))
    { /*likely*/ }
    else
    {
        pVCpu->tm.s.fUpdateStats = false;

        uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
        uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
        int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
        if (cNsOtherNewDelta > 0)
            STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif

        pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
        pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
    }

#endif
}
277
278
279/**
280 * Notification that the cpu is entering the halt state
281 *
282 * This call must always be paired with a TMNotifyEndOfExecution call.
283 *
284 * The function may, depending on the configuration, resume the TSC and future
285 * clocks that only ticks when we're halted.
286 *
287 * @param pVCpu The cross context virtual CPU structure.
288 */
289VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
290{
291 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
292
293#ifndef VBOX_WITHOUT_NS_ACCOUNTING
294 pVCpu->tm.s.nsStartHalting = RTTimeNanoTS();
295 pVCpu->tm.s.fHalting = true;
296#endif
297
298 if ( pVM->tm.s.fTSCTiedToExecution
299 && !pVM->tm.s.fTSCNotTiedToHalt)
300 tmCpuTickResume(pVM, pVCpu);
301}
302
303
/**
 * Notification that the cpu is leaving the halt state
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
{
    /* NOTE(review): TMNotifyStartOfHalt declares this as PVMCC; presumably PVM
       suffices here because only shared tm.s members are read - confirm. */
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (   pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Compute the length of the halt period and the derived totals. */
    uint64_t const u64NsTs = RTTimeNanoTS();
    uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.nsStartTotal;
    uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.nsStartHalting;
    uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
    uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

    /* Odd/even uTimesGen counter brackets the update so lock-free readers can
       detect and retry on a torn read (see TMNotifyEndOfExecution). */
    uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    ASMCompilerBarrier();
    pVCpu->tm.s.fHalting = false;
    pVCpu->tm.s.fUpdateStats = false;
    pVCpu->tm.s.cNsHalted = cNsHaltedNew;
    pVCpu->tm.s.cPeriodsHalted++;
    ASMCompilerBarrier();
    ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
    if (cNsOtherNewDelta > 0)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif
    pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
    pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
#endif
}
349
350
351/**
352 * Raise the timer force action flag and notify the dedicated timer EMT.
353 *
354 * @param pVM The cross context VM structure.
355 */
356DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
357{
358 VMCPUID idCpu = pVM->tm.s.idTimerCpu;
359 AssertReturnVoid(idCpu < pVM->cCpus);
360 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);
361
362 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
363 {
364 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
365 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
366#ifdef IN_RING3
367 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
368#endif
369 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
370 }
371}
372
373
374/**
375 * Schedule the queue which was changed.
376 */
377DECLINLINE(void) tmSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
378{
379 int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
380 if (RT_SUCCESS_NP(rc))
381 {
382 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
383 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
384 tmTimerQueueSchedule(pVM, pQueueCC, pQueue);
385#ifdef VBOX_STRICT
386 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
387#endif
388 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
389 PDMCritSectLeave(pVM, &pQueue->TimerLock);
390 return;
391 }
392
393 TMTIMERSTATE enmState = pTimer->enmState;
394 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
395 tmScheduleNotify(pVM);
396}
397
398
/**
 * Try change the state to enmStateNew from enmStateOld.
 *
 * Note: unlike tmTimerTryWithLink, this does NOT link the timer into the
 * scheduling queue; it only attempts the state transition.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}
417
418
/**
 * Links the timer onto the scheduling queue.
 *
 * The schedule list is a singly linked LIFO (chained via idxScheduleNext)
 * onto which the timer is pushed with a compare-and-swap retry loop, so no
 * lock is required here.
 *
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pTimer->idxScheduleNext == UINT32_MAX);
    /* The timer's index within the queue's timer array becomes the new head. */
    const uint32_t idxHeadNew = pTimer - &pQueueCC->paTimers[0];
    AssertReturnVoid(idxHeadNew < pQueueCC->cTimersAlloc);

    uint32_t idxHead;
    do
    {
        idxHead = pQueue->idxSchedule;
        Assert(idxHead == UINT32_MAX || idxHead < pQueueCC->cTimersAlloc);
        pTimer->idxScheduleNext = idxHead;
    } while (!ASMAtomicCmpXchgU32(&pQueue->idxSchedule, idxHeadNew, idxHead));
}
444
445
446/**
447 * Try change the state to enmStateNew from enmStateOld
448 * and link the timer into the scheduling queue.
449 *
450 * @returns Success indicator.
451 * @param pQueueCC The current context queue (same as @a pQueue for
452 * ring-3).
453 * @param pQueue The shared queue data.
454 * @param pTimer Timer in question.
455 * @param enmStateNew The new timer state.
456 * @param enmStateOld The old timer state.
457 */
458DECLINLINE(bool) tmTimerTryWithLink(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer,
459 TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
460{
461 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
462 {
463 tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
464 return true;
465 }
466 return false;
467}
468
469
/**
 * Links a timer into the active list of a timer queue.
 *
 * The active list is a doubly linked list kept sorted by ascending expiration
 * time; the queue's u64Expire mirrors the head (earliest) expiration.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer.
 * @param   u64Expire   The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue,
                                               PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */
    RT_NOREF(pVM);

    PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
    if (pCur)
    {
        /* Walk forward until a timer expiring later is found; insert before it. */
        for (;; pCur = tmTimerGetNext(pQueueCC, pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                const PTMTIMER pPrev = tmTimerGetPrev(pQueueCC, pCur);
                tmTimerSetNext(pQueueCC, pTimer, pCur);
                tmTimerSetPrev(pQueueCC, pTimer, pPrev);
                if (pPrev)
                    tmTimerSetNext(pQueueCC, pPrev, pTimer);
                else
                {
                    /* New list head: also publish the new earliest expiration. */
                    tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive head", pTimer->szName);
                }
                tmTimerSetPrev(pQueueCC, pCur, pTimer);
                return;
            }
            if (pCur->idxNext == UINT32_MAX)
            {
                /* Reached the tail: append. */
                tmTimerSetNext(pQueueCC, pCur, pTimer);
                tmTimerSetPrev(pQueueCC, pTimer, pCur);
                DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive tail", pTimer->szName);
                return;
            }
        }
    }
    else
    {
        /* Empty list: the timer becomes head and defines the expiration. */
        tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive empty", pTimer->szName);
    }
}
527
528
529
/**
 * Schedules the given timer on the given queue.
 *
 * Implements a small state machine: pending (re)schedule/stop requests are
 * carried out by atomically moving the timer through the intermediate states,
 * retrying a couple of times if another thread changes the state underneath
 * us.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
    RT_NOREF(pVM);

    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
                STAM_COUNTER_INC(&pVM->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
623
624
/**
 * Schedules the specified timer queue.
 *
 * Atomically detaches the whole lock-free schedule (LIFO) list from the queue
 * and processes each entry via tmTimerQueueScheduleOne.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3) data of the queue to schedule.
 * @param   pQueue      The shared queue data of the queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
{
    Assert(PDMCritSectIsOwner(pVM, &pQueue->TimerLock));

    /*
     * Dequeue the scheduling list and iterate it.
     */
    uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, idxNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, idxNext, pQueue->u64Expire));
    while (idxNext != UINT32_MAX)
    {
        AssertBreak(idxNext < pQueueCC->cTimersAlloc);

        /*
         * Unlink the head timer and take down the index of the next one.
         */
        PTMTIMER pTimer = &pQueueCC->paTimers[idxNext];
        idxNext = pTimer->idxScheduleNext;
        pTimer->idxScheduleNext = UINT32_MAX;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .szName=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pQueue->enmClock, pTimer->enmType, pTimer->szName));
        tmTimerQueueScheduleOne(pVM, pQueueCC, pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    }
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}
665
666
#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * For each queue (when its locks can be acquired without blocking) this
 * verifies the prev/next linking of the active list, the per-state invariants
 * of every allocated timer, and the timer handle encoding.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszWhere    Caller location clue.
 */
void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere)
{
    for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
    {
        PTMTIMERQUEUE const   pQueue   = &pVM->tm.s.aTimerQueues[idxQueue];
        PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
        Assert(pQueue->enmClock == (TMCLOCK)idxQueue);

        /* Only check when the locks can be had without blocking. */
        int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
        if (RT_SUCCESS(rc))
        {
            /* The virtual sync queue additionally requires the virtual sync lock. */
            if (   pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC
                || PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
            {
                /* Check the linking of the active lists. */
                PTMTIMER pPrev = NULL;
                for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
                     pCur;
                     pPrev = pCur, pCur = tmTimerGetNext(pQueueCC, pCur))
                {
                    AssertMsg(tmTimerGetPrev(pQueueCC, pCur) == pPrev, ("%s: %p != %p\n", pszWhere, tmTimerGetPrev(pQueueCC, pCur), pPrev));
                    TMTIMERSTATE enmState = pCur->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_ACTIVE:
                            AssertMsg(   pCur->idxScheduleNext == UINT32_MAX
                                      || pCur->enmState != TMTIMERSTATE_ACTIVE,
                                      ("%s: %RI32\n", pszWhere, pCur->idxScheduleNext));
                            break;
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            break;
                        default:
                            AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                            break;
                    }
                }

# ifdef IN_RING3
                /* Go thru all the timers and check that the active ones all are in the active lists. */
                int const rcAllocLock = PDMCritSectRwTryEnterShared(pVM, &pQueue->AllocLock);
                uint32_t  idxTimer    = pQueue->cTimersAlloc;
                uint32_t  cFree       = 0;
                while (idxTimer-- > 0)
                {
                    PTMTIMER const     pTimer   = &pQueue->paTimers[idxTimer];
                    TMTIMERSTATE const enmState = pTimer->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_FREE:
                            cFree++;
                            break;

                        /* States that must be on the active list: walk it and
                           make sure this timer is actually linked. */
                        case TMTIMERSTATE_ACTIVE:
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                        {
                            PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                            Assert(pTimer->idxPrev != UINT32_MAX || pTimer == pCurAct);
                            while (pCurAct && pCurAct != pTimer)
                                pCurAct = tmTimerGetNext(pQueueCC, pCurAct);
                            Assert(pCurAct == pTimer);
                            break;
                        }

                        /* States that must NOT be on the active list. */
                        case TMTIMERSTATE_PENDING_SCHEDULE:
                        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                        case TMTIMERSTATE_STOPPED:
                        case TMTIMERSTATE_EXPIRED_DELIVER:
                        {
                            Assert(pTimer->idxNext == UINT32_MAX);
                            Assert(pTimer->idxPrev == UINT32_MAX);
                            for (PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                                 pCurAct;
                                 pCurAct = tmTimerGetNext(pQueueCC, pCurAct))
                            {
                                Assert(pCurAct != pTimer);
                                Assert(tmTimerGetNext(pQueueCC, pCurAct) != pTimer);
                                Assert(tmTimerGetPrev(pQueueCC, pCurAct) != pTimer);
                            }
                            break;
                        }

                        /* ignore */
                        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            break;

                        case TMTIMERSTATE_INVALID:
                            Assert(idxTimer == 0);
                            break;

                        /* shouldn't get here! */
                        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                        case TMTIMERSTATE_DESTROY:
                        default:
                            AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                            break;
                    }

                    /* Check the handle value. */
                    if (enmState > TMTIMERSTATE_INVALID && enmState < TMTIMERSTATE_DESTROY)
                    {
                        Assert((pTimer->hSelf & TMTIMERHANDLE_TIMER_IDX_MASK) == idxTimer);
                        Assert(((pTimer->hSelf >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) & TMTIMERHANDLE_QUEUE_IDX_SMASK) == idxQueue);
                    }
                }
                if (RT_SUCCESS(rcAllocLock))
                {
                    Assert(cFree == pQueue->cTimersFree);
                    PDMCritSectRwLeaveShared(pVM, &pQueue->AllocLock);
                }
                else
                    Assert(cFree >= pQueue->cTimersFree); /* Can be lower as the tmr3TimerCreate may run concurrent. */

# endif /* IN_RING3 */

                if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
                    PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
            }
            PDMCritSectLeave(pVM, &pQueue->TimerLock);
        }
    }
}
#endif /* VBOX_STRICT */
800
801#ifdef VBOX_HIGH_RES_TIMERS_HACK
802
803/**
804 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
805 * EMT is polling.
806 *
807 * @returns See tmTimerPollInternal.
808 * @param pVM The cross context VM structure.
809 * @param u64Now Current virtual clock timestamp.
810 * @param u64Delta The delta to the next even in ticks of the
811 * virtual clock.
812 * @param pu64Delta Where to return the delta.
813 */
814DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
815{
816 Assert(!(u64Delta & RT_BIT_64(63)));
817
818 if (!pVM->tm.s.fVirtualWarpDrive)
819 {
820 *pu64Delta = u64Delta;
821 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
822 }
823
824 /*
825 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
826 */
827 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
828 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
829
830 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
831 u64GipTime -= u64Start; /* the start is GIP time. */
832 if (u64GipTime >= u64Delta)
833 {
834 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
835 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
836 }
837 else
838 {
839 u64Delta -= u64GipTime;
840 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
841 u64Delta += u64GipTime;
842 }
843 *pu64Delta = u64Delta;
844 u64GipTime += u64Start;
845 return u64GipTime;
846}
847
848
849/**
850 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
851 * than the one dedicated to timer work.
852 *
853 * @returns See tmTimerPollInternal.
854 * @param pVM The cross context VM structure.
855 * @param u64Now Current virtual clock timestamp.
856 * @param pu64Delta Where to return the delta.
857 */
858DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
859{
860 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
861 *pu64Delta = s_u64OtherRet;
862 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
863}
864
865
866/**
867 * Worker for tmTimerPollInternal.
868 *
869 * @returns See tmTimerPollInternal.
870 * @param pVM The cross context VM structure.
871 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
872 * @param pVCpuDst The cross context virtual CPU structure of the dedicated
873 * timer EMT.
874 * @param u64Now Current virtual clock timestamp.
875 * @param pu64Delta Where to return the delta.
876 * @param pCounter The statistics counter to update.
877 */
878DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
879 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
880{
881 STAM_COUNTER_INC(pCounter); NOREF(pCounter);
882 if (pVCpuDst != pVCpu)
883 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
884 *pu64Delta = 0;
885 return 0;
886}
887
888
889/**
890 * Common worker for TMTimerPollGIP and TMTimerPoll.
891 *
892 * This function is called before FFs are checked in the inner execution EM loops.
893 *
894 * @returns The GIP timestamp of the next event.
895 * 0 if the next event has already expired.
896 *
897 * @param pVM The cross context VM structure.
898 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
899 * @param pu64Delta Where to store the delta.
900 * @param pu64Now Where to store the current time. Optional.
901 *
902 * @thread The emulation thread.
903 *
904 * @remarks GIP uses ns ticks.
905 */
906DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta, uint64_t *pu64Now)
907{
908 VMCPUID idCpu = pVM->tm.s.idTimerCpu;
909 AssertReturn(idCpu < pVM->cCpus, 0);
910 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);
911
912 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
913 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
914 if (pu64Now)
915 *pu64Now = u64Now;
916
917 /*
918 * Return straight away if the timer FF is already set ...
919 */
920 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
921 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
922
923 /*
924 * ... or if timers are being run.
925 */
926 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
927 {
928 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
929 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
930 }
931
932 /*
933 * Check for TMCLOCK_VIRTUAL expiration.
934 */
935 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire);
936 const int64_t i64Delta1 = u64Expire1 - u64Now;
937 if (i64Delta1 <= 0)
938 {
939 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
940 {
941 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
942 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
943 }
944 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
945 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
946 }
947
948 /*
949 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
950 * This isn't quite as straight forward if in a catch-up, not only do
951 * we have to adjust the 'now' but when have to adjust the delta as well.
952 */
953
954 /*
955 * Optimistic lockless approach.
956 */
957 uint64_t u64VirtualSyncNow;
958 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
959 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
960 {
961 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
962 {
963 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
964 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
965 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
966 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
967 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
968 {
969 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
970 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
971 if (i64Delta2 > 0)
972 {
973 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
974 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
975
976 if (pVCpu == pVCpuDst)
977 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
978 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
979 }
980
981 if ( !pVM->tm.s.fRunningQueues
982 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
983 {
984 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
985 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
986 }
987
988 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
989 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
990 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
991 }
992 }
993 }
994 else
995 {
996 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
997 LogFlow(("TMTimerPoll: stopped\n"));
998 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
999 }
1000
1001 /*
1002 * Complicated lockless approach.
1003 */
1004 uint64_t off;
1005 uint32_t u32Pct = 0;
1006 bool fCatchUp;
1007 int cOuterTries = 42;
1008 for (;; cOuterTries--)
1009 {
1010 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
1011 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
1012 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
1013 if (fCatchUp)
1014 {
1015 /* No changes allowed, try get a consistent set of parameters. */
1016 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
1017 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
1018 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
1019 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
1020 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
1021 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
1022 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
1023 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
1024 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
1025 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
1026 || cOuterTries <= 0)
1027 {
1028 uint64_t u64Delta = u64Now - u64Prev;
1029 if (RT_LIKELY(!(u64Delta >> 32)))
1030 {
1031 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
1032 if (off > u64Sub + offGivenUp)
1033 off -= u64Sub;
1034 else /* we've completely caught up. */
1035 off = offGivenUp;
1036 }
1037 else
1038 /* More than 4 seconds since last time (or negative), ignore it. */
1039 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
1040
1041 /* Check that we're still running and in catch up. */
1042 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
1043 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
1044 break;
1045 }
1046 }
1047 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
1048 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
1049 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
1050 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
1051 break; /* Got an consistent offset */
1052
1053 /* Repeat the initial checks before iterating. */
1054 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
1055 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
1056 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
1057 {
1058 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
1059 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1060 }
1061 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
1062 {
1063 LogFlow(("TMTimerPoll: stopped\n"));
1064 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1065 }
1066 if (cOuterTries <= 0)
1067 break; /* that's enough */
1068 }
1069 if (cOuterTries <= 0)
1070 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
1071 u64VirtualSyncNow = u64Now - off;
1072
1073 /* Calc delta and see if we've got a virtual sync hit. */
1074 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
1075 if (i64Delta2 <= 0)
1076 {
1077 if ( !pVM->tm.s.fRunningQueues
1078 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
1079 {
1080 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
1081 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
1082 }
1083 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
1084 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
1085 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1086 }
1087
1088 /*
1089 * Return the time left to the next event.
1090 */
1091 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
1092 if (pVCpu == pVCpuDst)
1093 {
1094 if (fCatchUp)
1095 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
1096 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
1097 }
1098 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1099}
1100
1101
1102/**
1103 * Set FF if we've passed the next virtual event.
1104 *
1105 * This function is called before FFs are checked in the inner execution EM loops.
1106 *
1107 * @returns true if timers are pending, false if not.
1108 *
1109 * @param pVM The cross context VM structure.
1110 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1111 * @thread The emulation thread.
1112 */
1113VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
1114{
1115 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1116 uint64_t off = 0;
1117 tmTimerPollInternal(pVM, pVCpu, &off, NULL);
1118 return off == 0;
1119}
1120
1121
1122/**
1123 * Set FF if we've passed the next virtual event and return virtual time as MS.
1124 *
1125 * This function is called before FFs are checked in the inner execution EM loops.
1126 *
1127 * This is used by the IEM recompiler for polling timers while also providing a
1128 * free time source for recent use tracking and such.
1129 *
1130 * @returns Nanoseconds till the next event, 0 if event already pending.
1131 *
1132 * @param pVM The cross context VM structure.
1133 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1134 * @param pnsNow Where to return the current virtual time in nanoseconds.
1135 * @thread The emulation thread.
1136 */
1137VMM_INT_DECL(uint64_t) TMTimerPollBoolWithNanoTS(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pnsNow)
1138{
1139 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1140 uint64_t offDelta = 0;
1141 tmTimerPollInternal(pVM, pVCpu, &offDelta, pnsNow);
1142 return offDelta;
1143}
1144
1145
1146/**
1147 * Set FF if we've passed the next virtual event.
1148 *
1149 * This function is called before FFs are checked in the inner execution EM loops.
1150 *
1151 * @param pVM The cross context VM structure.
1152 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1153 * @thread The emulation thread.
1154 */
1155VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
1156{
1157 uint64_t off;
1158 tmTimerPollInternal(pVM, pVCpu, &off, NULL);
1159}
1160
1161
1162/**
1163 * Set FF if we've passed the next virtual event.
1164 *
1165 * This function is called before FFs are checked in the inner execution EM loops.
1166 *
1167 * @returns The GIP timestamp of the next event.
1168 * 0 if the next event has already expired.
1169 * @param pVM The cross context VM structure.
1170 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1171 * @param pu64Delta Where to store the delta.
1172 * @thread The emulation thread.
1173 */
1174VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
1175{
1176 return tmTimerPollInternal(pVM, pVCpu, pu64Delta, NULL);
1177}
1178
1179#endif /* VBOX_HIGH_RES_TIMERS_HACK */
1180
1181/**
1182 * Locks the timer clock.
1183 *
1184 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
1185 * if the clock does not have a lock.
1186 * @param pVM The cross context VM structure.
1187 * @param hTimer Timer handle as returned by one of the create functions.
1188 * @param rcBusy What to return in ring-0 and raw-mode context if the
1189 * lock is busy. Pass VINF_SUCCESS to acquired the
1190 * critical section thru a ring-3 call if necessary.
1191 *
1192 * @remarks Currently only supported on timers using the virtual sync clock.
1193 */
1194VMMDECL(int) TMTimerLock(PVMCC pVM, TMTIMERHANDLE hTimer, int rcBusy)
1195{
1196 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1197 AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
1198 return PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, rcBusy);
1199}
1200
1201
1202/**
1203 * Unlocks a timer clock locked by TMTimerLock.
1204 *
1205 * @param pVM The cross context VM structure.
1206 * @param hTimer Timer handle as returned by one of the create functions.
1207 */
1208VMMDECL(void) TMTimerUnlock(PVMCC pVM, TMTIMERHANDLE hTimer)
1209{
1210 TMTIMER_HANDLE_TO_VARS_RETURN_VOID(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1211 AssertReturnVoid(idxQueue == TMCLOCK_VIRTUAL_SYNC);
1212 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1213}
1214
1215
1216/**
1217 * Checks if the current thread owns the timer clock lock.
1218 *
1219 * @returns @c true if its the owner, @c false if not.
1220 * @param pVM The cross context VM structure.
1221 * @param hTimer Timer handle as returned by one of the create functions.
1222 */
1223VMMDECL(bool) TMTimerIsLockOwner(PVMCC pVM, TMTIMERHANDLE hTimer)
1224{
1225 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1226 AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, false);
1227 return PDMCritSectIsOwner(pVM, &pVM->tm.s.VirtualSyncLock);
1228}
1229
1230
1231/**
1232 * Optimized TMTimerSet code path for starting an inactive timer.
1233 *
1234 * @returns VBox status code.
1235 *
1236 * @param pVM The cross context VM structure.
1237 * @param pTimer The timer handle.
1238 * @param u64Expire The new expire time.
1239 * @param pQueue Pointer to the shared timer queue data.
1240 * @param idxQueue The queue index.
1241 */
1242static int tmTimerSetOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire, PTMTIMERQUEUE pQueue, uint32_t idxQueue)
1243{
1244 Assert(pTimer->idxPrev == UINT32_MAX);
1245 Assert(pTimer->idxNext == UINT32_MAX);
1246 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1247
1248 /*
1249 * Calculate and set the expiration time.
1250 */
1251 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
1252 {
1253 uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
1254 AssertMsgStmt(u64Expire >= u64Last,
1255 ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
1256 u64Expire = u64Last);
1257 }
1258 ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
1259 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, pTimer->szName, u64Expire));
1260
1261 /*
1262 * Link the timer into the active list.
1263 */
1264 tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue), pQueue, pTimer, u64Expire);
1265
1266 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
1267 return VINF_SUCCESS;
1268}
1269
1270
1271/**
1272 * TMTimerSet for the virtual sync timer queue.
1273 *
1274 * This employs a greatly simplified state machine by always acquiring the
1275 * queue lock and bypassing the scheduling list.
1276 *
1277 * @returns VBox status code
1278 * @param pVM The cross context VM structure.
1279 * @param pTimer The timer handle.
1280 * @param u64Expire The expiration time.
1281 */
1282static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
1283{
1284 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1285 VM_ASSERT_EMT(pVM);
1286 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1287 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1288 AssertRCReturn(rc, rc);
1289
1290 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1291 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
1292 TMTIMERSTATE const enmState = pTimer->enmState;
1293 switch (enmState)
1294 {
1295 case TMTIMERSTATE_EXPIRED_DELIVER:
1296 case TMTIMERSTATE_STOPPED:
1297 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1298 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
1299 else
1300 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);
1301
1302 AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
1303 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, pTimer->szName));
1304 pTimer->u64Expire = u64Expire;
1305 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1306 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1307 rc = VINF_SUCCESS;
1308 break;
1309
1310 case TMTIMERSTATE_ACTIVE:
1311 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
1312 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
1313 pTimer->u64Expire = u64Expire;
1314 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1315 rc = VINF_SUCCESS;
1316 break;
1317
1318 case TMTIMERSTATE_PENDING_RESCHEDULE:
1319 case TMTIMERSTATE_PENDING_STOP:
1320 case TMTIMERSTATE_PENDING_SCHEDULE:
1321 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1322 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1323 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1324 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1325 case TMTIMERSTATE_DESTROY:
1326 case TMTIMERSTATE_FREE:
1327 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1328 rc = VERR_TM_INVALID_STATE;
1329 break;
1330
1331 default:
1332 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1333 rc = VERR_TM_UNKNOWN_STATE;
1334 break;
1335 }
1336
1337 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1338 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1339 return rc;
1340}
1341
1342
1343/**
1344 * Arm a timer with a (new) expire time.
1345 *
1346 * @returns VBox status code.
1347 * @param pVM The cross context VM structure.
1348 * @param hTimer Timer handle as returned by one of the create functions.
1349 * @param u64Expire New expire time.
1350 */
1351VMMDECL(int) TMTimerSet(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t u64Expire)
1352{
1353 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1354 STAM_COUNTER_INC(&pTimer->StatSetAbsolute);
1355
1356 /* Treat virtual sync timers specially. */
1357 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
1358 return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);
1359
1360 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1361 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1362
1363 DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", pTimer->szName);
1364
1365#ifdef VBOX_WITH_STATISTICS
1366 /*
1367 * Gather optimization info.
1368 */
1369 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
1370 TMTIMERSTATE enmOrgState = pTimer->enmState;
1371 switch (enmOrgState)
1372 {
1373 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
1374 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
1375 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
1376 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
1377 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
1378 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
1379 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
1380 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
1381 }
1382#endif
1383
1384#if 1
1385 /*
1386 * The most common case is setting the timer again during the callback.
1387 * The second most common case is starting a timer at some other time.
1388 */
1389 TMTIMERSTATE enmState1 = pTimer->enmState;
1390 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
1391 || ( enmState1 == TMTIMERSTATE_STOPPED
1392 && pTimer->pCritSect))
1393 {
1394 /* Try take the TM lock and check the state again. */
1395 int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
1396 if (RT_SUCCESS_NP(rc))
1397 {
1398 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
1399 {
1400 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire, pQueue, idxQueue);
1401 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1402 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1403 return VINF_SUCCESS;
1404 }
1405 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1406 }
1407 }
1408#endif
1409
1410 /*
1411 * Unoptimized code path.
1412 */
1413 int cRetries = 1000;
1414 do
1415 {
1416 /*
1417 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1418 */
1419 TMTIMERSTATE enmState = pTimer->enmState;
1420 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
1421 pTimer, tmTimerState(enmState), pTimer->szName, cRetries, u64Expire));
1422 switch (enmState)
1423 {
1424 case TMTIMERSTATE_EXPIRED_DELIVER:
1425 case TMTIMERSTATE_STOPPED:
1426 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1427 {
1428 Assert(pTimer->idxPrev == UINT32_MAX);
1429 Assert(pTimer->idxNext == UINT32_MAX);
1430 pTimer->u64Expire = u64Expire;
1431 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1432 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1433 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1434 return VINF_SUCCESS;
1435 }
1436 break;
1437
1438 case TMTIMERSTATE_PENDING_SCHEDULE:
1439 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1440 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1441 {
1442 pTimer->u64Expire = u64Expire;
1443 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1444 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1445 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1446 return VINF_SUCCESS;
1447 }
1448 break;
1449
1450
1451 case TMTIMERSTATE_ACTIVE:
1452 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1453 {
1454 pTimer->u64Expire = u64Expire;
1455 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1456 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1457 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1458 return VINF_SUCCESS;
1459 }
1460 break;
1461
1462 case TMTIMERSTATE_PENDING_RESCHEDULE:
1463 case TMTIMERSTATE_PENDING_STOP:
1464 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1465 {
1466 pTimer->u64Expire = u64Expire;
1467 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1468 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1469 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1470 return VINF_SUCCESS;
1471 }
1472 break;
1473
1474
1475 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1476 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1477 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1478#ifdef IN_RING3
1479 if (!RTThreadYield())
1480 RTThreadSleep(1);
1481#else
1482/** @todo call host context and yield after a couple of iterations */
1483#endif
1484 break;
1485
1486 /*
1487 * Invalid states.
1488 */
1489 case TMTIMERSTATE_DESTROY:
1490 case TMTIMERSTATE_FREE:
1491 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1492 return VERR_TM_INVALID_STATE;
1493 default:
1494 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1495 return VERR_TM_UNKNOWN_STATE;
1496 }
1497 } while (cRetries-- > 0);
1498
1499 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1500 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1501 return VERR_TM_TIMER_UNSTABLE_STATE;
1502}
1503
1504
1505/**
1506 * Return the current time for the specified clock, setting pu64Now if not NULL.
1507 *
1508 * @returns Current time.
1509 * @param pVM The cross context VM structure.
1510 * @param enmClock The clock to query.
1511 * @param pu64Now Optional pointer where to store the return time
1512 */
1513DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1514{
1515 uint64_t u64Now;
1516 switch (enmClock)
1517 {
1518 case TMCLOCK_VIRTUAL_SYNC:
1519 u64Now = TMVirtualSyncGet(pVM);
1520 break;
1521 case TMCLOCK_VIRTUAL:
1522 u64Now = TMVirtualGet(pVM);
1523 break;
1524 case TMCLOCK_REAL:
1525 u64Now = TMRealGet(pVM);
1526 break;
1527 default:
1528 AssertFatalMsgFailed(("%d\n", enmClock));
1529 }
1530
1531 if (pu64Now)
1532 *pu64Now = u64Now;
1533 return u64Now;
1534}
1535
1536
1537/**
1538 * Optimized TMTimerSetRelative code path.
1539 *
1540 * @returns VBox status code.
1541 *
1542 * @param pVM The cross context VM structure.
1543 * @param pTimer The timer handle.
1544 * @param cTicksToNext Clock ticks until the next time expiration.
1545 * @param pu64Now Where to return the current time stamp used.
1546 * Optional.
1547 * @param pQueueCC The context specific queue data (same as @a pQueue
1548 * for ring-3).
1549 * @param pQueue The shared queue data.
1550 */
1551static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1552 PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
1553{
1554 Assert(pTimer->idxPrev == UINT32_MAX);
1555 Assert(pTimer->idxNext == UINT32_MAX);
1556 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1557
1558 /*
1559 * Calculate and set the expiration time.
1560 */
1561 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1562 pTimer->u64Expire = u64Expire;
1563 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, pTimer->szName, u64Expire, cTicksToNext));
1564
1565 /*
1566 * Link the timer into the active list.
1567 */
1568 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", pTimer->szName);
1569 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1570
1571 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1572 return VINF_SUCCESS;
1573}
1574
1575
1576/**
1577 * TMTimerSetRelative for the virtual sync timer queue.
1578 *
1579 * This employs a greatly simplified state machine by always acquiring the
1580 * queue lock and bypassing the scheduling list.
1581 *
1582 * @returns VBox status code
1583 * @param pVM The cross context VM structure.
1584 * @param pTimer The timer to (re-)arm.
1585 * @param cTicksToNext Clock ticks until the next time expiration.
1586 * @param pu64Now Where to return the current time stamp used.
1587 * Optional.
1588 */
1589static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1590{
1591 STAM_PROFILE_START(pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1592 VM_ASSERT_EMT(pVM);
1593 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1594 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1595 AssertRCReturn(rc, rc);
1596
1597 /* Calculate the expiration tick. */
1598 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1599 if (pu64Now)
1600 *pu64Now = u64Expire;
1601 u64Expire += cTicksToNext;
1602
1603 /* Update the timer. */
1604 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1605 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
1606 TMTIMERSTATE const enmState = pTimer->enmState;
1607 switch (enmState)
1608 {
1609 case TMTIMERSTATE_EXPIRED_DELIVER:
1610 case TMTIMERSTATE_STOPPED:
1611 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1612 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1613 else
1614 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1615 pTimer->u64Expire = u64Expire;
1616 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1617 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1618 rc = VINF_SUCCESS;
1619 break;
1620
1621 case TMTIMERSTATE_ACTIVE:
1622 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1623 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
1624 pTimer->u64Expire = u64Expire;
1625 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1626 rc = VINF_SUCCESS;
1627 break;
1628
1629 case TMTIMERSTATE_PENDING_RESCHEDULE:
1630 case TMTIMERSTATE_PENDING_STOP:
1631 case TMTIMERSTATE_PENDING_SCHEDULE:
1632 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1633 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1634 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1635 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1636 case TMTIMERSTATE_DESTROY:
1637 case TMTIMERSTATE_FREE:
1638 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1639 rc = VERR_TM_INVALID_STATE;
1640 break;
1641
1642 default:
1643 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1644 rc = VERR_TM_UNKNOWN_STATE;
1645 break;
1646 }
1647
1648 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1649 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1650 return rc;
1651}
1652
1653
1654/**
1655 * Arm a timer with a expire time relative to the current time.
1656 *
1657 * @returns VBox status code.
1658 * @param pVM The cross context VM structure.
1659 * @param pTimer The timer to arm.
1660 * @param cTicksToNext Clock ticks until the next time expiration.
1661 * @param pu64Now Where to return the current time stamp used.
1662 * Optional.
1663 * @param pQueueCC The context specific queue data (same as @a pQueue
1664 * for ring-3).
1665 * @param pQueue The shared queue data.
1666 */
1667static int tmTimerSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1668 PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
1669{
1670 STAM_COUNTER_INC(&pTimer->StatSetRelative);
1671
1672 /* Treat virtual sync timers specially. */
1673 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1674 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
1675
1676 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1677 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1678
1679 DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", pTimer->szName);
1680
1681#ifdef VBOX_WITH_STATISTICS
1682 /*
1683 * Gather optimization info.
1684 */
1685 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1686 TMTIMERSTATE enmOrgState = pTimer->enmState;
1687 switch (enmOrgState)
1688 {
1689 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1690 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1691 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1692 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1693 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1694 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1695 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1696 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1697 }
1698#endif
1699
1700 /*
1701 * Try to take the TM lock and optimize the common cases.
1702 *
1703 * With the TM lock we can safely make optimizations like immediate
1704 * scheduling and we can also be 100% sure that we're not racing the
1705 * running of the timer queues. As an additional restraint we require the
1706 * timer to have a critical section associated with to be 100% there aren't
1707 * concurrent operations on the timer. (This latter isn't necessary any
1708 * longer as this isn't supported for any timers, critsect or not.)
1709 *
1710 * Note! Lock ordering doesn't apply when we only _try_ to
1711 * get the innermost locks.
1712 */
1713 bool fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1714#if 1
1715 if ( fOwnTMLock
1716 && pTimer->pCritSect)
1717 {
1718 TMTIMERSTATE enmState = pTimer->enmState;
1719 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1720 || enmState == TMTIMERSTATE_STOPPED)
1721 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1722 {
1723 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1724 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1725 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1726 return VINF_SUCCESS;
1727 }
1728
1729 /* Optimize other states when it becomes necessary. */
1730 }
1731#endif
1732
1733 /*
1734 * Unoptimized path.
1735 */
1736 int rc;
1737 for (int cRetries = 1000; ; cRetries--)
1738 {
1739 /*
1740 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1741 */
1742 TMTIMERSTATE enmState = pTimer->enmState;
1743 switch (enmState)
1744 {
1745 case TMTIMERSTATE_STOPPED:
1746 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1747 {
1748 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1749 * Figure a safe way of activating this timer while the queue is
1750 * being run.
1751 * (99.9% sure this that the assertion is caused by DevAPIC.cpp
1752 * re-starting the timer in response to a initial_count write.) */
1753 }
1754 RT_FALL_THRU();
1755 case TMTIMERSTATE_EXPIRED_DELIVER:
1756 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1757 {
1758 Assert(pTimer->idxPrev == UINT32_MAX);
1759 Assert(pTimer->idxNext == UINT32_MAX);
1760 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1761 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1762 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1763 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1764 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1765 rc = VINF_SUCCESS;
1766 break;
1767 }
1768 rc = VERR_TRY_AGAIN;
1769 break;
1770
1771 case TMTIMERSTATE_PENDING_SCHEDULE:
1772 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1773 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1774 {
1775 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1776 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1777 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1778 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1779 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1780 rc = VINF_SUCCESS;
1781 break;
1782 }
1783 rc = VERR_TRY_AGAIN;
1784 break;
1785
1786
1787 case TMTIMERSTATE_ACTIVE:
1788 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1789 {
1790 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1791 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1792 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1793 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1794 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1795 rc = VINF_SUCCESS;
1796 break;
1797 }
1798 rc = VERR_TRY_AGAIN;
1799 break;
1800
1801 case TMTIMERSTATE_PENDING_RESCHEDULE:
1802 case TMTIMERSTATE_PENDING_STOP:
1803 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1804 {
1805 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1806 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1807 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1808 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1809 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1810 rc = VINF_SUCCESS;
1811 break;
1812 }
1813 rc = VERR_TRY_AGAIN;
1814 break;
1815
1816
1817 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1818 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1819 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1820#ifdef IN_RING3
1821 if (!RTThreadYield())
1822 RTThreadSleep(1);
1823#else
1824/** @todo call host context and yield after a couple of iterations */
1825#endif
1826 rc = VERR_TRY_AGAIN;
1827 break;
1828
1829 /*
1830 * Invalid states.
1831 */
1832 case TMTIMERSTATE_DESTROY:
1833 case TMTIMERSTATE_FREE:
1834 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1835 rc = VERR_TM_INVALID_STATE;
1836 break;
1837
1838 default:
1839 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1840 rc = VERR_TM_UNKNOWN_STATE;
1841 break;
1842 }
1843
1844 /* switch + loop is tedious to break out of. */
1845 if (rc == VINF_SUCCESS)
1846 break;
1847
1848 if (rc != VERR_TRY_AGAIN)
1849 {
1850 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1851 break;
1852 }
1853 if (cRetries <= 0)
1854 {
1855 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1856 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1857 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1858 break;
1859 }
1860
1861 /*
1862 * Retry to gain locks.
1863 */
1864 if (!fOwnTMLock)
1865 fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1866
1867 } /* for (;;) */
1868
1869 /*
1870 * Clean up and return.
1871 */
1872 if (fOwnTMLock)
1873 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1874
1875 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1876 return rc;
1877}
1878
1879
/**
 * Arm a timer with an expire time relative to the current time.
 *
 * The tick count is interpreted in the units of the clock driving the
 * timer's queue (the per-clock worker tmTimerSetRelative reads the current
 * time via tmTimerSetRelativeNowWorker and adds cTicksToNext to it).
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   hTimer          Timer handle as returned by one of the create functions.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
VMMDECL(int) TMTimerSetRelative(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    /* Resolve the handle into timer/queue variables, then defer to the worker. */
    TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    return tmTimerSetRelative(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
}
1895
1896
1897/**
1898 * Drops a hint about the frequency of the timer.
1899 *
1900 * This is used by TM and the VMM to calculate how often guest execution needs
1901 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1902 *
1903 * @returns VBox status code.
1904 * @param pVM The cross context VM structure.
1905 * @param hTimer Timer handle as returned by one of the create functions.
1906 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1907 *
1908 * @remarks We're using an integer hertz value here since anything above 1 HZ
1909 * is not going to be any trouble satisfying scheduling wise. The
1910 * range where it makes sense is >= 100 HZ.
1911 */
1912VMMDECL(int) TMTimerSetFrequencyHint(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t uHzHint)
1913{
1914 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1915 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1916
1917 uint32_t const uHzOldHint = pTimer->uHzHint;
1918 pTimer->uHzHint = uHzHint;
1919
1920 uint32_t const uMaxHzHint = pQueue->uMaxHzHint;
1921 if ( uHzHint > uMaxHzHint
1922 || uHzOldHint >= uMaxHzHint)
1923 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1924
1925 return VINF_SUCCESS;
1926}
1927
1928
/**
 * TMTimerStop for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM                 The cross context VM structure.
 * @param   pTimer              The timer handle.
 */
static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    /* Holding VirtualSyncLock means no state can change underneath us, so no
       retry loop is needed (unlike the generic TMTimerStop path). */
    int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    /* Reset the HZ hint. */
    uint32_t uOldHzHint = pTimer->uHzHint;
    if (uOldHzHint)
    {
        /* Only flag the queue for recalculation if this hint could have been
           the queue maximum. */
        if (uOldHzHint >= pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].uMaxHzHint)
            ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(TMCLOCK_VIRTUAL_SYNC) | RT_BIT_32(TMCLOCK_VIRTUAL_SYNC + 16));
        pTimer->uHzHint = 0;
    }

    /* Update the timer state. */
    TMTIMERSTATE const enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_ACTIVE:
        {
            /* Unlink directly from the active list; the scheduling list is
               bypassed for the virtual sync queue. */
            PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
            tmTimerQueueUnlinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue), pQueue, pTimer);
            TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
            rc = VINF_SUCCESS;
            break;
        }

        case TMTIMERSTATE_EXPIRED_DELIVER:
            /* Already off the active list; just mark it stopped. */
            TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_STOPPED:
            /* Stopping a stopped timer is a no-op. */
            rc = VINF_SUCCESS;
            break;

        /* The PENDING_* and transient states cannot occur here because the
           virtual sync queue never uses the scheduling state machine. */
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
    PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
    return rc;
}
2001
2002
/**
 * Stop the timer.
 * Use TMR3TimerArm() to "un-stop" the timer.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   hTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(int) TMTimerStop(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    STAM_COUNTER_INC(&pTimer->StatStop);

    /* Treat virtual sync timers specially. */
    if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncStop(pVM, pTimer);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    TMTIMER_ASSERT_CRITSECT(pVM, pTimer);

    /*
     * Reset the HZ hint.
     */
    uint32_t const uOldHzHint = pTimer->uHzHint;
    if (uOldHzHint)
    {
        /* Only trigger recalculation when this hint could have been the queue maximum. */
        if (uOldHzHint >= pQueue->uMaxHzHint)
            ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
        pTimer->uHzHint = 0;
    }

    /** @todo see if this function needs optimizing. */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         * The state may be changed concurrently by other threads, hence the
         * compare-and-swap attempts (tmTimerTry*) and the bounded retry loop.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
              pTimer, tmTimerState(enmState), pTimer->szName, cRetries));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
                //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
                return VERR_INVALID_PARAMETER;

            /* Already stopped or on its way to being stopped: nothing to do. */
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                return VINF_SUCCESS;

            case TMTIMERSTATE_PENDING_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
                {
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_ACTIVE:
                /* Must also grab the list link ownership, hence the WithLink variant. */
                if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            /* Transient states owned by another thread: back off and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host and yield cpu after a while. */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}
2111
2112
2113/**
2114 * Get the current clock time.
2115 * Handy for calculating the new expire time.
2116 *
2117 * @returns Current clock time.
2118 * @param pVM The cross context VM structure.
2119 * @param hTimer Timer handle as returned by one of the create functions.
2120 */
2121VMMDECL(uint64_t) TMTimerGet(PVMCC pVM, TMTIMERHANDLE hTimer)
2122{
2123 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2124 STAM_COUNTER_INC(&pTimer->StatGet);
2125
2126 uint64_t u64;
2127 switch (pQueue->enmClock)
2128 {
2129 case TMCLOCK_VIRTUAL:
2130 u64 = TMVirtualGet(pVM);
2131 break;
2132 case TMCLOCK_VIRTUAL_SYNC:
2133 u64 = TMVirtualSyncGet(pVM);
2134 break;
2135 case TMCLOCK_REAL:
2136 u64 = TMRealGet(pVM);
2137 break;
2138 default:
2139 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2140 return UINT64_MAX;
2141 }
2142 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2143 // u64, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2144 return u64;
2145}
2146
2147
2148/**
2149 * Get the frequency of the timer clock.
2150 *
2151 * @returns Clock frequency (as Hz of course).
2152 * @param pVM The cross context VM structure.
2153 * @param hTimer Timer handle as returned by one of the create functions.
2154 */
2155VMMDECL(uint64_t) TMTimerGetFreq(PVMCC pVM, TMTIMERHANDLE hTimer)
2156{
2157 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2158 switch (pQueue->enmClock)
2159 {
2160 case TMCLOCK_VIRTUAL:
2161 case TMCLOCK_VIRTUAL_SYNC:
2162 return TMCLOCK_FREQ_VIRTUAL;
2163
2164 case TMCLOCK_REAL:
2165 return TMCLOCK_FREQ_REAL;
2166
2167 default:
2168 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2169 return 0;
2170 }
2171}
2172
2173
2174/**
2175 * Get the expire time of the timer.
2176 * Only valid for active timers.
2177 *
2178 * @returns Expire time of the timer.
2179 * @param pVM The cross context VM structure.
2180 * @param hTimer Timer handle as returned by one of the create functions.
2181 */
2182VMMDECL(uint64_t) TMTimerGetExpire(PVMCC pVM, TMTIMERHANDLE hTimer)
2183{
2184 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, UINT64_MAX); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2185 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
2186 int cRetries = 1000;
2187 do
2188 {
2189 TMTIMERSTATE enmState = pTimer->enmState;
2190 switch (enmState)
2191 {
2192 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2193 case TMTIMERSTATE_EXPIRED_DELIVER:
2194 case TMTIMERSTATE_STOPPED:
2195 case TMTIMERSTATE_PENDING_STOP:
2196 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2197 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2198 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2199 return UINT64_MAX;
2200
2201 case TMTIMERSTATE_ACTIVE:
2202 case TMTIMERSTATE_PENDING_RESCHEDULE:
2203 case TMTIMERSTATE_PENDING_SCHEDULE:
2204 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2205 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2206 return pTimer->u64Expire;
2207
2208 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2209 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2210#ifdef IN_RING3
2211 if (!RTThreadYield())
2212 RTThreadSleep(1);
2213#endif
2214 break;
2215
2216 /*
2217 * Invalid states.
2218 */
2219 case TMTIMERSTATE_DESTROY:
2220 case TMTIMERSTATE_FREE:
2221 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2222 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2223 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2224 return UINT64_MAX;
2225 default:
2226 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2227 return UINT64_MAX;
2228 }
2229 } while (cRetries-- > 0);
2230
2231 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2232 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2233 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2234 return UINT64_MAX;
2235}
2236
2237
2238/**
2239 * Checks if a timer is active or not.
2240 *
2241 * @returns True if active.
2242 * @returns False if not active.
2243 * @param pVM The cross context VM structure.
2244 * @param hTimer Timer handle as returned by one of the create functions.
2245 */
2246VMMDECL(bool) TMTimerIsActive(PVMCC pVM, TMTIMERHANDLE hTimer)
2247{
2248 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2249 TMTIMERSTATE enmState = pTimer->enmState;
2250 switch (enmState)
2251 {
2252 case TMTIMERSTATE_STOPPED:
2253 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2254 case TMTIMERSTATE_EXPIRED_DELIVER:
2255 case TMTIMERSTATE_PENDING_STOP:
2256 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2257 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2258 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2259 return false;
2260
2261 case TMTIMERSTATE_ACTIVE:
2262 case TMTIMERSTATE_PENDING_RESCHEDULE:
2263 case TMTIMERSTATE_PENDING_SCHEDULE:
2264 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2265 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2266 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2267 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2268 return true;
2269
2270 /*
2271 * Invalid states.
2272 */
2273 case TMTIMERSTATE_DESTROY:
2274 case TMTIMERSTATE_FREE:
2275 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), pTimer->szName));
2276 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2277 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2278 return false;
2279 default:
2280 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2281 return false;
2282 }
2283}
2284
2285
2286/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2287
2288
2289/**
2290 * Arm a timer with a (new) expire time relative to current time.
2291 *
2292 * @returns VBox status code.
2293 * @param pVM The cross context VM structure.
2294 * @param hTimer Timer handle as returned by one of the create functions.
2295 * @param cMilliesToNext Number of milliseconds to the next tick.
2296 */
2297VMMDECL(int) TMTimerSetMillies(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t cMilliesToNext)
2298{
2299 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2300 switch (pQueue->enmClock)
2301 {
2302 case TMCLOCK_VIRTUAL:
2303 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2304 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2305
2306 case TMCLOCK_VIRTUAL_SYNC:
2307 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2308 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2309
2310 case TMCLOCK_REAL:
2311 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2312 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext, NULL, pQueueCC, pQueue);
2313
2314 default:
2315 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2316 return VERR_TM_TIMER_BAD_CLOCK;
2317 }
2318}
2319
2320
2321/**
2322 * Arm a timer with a (new) expire time relative to current time.
2323 *
2324 * @returns VBox status code.
2325 * @param pVM The cross context VM structure.
2326 * @param hTimer Timer handle as returned by one of the create functions.
2327 * @param cMicrosToNext Number of microseconds to the next tick.
2328 */
2329VMMDECL(int) TMTimerSetMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicrosToNext)
2330{
2331 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2332 switch (pQueue->enmClock)
2333 {
2334 case TMCLOCK_VIRTUAL:
2335 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2336 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2337
2338 case TMCLOCK_VIRTUAL_SYNC:
2339 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2340 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2341
2342 case TMCLOCK_REAL:
2343 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2344 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext / 1000, NULL, pQueueCC, pQueue);
2345
2346 default:
2347 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2348 return VERR_TM_TIMER_BAD_CLOCK;
2349 }
2350}
2351
2352
2353/**
2354 * Arm a timer with a (new) expire time relative to current time.
2355 *
2356 * @returns VBox status code.
2357 * @param pVM The cross context VM structure.
2358 * @param hTimer Timer handle as returned by one of the create functions.
2359 * @param cNanosToNext Number of nanoseconds to the next tick.
2360 */
2361VMMDECL(int) TMTimerSetNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanosToNext)
2362{
2363 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2364 switch (pQueue->enmClock)
2365 {
2366 case TMCLOCK_VIRTUAL:
2367 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2368 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2369
2370 case TMCLOCK_VIRTUAL_SYNC:
2371 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2372 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2373
2374 case TMCLOCK_REAL:
2375 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2376 return tmTimerSetRelative(pVM, pTimer, cNanosToNext / 1000000, NULL, pQueueCC, pQueue);
2377
2378 default:
2379 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2380 return VERR_TM_TIMER_BAD_CLOCK;
2381 }
2382}
2383
2384
2385/**
2386 * Get the current clock time as nanoseconds.
2387 *
2388 * @returns The timer clock as nanoseconds.
2389 * @param pVM The cross context VM structure.
2390 * @param hTimer Timer handle as returned by one of the create functions.
2391 */
2392VMMDECL(uint64_t) TMTimerGetNano(PVMCC pVM, TMTIMERHANDLE hTimer)
2393{
2394 return TMTimerToNano(pVM, hTimer, TMTimerGet(pVM, hTimer));
2395}
2396
2397
2398/**
2399 * Get the current clock time as microseconds.
2400 *
2401 * @returns The timer clock as microseconds.
2402 * @param pVM The cross context VM structure.
2403 * @param hTimer Timer handle as returned by one of the create functions.
2404 */
2405VMMDECL(uint64_t) TMTimerGetMicro(PVMCC pVM, TMTIMERHANDLE hTimer)
2406{
2407 return TMTimerToMicro(pVM, hTimer, TMTimerGet(pVM, hTimer));
2408}
2409
2410
2411/**
2412 * Get the current clock time as milliseconds.
2413 *
2414 * @returns The timer clock as milliseconds.
2415 * @param pVM The cross context VM structure.
2416 * @param hTimer Timer handle as returned by one of the create functions.
2417 */
2418VMMDECL(uint64_t) TMTimerGetMilli(PVMCC pVM, TMTIMERHANDLE hTimer)
2419{
2420 return TMTimerToMilli(pVM, hTimer, TMTimerGet(pVM, hTimer));
2421}
2422
2423
2424/**
2425 * Converts the specified timer clock time to nanoseconds.
2426 *
2427 * @returns nanoseconds.
2428 * @param pVM The cross context VM structure.
2429 * @param hTimer Timer handle as returned by one of the create functions.
2430 * @param cTicks The clock ticks.
2431 * @remark There could be rounding errors here. We just do a simple integer divide
2432 * without any adjustments.
2433 */
2434VMMDECL(uint64_t) TMTimerToNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2435{
2436 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2437 switch (pQueue->enmClock)
2438 {
2439 case TMCLOCK_VIRTUAL:
2440 case TMCLOCK_VIRTUAL_SYNC:
2441 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2442 return cTicks;
2443
2444 case TMCLOCK_REAL:
2445 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2446 return cTicks * 1000000;
2447
2448 default:
2449 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2450 return 0;
2451 }
2452}
2453
2454
2455/**
2456 * Converts the specified timer clock time to microseconds.
2457 *
2458 * @returns microseconds.
2459 * @param pVM The cross context VM structure.
2460 * @param hTimer Timer handle as returned by one of the create functions.
2461 * @param cTicks The clock ticks.
2462 * @remark There could be rounding errors here. We just do a simple integer divide
2463 * without any adjustments.
2464 */
2465VMMDECL(uint64_t) TMTimerToMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2466{
2467 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2468 switch (pQueue->enmClock)
2469 {
2470 case TMCLOCK_VIRTUAL:
2471 case TMCLOCK_VIRTUAL_SYNC:
2472 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2473 return cTicks / 1000;
2474
2475 case TMCLOCK_REAL:
2476 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2477 return cTicks * 1000;
2478
2479 default:
2480 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2481 return 0;
2482 }
2483}
2484
2485
2486/**
2487 * Converts the specified timer clock time to milliseconds.
2488 *
2489 * @returns milliseconds.
2490 * @param pVM The cross context VM structure.
2491 * @param hTimer Timer handle as returned by one of the create functions.
2492 * @param cTicks The clock ticks.
2493 * @remark There could be rounding errors here. We just do a simple integer divide
2494 * without any adjustments.
2495 */
2496VMMDECL(uint64_t) TMTimerToMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2497{
2498 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2499 switch (pQueue->enmClock)
2500 {
2501 case TMCLOCK_VIRTUAL:
2502 case TMCLOCK_VIRTUAL_SYNC:
2503 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2504 return cTicks / 1000000;
2505
2506 case TMCLOCK_REAL:
2507 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2508 return cTicks;
2509
2510 default:
2511 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2512 return 0;
2513 }
2514}
2515
2516
2517/**
2518 * Converts the specified nanosecond timestamp to timer clock ticks.
2519 *
2520 * @returns timer clock ticks.
2521 * @param pVM The cross context VM structure.
2522 * @param hTimer Timer handle as returned by one of the create functions.
2523 * @param cNanoSecs The nanosecond value ticks to convert.
2524 * @remark There could be rounding and overflow errors here.
2525 */
2526VMMDECL(uint64_t) TMTimerFromNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanoSecs)
2527{
2528 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2529 switch (pQueue->enmClock)
2530 {
2531 case TMCLOCK_VIRTUAL:
2532 case TMCLOCK_VIRTUAL_SYNC:
2533 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2534 return cNanoSecs;
2535
2536 case TMCLOCK_REAL:
2537 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2538 return cNanoSecs / 1000000;
2539
2540 default:
2541 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2542 return 0;
2543 }
2544}
2545
2546
2547/**
2548 * Converts the specified microsecond timestamp to timer clock ticks.
2549 *
2550 * @returns timer clock ticks.
2551 * @param pVM The cross context VM structure.
2552 * @param hTimer Timer handle as returned by one of the create functions.
2553 * @param cMicroSecs The microsecond value ticks to convert.
2554 * @remark There could be rounding and overflow errors here.
2555 */
2556VMMDECL(uint64_t) TMTimerFromMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicroSecs)
2557{
2558 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2559 switch (pQueue->enmClock)
2560 {
2561 case TMCLOCK_VIRTUAL:
2562 case TMCLOCK_VIRTUAL_SYNC:
2563 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2564 return cMicroSecs * 1000;
2565
2566 case TMCLOCK_REAL:
2567 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2568 return cMicroSecs / 1000;
2569
2570 default:
2571 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2572 return 0;
2573 }
2574}
2575
2576
2577/**
2578 * Converts the specified millisecond timestamp to timer clock ticks.
2579 *
2580 * @returns timer clock ticks.
2581 * @param pVM The cross context VM structure.
2582 * @param hTimer Timer handle as returned by one of the create functions.
2583 * @param cMilliSecs The millisecond value ticks to convert.
2584 * @remark There could be rounding and overflow errors here.
2585 */
2586VMMDECL(uint64_t) TMTimerFromMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMilliSecs)
2587{
2588 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2589 switch (pQueue->enmClock)
2590 {
2591 case TMCLOCK_VIRTUAL:
2592 case TMCLOCK_VIRTUAL_SYNC:
2593 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2594 return cMilliSecs * 1000000;
2595
2596 case TMCLOCK_REAL:
2597 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2598 return cMilliSecs;
2599
2600 default:
2601 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2602 return 0;
2603 }
2604}
2605
2606
2607/**
2608 * Convert state to string.
2609 *
2610 * @returns Readonly status name.
2611 * @param enmState State.
2612 */
2613const char *tmTimerState(TMTIMERSTATE enmState)
2614{
2615 switch (enmState)
2616 {
2617#define CASE(num, state) \
2618 case TMTIMERSTATE_##state: \
2619 AssertCompile(TMTIMERSTATE_##state == (num)); \
2620 return #num "-" #state
2621 CASE( 0,INVALID);
2622 CASE( 1,STOPPED);
2623 CASE( 2,ACTIVE);
2624 CASE( 3,EXPIRED_GET_UNLINK);
2625 CASE( 4,EXPIRED_DELIVER);
2626 CASE( 5,PENDING_STOP);
2627 CASE( 6,PENDING_STOP_SCHEDULE);
2628 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2629 CASE( 8,PENDING_SCHEDULE);
2630 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2631 CASE(10,PENDING_RESCHEDULE);
2632 CASE(11,DESTROY);
2633 CASE(12,FREE);
2634 default:
2635 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2636 return "Invalid state!";
2637#undef CASE
2638 }
2639}
2640
2641
2642#if defined(IN_RING0) || defined(IN_RING3)
2643/**
2644 * Copies over old timers and initialized newly allocted ones.
2645 *
2646 * Helper for TMR0TimerQueueGrow an tmR3TimerQueueGrow.
2647 *
2648 * @param paTimers The new timer allocation.
2649 * @param paOldTimers The old timers.
2650 * @param cNewTimers Number of new timers.
2651 * @param cOldTimers Number of old timers.
2652 */
2653void tmHCTimerQueueGrowInit(PTMTIMER paTimers, TMTIMER const *paOldTimers, uint32_t cNewTimers, uint32_t cOldTimers)
2654{
2655 Assert(cOldTimers < cNewTimers);
2656
2657 /*
2658 * Copy over the old info and initialize the new handles.
2659 */
2660 if (cOldTimers > 0)
2661 memcpy(paTimers, paOldTimers, sizeof(TMTIMER) * cOldTimers);
2662
2663 size_t i = cNewTimers;
2664 while (i-- > cOldTimers)
2665 {
2666 paTimers[i].u64Expire = UINT64_MAX;
2667 paTimers[i].enmType = TMTIMERTYPE_INVALID;
2668 paTimers[i].enmState = TMTIMERSTATE_FREE;
2669 paTimers[i].idxScheduleNext = UINT32_MAX;
2670 paTimers[i].idxNext = UINT32_MAX;
2671 paTimers[i].idxPrev = UINT32_MAX;
2672 paTimers[i].hSelf = NIL_TMTIMERHANDLE;
2673 }
2674
2675 /*
2676 * Mark the zero'th entry as allocated but invalid if we just allocated it.
2677 */
2678 if (cOldTimers == 0)
2679 {
2680 paTimers[0].enmState = TMTIMERSTATE_INVALID;
2681 paTimers[0].szName[0] = 'n';
2682 paTimers[0].szName[1] = 'i';
2683 paTimers[0].szName[2] = 'l';
2684 paTimers[0].szName[3] = '\0';
2685 }
2686}
2687#endif /* IN_RING0 || IN_RING3 */
2688
2689
/**
 * The slow path of tmGetFrequencyHint() where we try to recalculate the value.
 *
 * Scans the per-queue max Hz hints (recomputing dirty ones under the queue
 * lock when it can be acquired without blocking) and attempts to publish the
 * resulting global maximum.
 *
 * @returns The highest frequency. 0 if no timers care.
 * @param   pVM             The cross context VM structure.
 * @param   uOldMaxHzHint   The old global hint.
 */
DECL_NO_INLINE(static, uint32_t) tmGetFrequencyHintSlow(PVMCC pVM, uint32_t uOldMaxHzHint)
{
    /* Set two bits, though not entirely sure it's needed (too exhausted to think clearly)
       but it should force other callers thru the slow path while we're recalculating and
       help us detect changes while we're recalculating. */
    /* Two per-queue dirty bits live in the high dword of u64Combined
       (bit idxQueue and bit idxQueue + 16), so at most 16 queues fit. */
    AssertCompile(RT_ELEMENTS(pVM->tm.s.aTimerQueues) <= 16);

    /*
     * The "right" highest frequency value isn't so important that we'll block
     * waiting on the timer semaphores.
     */
    uint32_t uMaxHzHint = 0;
    for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];

        /* Get the max Hz hint for the queue.  Use the cached value when the
           queue isn't marked dirty or when we cannot take its lock w/o blocking. */
        uint32_t uMaxHzHintQueue;
        if (   !(ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
            || RT_FAILURE_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock)))
            uMaxHzHintQueue = ASMAtomicReadU32(&pQueue->uMaxHzHint);
        else
        {
            /* Is it still necessary to do updating? (Someone may have beaten us to it.) */
            if (ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
            {
                ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue + 16)); /* clear one flag up front */

                /* Walk all timers in the queue and take the highest hint of
                   those in an active/pending-active state. */
                PTMTIMERQUEUECC pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
                uMaxHzHintQueue = 0;
                for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
                     pCur;
                     pCur = tmTimerGetNext(pQueueCC, pCur))
                {
                    uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
                    if (uHzHint > uMaxHzHintQueue)
                    {
                        TMTIMERSTATE enmState = pCur->enmState;
                        switch (enmState)
                        {
                            case TMTIMERSTATE_ACTIVE:
                            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                            case TMTIMERSTATE_EXPIRED_DELIVER:
                            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_SCHEDULE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE:
                                uMaxHzHintQueue = uHzHint;
                                break;

                            case TMTIMERSTATE_STOPPED:
                            case TMTIMERSTATE_PENDING_STOP:
                            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                            case TMTIMERSTATE_DESTROY:
                            case TMTIMERSTATE_FREE:
                            case TMTIMERSTATE_INVALID:
                                break;
                            /* no default, want gcc warnings when adding more states. */
                        }
                    }
                }

                /* Write the new Hz hint for the queue and clear the other update flag. */
                ASMAtomicUoWriteU32(&pQueue->uMaxHzHint, uMaxHzHintQueue);
                ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue));
            }
            else
                uMaxHzHintQueue = ASMAtomicUoReadU32(&pQueue->uMaxHzHint);

            PDMCritSectLeave(pVM, &pQueue->TimerLock);
        }

        /* Update the global max Hz hint. */
        if (uMaxHzHint < uMaxHzHintQueue)
            uMaxHzHint = uMaxHzHintQueue;
    }

    /*
     * Update the frequency hint if no pending frequency changes and we didn't race anyone thru here.
     * On CAS failure u64Actual receives the current value; retry a few times unless
     * new dirty bits appeared (low dword of the high half non-zero), in which case
     * our recalculated value is already outdated and we give up.
     */
    uint64_t u64Actual = RT_MAKE_U64(0 /*no pending updates*/, uOldMaxHzHint);
    if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
        Log(("tmGetFrequencyHintSlow: New value %u Hz\n", uMaxHzHint));
    else
        for (uint32_t iTry = 1;; iTry++)
        {
            if (RT_LO_U32(u64Actual) != 0)
                Log(("tmGetFrequencyHintSlow: Outdated value %u Hz (%#x, try %u)\n", uMaxHzHint, RT_LO_U32(u64Actual), iTry));
            else if (iTry >= 4)
                Log(("tmGetFrequencyHintSlow: Unable to set %u Hz (try %u)\n", uMaxHzHint, iTry));
            else if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
                Log(("tmGetFrequencyHintSlow: New value %u Hz (try %u)\n", uMaxHzHint, iTry));
            else
                continue;
            break;
        }
    return uMaxHzHint;
}
2795
2796
2797/**
2798 * Gets the highest frequency hint for all the important timers.
2799 *
2800 * @returns The highest frequency. 0 if no timers care.
2801 * @param pVM The cross context VM structure.
2802 */
2803DECLINLINE(uint32_t) tmGetFrequencyHint(PVMCC pVM)
2804{
2805 /*
2806 * Query the value, recalculate it if necessary.
2807 */
2808 uint64_t u64Combined = ASMAtomicReadU64(&pVM->tm.s.HzHint.u64Combined);
2809 if (RT_HI_U32(u64Combined) == 0)
2810 return RT_LO_U32(u64Combined); /* hopefully somewhat likely */
2811 return tmGetFrequencyHintSlow(pVM, RT_LO_U32(u64Combined));
2812}
2813
2814
2815/**
2816 * Calculates a host timer frequency that would be suitable for the current
2817 * timer load.
2818 *
2819 * This will take the highest timer frequency, adjust for catch-up and warp
2820 * driver, and finally add a little fudge factor. The caller (VMM) will use
2821 * the result to adjust the per-cpu preemption timer.
2822 *
2823 * @returns The highest frequency. 0 if no important timers around.
2824 * @param pVM The cross context VM structure.
2825 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2826 */
2827VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
2828{
2829 uint32_t uHz = tmGetFrequencyHint(pVM);
2830
2831 /* Catch up, we have to be more aggressive than the % indicates at the
2832 beginning of the effort. */
2833 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2834 {
2835 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2836 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2837 {
2838 if (u32Pct <= 100)
2839 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2840 else if (u32Pct <= 200)
2841 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2842 else if (u32Pct <= 400)
2843 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2844 uHz *= u32Pct + 100;
2845 uHz /= 100;
2846 }
2847 }
2848
2849 /* Warp drive. */
2850 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2851 {
2852 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2853 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2854 {
2855 uHz *= u32Pct;
2856 uHz /= 100;
2857 }
2858 }
2859
2860 /* Fudge factor. */
2861 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2862 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2863 else
2864 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2865 uHz /= 100;
2866
2867 /* Make sure it isn't too high. */
2868 if (uHz > pVM->tm.s.cHostHzMax)
2869 uHz = pVM->tm.s.cHostHzMax;
2870
2871 return uHz;
2872}
2873
2874
2875/**
2876 * Whether the guest virtual clock is ticking.
2877 *
2878 * @returns true if ticking, false otherwise.
2879 * @param pVM The cross context VM structure.
2880 */
2881VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2882{
2883 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2884}
2885
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette