VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp @ 80333

Last change on this file since 80333 was 80333, checked in by vboxsync, 6 years ago

VMM: Eliminating the VBOX_BUGREF_9217_PART_I preprocessor macro. bugref:9217
/* $Id: TMAll.cpp 80333 2019-08-16 20:28:38Z vboxsync $ */
/** @file
 * TM - Timeout Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#ifdef DEBUG_bird
# define DBGFTRACE_DISABLED /* annoying */
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/dbgftrace.h>
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
#  include <VBox/vmm/rem.h>
# endif
#endif
#include "TMInternal.h"
#include <VBox/vmm/vmcc.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>
#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-math.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif

#include "TMInline.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner(pCritSect) \
                          || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif

/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
 * Checks for lock order trouble between the timer critsect and the critical
 * section critsect. The virtual sync critsect must always be entered before
 * the one associated with the timer (see TMR3TimerQueuesDo). It is OK if there
 * isn't any critical section associated with the timer or if the calling thread
 * doesn't own it, ASSUMING of course that the thread using this macro is going
 * to enter the virtual sync critical section anyway.
 *
 * @remarks This is a slightly relaxed timer locking attitude compared to
 *          TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
 *          should know what it's doing if it's stopping or starting a timer
 *          without taking the device lock.
 */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVM, (pTimer)->pCritSect); \
            AssertMsg(   pCritSect \
                      && (   !PDMCritSectIsOwner(pCritSect) \
                          || PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock) \
                          || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
#endif


/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
#endif
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs           = RTTimeNanoTS();
    uint64_t const cNsTotalNew       = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    uint64_t const cNsExecutingDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
    uint64_t const cNsExecutingNew   = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    uint64_t const cNsOtherNew       = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
    if (cNsOtherNewDelta > 0)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before execution) */
# endif

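    /* Publish the new counts seqlock-style: bump the generation to an odd
       value while updating and to the next even value when done, so lockless
       readers can detect a torn read and retry. */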
    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cNsTotal     = cNsTotalNew;
    pVCpu->tm.s.cNsOther     = cNsOtherNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}


/**
 * Notification that the CPU is entering the halt state.
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
#endif

    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that the CPU is leaving the halt state.
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs        = RTTimeNanoTS();
    uint64_t const cNsTotalNew    = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
    uint64_t const cNsHaltedNew   = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
    uint64_t const cNsOtherNew    = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
    if (cNsOtherNewDelta > 0)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before halting) */
# endif

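    /* Same seqlock-style publishing as in TMNotifyEndOfExecution. */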
    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.cNsHalted = cNsHaltedNew;
    pVCpu->tm.s.cNsTotal  = cNsTotalNew;
    pVCpu->tm.s.cNsOther  = cNsOtherNew;
    pVCpu->tm.s.cPeriodsHalted++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}


/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * @param   pVM     The cross context VM structure.
 */
DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
{
    PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
    if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
    {
        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
        REMR3NotifyTimerPending(pVM, pVCpuDst);
# endif
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    }
}


/**
 * Schedule the queue which was changed.
 */
DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);
    if (    VM_IS_EMT(pVM)
        &&  RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
    {
        STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
        tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
#ifdef VBOX_STRICT
        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
        STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        TM_UNLOCK_TIMERS(pVM);
    }
    else
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
            tmScheduleNotify(pVM);
    }
}


/**
 * Tries to change the state to enmStateNew from enmStateOld.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}


/**
 * Links the timer onto the scheduling queue.
 *
 * @param   pQueue  The timer queue the timer belongs to.
 * @param   pTimer  The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(!pTimer->offScheduleNext);
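    /* Lockless LIFO push: the schedule-list links are self-relative offsets,
       so point the timer at the current head and CAS the new head offset in,
       retrying until no other thread has changed the head under us. */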
    const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
    int32_t       offHead;
    do
    {
        offHead = pQueue->offSchedule;
        if (offHead)
            pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
        else
            pTimer->offScheduleNext = 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
}


/**
 * Tries to change the state to enmStateNew from enmStateOld
 * and, on success, links the timer onto the scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
    {
        tmTimerLinkSchedule(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
        return true;
    }
    return false;
}


/**
 * Links a timer into the active list of a timer queue.
 *
 * @param   pQueue      The queue.
 * @param   pTimer      The timer.
 * @param   u64Expire   The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offNext);
    Assert(!pTimer->offPrev);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */

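    /* The active list is kept sorted by expiration time: insert in front of
       the first timer expiring later, updating the queue head and its cached
       u64Expire when we become the new head. */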
    PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
    if (pCur)
    {
        for (;; pCur = TMTIMER_GET_NEXT(pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                TMTIMER_SET_NEXT(pTimer, pCur);
                TMTIMER_SET_PREV(pTimer, pPrev);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pTimer);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive head", R3STRING(pTimer->pszDesc));
                }
                TMTIMER_SET_PREV(pCur, pTimer);
                return;
            }
            if (!pCur->offNext)
            {
                TMTIMER_SET_NEXT(pCur, pTimer);
                TMTIMER_SET_PREV(pTimer, pCur);
                DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive tail", R3STRING(pTimer->pszDesc));
                return;
            }
        }
    }
    else
    {
        TMTIMER_SET_HEAD(pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive empty", R3STRING(pTimer->pszDesc));
    }
}


/**
 * Schedules the given timer on the given queue.
 *
 * @param   pQueue      The timer queue.
 * @param   pTimer      The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);

    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLinkSchedule(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}


/**
 * Schedules the specified timer queue.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pQueue  The queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);
    NOREF(pVM);

    /*
     * Dequeue the scheduling list and iterate it.
     */
    int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
    if (!offNext)
        return;
    PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
    while (pNext)
    {
        /*
         * Unlink the head timer and find the next one.
         */
        PTMTIMER pTimer = pNext;
        pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
        pTimer->offScheduleNext = 0;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
        tmTimerQueueScheduleOne(pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    } /* foreach timer in current schedule batch. */
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}


#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszWhere    Caller location clue.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);

    /*
     * Check the linking of the active lists.
     */
    bool fHaveVirtualSyncLock = false;
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
        {
            if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) != VINF_SUCCESS)
                continue;
            fHaveVirtualSyncLock = true;
        }
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    AssertMsg(  !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                    Assert(pCur->offPrev || pCur == pCurAct);
                    while (pCurAct && pCurAct != pCur)
                        pCurAct = TMTIMER_GET_NEXT(pCurAct);
                    Assert(pCurAct == pCur);
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    Assert(!pCur->offNext);
                    Assert(!pCur->offPrev);
                    for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                         pCurAct;
                         pCurAct = TMTIMER_GET_NEXT(pCurAct))
                    {
                        Assert(pCurAct != pCur);
                        Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                        Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                    }
                }
                break;

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */

    if (fHaveVirtualSyncLock)
        PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}
#endif /* VBOX_STRICT */

#ifdef VBOX_HIGH_RES_TIMERS_HACK

/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63)));

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
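    /* If the whole delta falls after the warp-drive start, scale both values
       back to GIP time (multiply by 100/u32Pct); otherwise only the part of
       the delta lying after the start needs unscaling. */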
    if (u64GipTime >= u64Delta)
    {
        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta   = ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
    }
    else
    {
        u64Delta -= u64GipTime;
        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}


/**
 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
 * than the one dedicated to timer work.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
{
    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
    *pu64Delta = s_u64OtherRet;
    return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
}

/**
 * Worker for tmTimerPollInternal.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pVCpuDst    The cross context virtual CPU structure of the dedicated
 *                      timer EMT.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 * @param   pCounter    The statistics counter to update.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
                                                 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
{
    STAM_COUNTER_INC(pCounter); NOREF(pCounter);
    if (pVCpuDst != pVCpu)
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    *pu64Delta = 0;
    return 0;
}

/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pu64Delta   Where to store the delta.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
{
    PVMCPU         pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
    const uint64_t u64Now   = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t  i64Delta1  = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#if defined(IN_RING3) && defined(VBOX_WITH_REM)
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straightforward if we're in a catch-up: not only do
     * we have to adjust the 'now', but we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
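    /* Read the offset and the expire time, then re-check every input to make
       sure we got a consistent snapshot; any sign of concurrent changes sends
       us down the complicated path below. */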
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (    !pVM->tm.s.fRunningQueues
                    &&  !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#if defined(IN_RING3) && defined(VBOX_WITH_REM)
                    REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     */
    uint64_t    off;
    uint32_t    u32Pct = 0;
    bool        fCatchUp;
    int         cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try to get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (    !pVM->tm.s.fRunningQueues
            &&  !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#if defined(IN_RING3) && defined(VBOX_WITH_REM)
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns true if timers are pending, false if not.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @thread  The emulation thread.
 */
VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t off = 0;
    tmTimerPollInternal(pVM, pVCpu, &off);
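    /* tmTimerPollInternal stores a zero delta on a hit, i.e. when the timer
       FF was raised for this EMT. */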
    return off == 0;
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
{
    uint64_t off;
    tmTimerPollInternal(pVM, pVCpu, &off);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pu64Delta   Where to store the delta.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
{
    return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
}

#endif /* VBOX_HIGH_RES_TIMERS_HACK */

/**
 * Gets the host context ring-3 pointer of the timer.
 *
 * @returns HC R3 pointer.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the host context ring-0 pointer of the timer.
 *
 * @returns HC R0 pointer.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the RC pointer of the timer.
 *
 * @returns RC pointer.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
{
    return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
}

/**
 * Locks the timer clock.
 *
 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
 *          if the clock does not have a lock.
 * @param   pTimer  The timer which clock lock we wish to take.
 * @param   rcBusy  What to return in ring-0 and raw-mode context
 *                  if the lock is busy. Pass VINF_SUCCESS to
 *                  acquire the critical section thru a ring-3
 *                  call if necessary.
 *
 * @remarks Currently only supported on timers using the virtual sync clock.
 */
VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
{
    AssertPtr(pTimer);
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
    return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
}


/**
 * Unlocks a timer clock locked by TMTimerLock.
 *
 * @param   pTimer  The timer which clock to unlock.
 */
VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
    PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}


/**
 * Checks if the current thread owns the timer clock lock.
 *
 * @returns @c true if it is the owner, @c false if not.
 * @param   pTimer  The timer handle.
 */
VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
    return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}
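
/*
 * A sketch (not from this file) of how caller code might use the clock lock
 * around a timer update on the virtual sync clock; the exact rcBusy handling
 * depends on the calling context:
 *
 *     int rc = TMTimerLock(pTimer, VERR_IGNORED);
 *     if (RT_SUCCESS(rc))
 *     {
 *         TMTimerSet(pTimer, TMTimerGet(pTimer) + cTicksToNext);
 *         TMTimerUnlock(pTimer);
 *     }
 */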


/**
 * Optimized TMTimerSet code path for starting an inactive timer.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pTimer      The timer handle.
 * @param   u64Expire   The new expire time.
 */
static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    TMCLOCK const enmClock = pTimer->enmClock;

    /*
     * Calculate and set the expiration time.
     */
    if (enmClock == TMCLOCK_VIRTUAL_SYNC)
    {
        uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
        AssertMsgStmt(u64Expire >= u64Last,
                      ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
                      u64Expire = u64Last);
    }
    ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));

    /*
     * Link the timer into the active list.
     */
    tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    TM_UNLOCK_TIMERS(pVM);
    return VINF_SUCCESS;
}


/**
 * TMTimerSet for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM         The cross context VM structure.
 * @param   pTimer      The timer handle.
 * @param   u64Expire   The expiration time.
 */
static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

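    /* With the virtual sync lock held the timer state cannot change under us,
       so one plain switch on the state does the job -- no retry loop needed. */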
    PTMTIMERQUEUE pQueue   = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE  enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);

            AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
                      ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Arm a timer with a (new) expire time.
 *
 * @returns VBox status code.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   u64Expire   New expire time.
 */
VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
#if 1
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        ||  (   enmState1 == TMTIMERSTATE_STOPPED
             && pTimer->pCritSect))
    {
        /* Try to take the TM lock and check the state again. */
        if (RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM)))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                return VINF_SUCCESS;
            }
            TM_UNLOCK_TIMERS(pVM);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
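        /* The transient *_SET_EXPIRE states guard u64Expire: while we hold
           one, concurrent actors back off (see the yield case below) until we
           have written the new expire time and moved on to a PENDING state. */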
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}


/**
 * Return the current time for the specified clock, setting pu64Now if not NULL.
 *
 * @returns Current time.
 * @param   pVM         The cross context VM structure.
 * @param   enmClock    The clock to query.
 * @param   pu64Now     Optional pointer where to store the return time.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
{
    uint64_t u64Now;
    switch (enmClock)
    {
        case TMCLOCK_VIRTUAL_SYNC:
            u64Now = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_VIRTUAL:
            u64Now = TMVirtualGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64Now = TMRealGet(pVM);
            break;
        default:
            AssertFatalMsgFailed(("%d\n", enmClock));
    }

    if (pu64Now)
        *pu64Now = u64Now;
    return u64Now;
}


/**
 * Optimized TMTimerSetRelative code path.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    TMCLOCK const  enmClock  = pTimer->enmClock;
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
    pTimer->u64Expire        = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", R3STRING(pTimer->pszDesc));
    tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    TM_UNLOCK_TIMERS(pVM);
    return VINF_SUCCESS;
}


/**
 * TMTimerSetRelative for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer to (re-)arm.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    /* Calculate the expiration tick. */
    uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
    if (pu64Now)
        *pu64Now = u64Expire;
    u64Expire += cTicksToNext;

    /* Update the timer. */
    PTMTIMERQUEUE pQueue   = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE  enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Arm a timer with an expire time relative to the current time.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling and we can also be 100% sure that we're not racing the
     * running of the timer queues. As an additional restraint we require the
     * timer to have a critical section associated with it, to be 100% sure
     * there are no concurrent operations on the timer. (This latter isn't
     * necessary any longer as this isn't supported for any timers, critsect
     * or not.)
     *
     * Note! Lock ordering doesn't apply when we only try to
     *       get the innermost locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(   (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                          || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif
1598
1599 /*
1600 * Unoptimized path.
1601 */
1602 int rc;
1603 TMCLOCK const enmClock = pTimer->enmClock;
1604 for (int cRetries = 1000; ; cRetries--)
1605 {
1606 /*
1607 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1608 */
1609 TMTIMERSTATE enmState = pTimer->enmState;
1610 switch (enmState)
1611 {
1612 case TMTIMERSTATE_STOPPED:
1613 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1614 {
1615 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1616 * Figure a safe way of activating this timer while the queue is
1617 * being run.
1618 * (99.9% sure this that the assertion is caused by DevAPIC.cpp
1619 * re-starting the timer in response to a initial_count write.) */
                }
                RT_FALL_THRU();
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                rc = VERR_TRY_AGAIN;
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_INVALID_STATE;
                break;

            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_UNKNOWN_STATE;
                break;
        }

        /* switch + loop is tedious to break out of. */
        if (rc == VINF_SUCCESS)
            break;

        if (rc != VERR_TRY_AGAIN)
        {
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }
        if (cRetries <= 0)
        {
            AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_TIMER_UNSTABLE_STATE;
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }

        /*
         * Retry to gain locks.
         */
        if (!fOwnTMLock)
            fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));

    } /* for (;;) */

    /*
     * Clean up and return.
     */
    if (fOwnTMLock)
        TM_UNLOCK_TIMERS(pVM);

    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    return rc;
}


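/*
 * Illustrative sketch (not from the original sources): how a caller would
 * typically arm a one-shot timer a fixed interval from now.  The helper name
 * tmExampleArmOneShot and the 5 ms interval are hypothetical; going through
 * TMTimerFromMilli() keeps the code correct for any timer clock.
 */
#if 0 /* example only, not built */
static void tmExampleArmOneShot(PTMTIMER pTimer)
{
    /* Arm 5 ms out on pTimer's clock; we don't need the current clock
       value back, so pu64Now is NULL. */
    int rc = TMTimerSetRelative(pTimer, TMTimerFromMilli(pTimer, 5), NULL /*pu64Now*/);
    AssertRC(rc);
}
#endif

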
/**
 * Drops a hint about the frequency of the timer.
 *
 * This is used by TM and the VMM to calculate how often guest execution needs
 * to be interrupted.  The hint is automatically cleared by TMTimerStop.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create
 *                          functions.
 * @param   uHzHint         The frequency hint.  Pass 0 to clear the hint.
 *
 * @remarks We're using an integer hertz value here since anything above 1 Hz
 *          is not going to be any trouble to satisfy scheduling-wise.  The
 *          range where it makes sense is >= 100 Hz.
 */
VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
{
    TMTIMER_ASSERT_CRITSECT(pTimer);

    uint32_t const uHzOldHint = pTimer->uHzHint;
    pTimer->uHzHint = uHzHint;

    PVM pVM = pTimer->CTX_SUFF(pVM);
    uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
    if (   uHzHint > uMaxHzHint
        || uHzOldHint >= uMaxHzHint)
        ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);

    return VINF_SUCCESS;
}


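/*
 * Illustrative sketch: a guest-programmable tick source would typically pair
 * the hint with the actual (re)arming.  The function name and parameters are
 * hypothetical; per the remarks above, hints only matter from about 100 Hz up.
 */
#if 0 /* example only, not built */
static void tmExampleProgramTick(PTMTIMER pTimer, uint32_t uGuestHz)
{
    Assert(uGuestHz > 0);
    TMTimerSetFrequencyHint(pTimer, uGuestHz >= 100 ? uGuestHz : 0);
    TMTimerSetRelative(pTimer, TMTimerFromNano(pTimer, UINT64_C(1000000000) / uGuestHz), NULL);
}
#endif

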
/**
 * TMTimerStop for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 */
static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    /* Reset the HZ hint. */
    if (pTimer->uHzHint)
    {
        if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
        pTimer->uHzHint = 0;
    }

    /* Update the timer state. */
    PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_ACTIVE:
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_EXPIRED_DELIVER:
            TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_STOPPED:
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Stop the timer.
 * Use TMR3TimerArm() to "un-stop" the timer.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncStop(pVM, pTimer);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    /*
     * Reset the HZ hint.
     */
    if (pTimer->uHzHint)
    {
        if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
        pTimer->uHzHint = 0;
    }

    /** @todo see if this function needs optimizing. */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
                //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
                return VERR_INVALID_PARAMETER;

            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                return VINF_SUCCESS;

            case TMTIMERSTATE_PENDING_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host and yield cpu after a while. */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}


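/*
 * Illustrative sketch: stopping a timer when the guest disables a device.
 * Note from the state machine above that stopping an already stopped timer
 * simply returns VINF_SUCCESS, so no TMTimerIsActive() check is needed first.
 * The function name is hypothetical.
 */
#if 0 /* example only, not built */
static void tmExampleDeviceDisabled(PTMTIMER pTimer)
{
    int rc = TMTimerStop(pTimer);   /* also clears any frequency hint */
    AssertRC(rc);
}
#endif

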
/**
 * Get the current clock time.
 * Handy for calculating the new expire time.
 *
 * @returns Current clock time.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);

    uint64_t u64;
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            u64 = TMVirtualGet(pVM);
            break;
        case TMCLOCK_VIRTUAL_SYNC:
            u64 = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64 = TMRealGet(pVM);
            break;
        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return UINT64_MAX;
    }
    //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
    //      u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return u64;
}


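/*
 * Illustrative sketch: the typical use of TMTimerGet() is computing an
 * absolute expire time for TMTimerSet() (defined earlier in this file).
 * The helper name and the 100 us deadline are hypothetical.
 */
#if 0 /* example only, not built */
static void tmExampleArmAbsolute(PTMTIMER pTimer)
{
    uint64_t const u64Now = TMTimerGet(pTimer);
    int rc = TMTimerSet(pTimer, u64Now + TMTimerFromMicro(pTimer, 100));
    AssertRC(rc);
}
#endif

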
/**
 * Get the frequency of the timer clock.
 *
 * @returns Clock frequency (as Hz of course).
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            return TMCLOCK_FREQ_VIRTUAL;

        case TMCLOCK_REAL:
            return TMCLOCK_FREQ_REAL;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Get the expire time of the timer.
 * Only valid for active timers.
 *
 * @returns Expire time of the timer.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
{
    TMTIMER_ASSERT_CRITSECT(pTimer);
    int cRetries = 1000;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;

            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return pTimer->u64Expire;

            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
          pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return ~(uint64_t)0;
}


/**
 * Checks if a timer is active or not.
 *
 * @returns True if active.
 * @returns False if not active.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
{
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_STOPPED:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;

        case TMTIMERSTATE_ACTIVE:
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return true;

        /*
         * Invalid states.
         */
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;
        default:
            AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
            return false;
    }
}


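/*
 * Illustrative sketch: TMTimerIsActive() and TMTimerGetExpire() combine
 * naturally when inspecting an armed timer, e.g. for debug output; recall
 * that TMTimerGetExpire() returns ~0 for timers that aren't active.  The
 * helper name is hypothetical.
 */
#if 0 /* example only, not built */
static void tmExampleLogExpire(PTMTIMER pTimer)
{
    if (TMTimerIsActive(pTimer))
        Log(("timer expires at %'RU64 ticks\n", TMTimerGetExpire(pTimer)));
    else
        Log(("timer is not armed\n"));
}
#endif

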
/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */


/**
 * Arm a timer with a (new) expire time relative to current time.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cMilliesToNext  Number of milliseconds to the next tick.
 */
VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_TM_TIMER_BAD_CLOCK;
    }
}


/**
 * Arm a timer with a (new) expire time relative to current time.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cMicrosToNext   Number of microseconds to the next tick.
 */
VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_TM_TIMER_BAD_CLOCK;
    }
}


/**
 * Arm a timer with a (new) expire time relative to current time.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cNanosToNext    Number of nanoseconds to the next tick.
 */
VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cNanosToNext, NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cNanosToNext, NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_TM_TIMER_BAD_CLOCK;
    }
}


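/*
 * Illustrative sketch: the convenience setters are most often used from a
 * timer callback to re-arm a periodic timer.  The callback shape follows the
 * usual PDM device timer pattern; the names and the 10 ms period are
 * hypothetical.
 */
#if 0 /* example only, not built */
static DECLCALLBACK(void) tmExampleTimerCallback(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
{
    RT_NOREF(pDevIns, pvUser);
    /* ... raise the device interrupt here ... */
    TMTimerSetMillies(pTimer, 10);  /* re-arm for the next period */
}
#endif

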
/**
 * Get the current clock time as nanoseconds.
 *
 * @returns The timer clock as nanoseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
{
    return TMTimerToNano(pTimer, TMTimerGet(pTimer));
}


/**
 * Get the current clock time as microseconds.
 *
 * @returns The timer clock as microseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
{
    return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
}


/**
 * Get the current clock time as milliseconds.
 *
 * @returns The timer clock as milliseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
{
    return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
}


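/*
 * Illustrative sketch: the unit getters make elapsed-time measurement on a
 * timer's clock a one-liner; the helper name is hypothetical.
 */
#if 0 /* example only, not built */
static uint64_t tmExampleElapsedNano(PTMTIMER pTimer, uint64_t u64StartNs)
{
    return TMTimerGetNano(pTimer) - u64StartNs;
}
#endif

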
/**
 * Converts the specified timer clock time to nanoseconds.
 *
 * @returns nanoseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Ticks        The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks * 1000000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified timer clock time to microseconds.
 *
 * @returns microseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Ticks        The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks / 1000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks * 1000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified timer clock time to milliseconds.
 *
 * @returns milliseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Ticks        The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks / 1000000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


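/*
 * Illustrative worked example of the To* conversions: a TMCLOCK_REAL timer
 * ticks in milliseconds, so 1234 ticks are 1234 ms, 1234000 us and
 * 1234000000 ns; on the two virtual clocks a tick is already a nanosecond,
 * and the divisions above can drop remainders.  The helper is hypothetical.
 */
#if 0 /* example only, not built */
static void tmExampleToUnits(PTMTIMER pRealTimer /* assumed TMCLOCK_REAL */)
{
    uint64_t const u64Ticks = 1234;
    Assert(TMTimerToMilli(pRealTimer, u64Ticks) == UINT64_C(1234));
    Assert(TMTimerToMicro(pRealTimer, u64Ticks) == UINT64_C(1234000));
    Assert(TMTimerToNano( pRealTimer, u64Ticks) == UINT64_C(1234000000));
}
#endif

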
/**
 * Converts the specified nanosecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cNanoSecs       The nanosecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t cNanoSecs)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return cNanoSecs;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return cNanoSecs / 1000000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified microsecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cMicroSecs      The microsecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t cMicroSecs)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return cMicroSecs * 1000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return cMicroSecs / 1000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified millisecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cMilliSecs      The millisecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t cMilliSecs)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return cMilliSecs * 1000000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return cMilliSecs;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


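/*
 * Illustrative worked example of the rounding caveat in the From* remarks:
 * on a TMCLOCK_REAL timer the tick is a millisecond, so sub-millisecond
 * input is truncated and a nano -> tick -> nano round trip loses the rest.
 * The helper is hypothetical.
 */
#if 0 /* example only, not built */
static void tmExampleFromUnits(PTMTIMER pRealTimer /* assumed TMCLOCK_REAL */)
{
    uint64_t const cTicks = TMTimerFromNano(pRealTimer, UINT64_C(1999999)); /* ~2 ms requested */
    Assert(cTicks == 1);                                                    /* truncated to 1 ms */
    Assert(TMTimerToNano(pRealTimer, cTicks) == UINT64_C(1000000));         /* only 1 ms comes back */
}
#endif

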
/**
 * Convert state to string.
 *
 * @returns Readonly status name.
 * @param   enmState        State.
 */
const char *tmTimerState(TMTIMERSTATE enmState)
{
    switch (enmState)
    {
#define CASE(num, state) \
        case TMTIMERSTATE_##state: \
            AssertCompile(TMTIMERSTATE_##state == (num)); \
            return #num "-" #state
        CASE( 1,STOPPED);
        CASE( 2,ACTIVE);
        CASE( 3,EXPIRED_GET_UNLINK);
        CASE( 4,EXPIRED_DELIVER);
        CASE( 5,PENDING_STOP);
        CASE( 6,PENDING_STOP_SCHEDULE);
        CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
        CASE( 8,PENDING_SCHEDULE);
        CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
        CASE(10,PENDING_RESCHEDULE);
        CASE(11,DESTROY);
        CASE(12,FREE);
        default:
            AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
            return "Invalid state!";
#undef CASE
    }
}


/**
 * Gets the highest frequency hint for all the important timers.
 *
 * @returns The highest frequency.  0 if no timers care.
 * @param   pVM             The cross context VM structure.
 */
static uint32_t tmGetFrequencyHint(PVM pVM)
{
    /*
     * Query the value, recalculate it if necessary.
     *
     * The "right" highest frequency value isn't so important that we'll block
     * waiting on the timer semaphore.
     */
    uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
    if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
    {
        if (RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
        {
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);

            /*
             * Loop over the timers associated with each clock.
             */
            uMaxHzHint = 0;
            for (int i = 0; i < TMCLOCK_MAX; i++)
            {
                PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
                for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
                {
                    uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
                    if (uHzHint > uMaxHzHint)
                    {
                        switch (pCur->enmState)
                        {
                            case TMTIMERSTATE_ACTIVE:
                            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                            case TMTIMERSTATE_EXPIRED_DELIVER:
                            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_SCHEDULE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE:
                                uMaxHzHint = uHzHint;
                                break;

                            case TMTIMERSTATE_STOPPED:
                            case TMTIMERSTATE_PENDING_STOP:
                            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                            case TMTIMERSTATE_DESTROY:
                            case TMTIMERSTATE_FREE:
                                break;
                            /* no default, want gcc warnings when adding more states. */
                        }
                    }
                }
            }
            ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
            Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
            TM_UNLOCK_TIMERS(pVM);
        }
    }
    return uMaxHzHint;
}


/**
 * Calculates a host timer frequency that would be suitable for the current
 * timer load.
 *
 * This will take the highest timer frequency, adjust for catch-up and warp
 * driver, and finally add a little fudge factor.  The caller (VMM) will use
 * the result to adjust the per-cpu preemption timer.
 *
 * @returns The highest frequency.  0 if no important timers around.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
{
    uint32_t uHz = tmGetFrequencyHint(pVM);

    /* Catch up, we have to be more aggressive than the % indicates at the
       beginning of the effort. */
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
    {
        uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            if (u32Pct <= 100)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
            else if (u32Pct <= 200)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
            else if (u32Pct <= 400)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
            uHz *= u32Pct + 100;
            uHz /= 100;
        }
    }

    /* Warp drive. */
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
    {
        uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
        {
            uHz *= u32Pct;
            uHz /= 100;
        }
    }

    /* Fudge factor. */
    if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
        uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
    else
        uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
    uHz /= 100;

    /* Make sure it isn't too high. */
    if (uHz > pVM->tm.s.cHostHzMax)
        uHz = pVM->tm.s.cHostHzMax;

    return uHz;
}


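/*
 * Illustrative worked example of the calculation above, with hypothetical
 * fudge factor values (the real ones come from the TM configuration).
 */
#if 0 /* example only, not built */
static uint32_t tmExampleHostHz(void)
{
    uint32_t uHz    = 1000;              /* highest important timer hint: 1 kHz */
    uint32_t u32Pct = 50;                /* catch-up running at 50% */
    u32Pct = u32Pct * 300 / 100;         /* hypothetical CatchUp100 factor of 300 -> 150 */
    uHz    = uHz * (u32Pct + 100) / 100; /* -> 2500 Hz */
    uHz    = uHz * 110 / 100;            /* hypothetical timer-CPU fudge of 110 -> 2750 Hz */
    return uHz;                          /* the real code finally clamps to cHostHzMax */
}
#endif

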
/**
 * Whether the guest virtual clock is ticking.
 *
 * @returns true if ticking, false otherwise.
 * @param   pVM             The cross context VM structure.
 */
VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
{
    return RT_BOOL(pVM->tm.s.cVirtualTicking);
}
