VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 80268

Last change on this file since 80268 was 80268, checked in by vboxsync, 5 years ago

VMM: Refactoring VMMAll/* to use VMCC & VMMCPUCC. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 92.1 KB
Line 
1/* $Id: TMAll.cpp 80268 2019-08-14 11:25:13Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_BUGREF_9217_PART_I
23#define LOG_GROUP LOG_GROUP_TM
24#ifdef DEBUG_bird
25# define DBGFTRACE_DISABLED /* annoying */
26#endif
27#include <VBox/vmm/tm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/dbgftrace.h>
30#ifdef IN_RING3
31# ifdef VBOX_WITH_REM
32# include <VBox/vmm/rem.h>
33# endif
34#endif
35#include "TMInternal.h"
36#include <VBox/vmm/vmcc.h>
37
38#include <VBox/param.h>
39#include <VBox/err.h>
40#include <VBox/log.h>
41#include <VBox/sup.h>
42#include <iprt/time.h>
43#include <iprt/assert.h>
44#include <iprt/asm.h>
45#include <iprt/asm-math.h>
46#ifdef IN_RING3
47# include <iprt/thread.h>
48#endif
49
50#include "TMInline.h"
51
52
53/*********************************************************************************************************************************
54* Defined Constants And Macros *
55*********************************************************************************************************************************/
56/** @def TMTIMER_ASSERT_CRITSECT
57 * Checks that the caller owns the critical section if one is associated with
58 * the timer. */
59#ifdef VBOX_STRICT
60# define TMTIMER_ASSERT_CRITSECT(pTimer) \
61 do { \
62 if ((pTimer)->pCritSect) \
63 { \
64 VMSTATE enmState; \
65 PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
66 AssertMsg( pCritSect \
67 && ( PDMCritSectIsOwner(pCritSect) \
68 || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
69 || enmState == VMSTATE_RESETTING \
70 || enmState == VMSTATE_RESETTING_LS ),\
71 ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
72 (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
73 } \
74 } while (0)
75#else
76# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
77#endif
78
79/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
80 * Checks for lock order trouble between the timer critsect and the critical
81 * section critsect. The virtual sync critsect must always be entered before
82 * the one associated with the timer (see TMR3TimerQueuesDo). It is OK if there
83 * isn't any critical section associated with the timer or if the calling thread
84 * doesn't own it, ASSUMING of course that the thread using this macro is going
85 * to enter the virtual sync critical section anyway.
86 *
87 * @remarks This is a sligtly relaxed timer locking attitude compared to
88 * TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
89 * should know what it's doing if it's stopping or starting a timer
90 * without taking the device lock.
91 */
92#ifdef VBOX_STRICT
93# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
94 do { \
95 if ((pTimer)->pCritSect) \
96 { \
97 VMSTATE enmState; \
98 PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVM, (pTimer)->pCritSect); \
99 AssertMsg( pCritSect \
100 && ( !PDMCritSectIsOwner(pCritSect) \
101 || PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock) \
102 || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
103 || enmState == VMSTATE_RESETTING \
104 || enmState == VMSTATE_RESETTING_LS ),\
105 ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
106 (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
107 } \
108 } while (0)
109#else
110# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
111#endif
112
113
114/**
115 * Notification that execution is about to start.
116 *
117 * This call must always be paired with a TMNotifyEndOfExecution call.
118 *
119 * The function may, depending on the configuration, resume the TSC and future
120 * clocks that only ticks when we're executing guest code.
121 *
122 * @param pVM The cross context VM structure.
123 * @param pVCpu The cross context virtual CPU structure.
124 */
125VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
126{
127#ifndef VBOX_WITHOUT_NS_ACCOUNTING
128 pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
129#endif
130 if (pVM->tm.s.fTSCTiedToExecution)
131 tmCpuTickResume(pVM, pVCpu);
132}
133
134
135/**
136 * Notification that execution has ended.
137 *
138 * This call must always be paired with a TMNotifyStartOfExecution call.
139 *
140 * The function may, depending on the configuration, suspend the TSC and future
141 * clocks that only ticks when we're executing guest code.
142 *
143 * @param pVM The cross context VM structure.
144 * @param pVCpu The cross context virtual CPU structure.
145 */
146VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
147{
148 if (pVM->tm.s.fTSCTiedToExecution)
149 tmCpuTickPause(pVCpu);
150
151#ifndef VBOX_WITHOUT_NS_ACCOUNTING
152 uint64_t const u64NsTs = RTTimeNanoTS();
153 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
154 uint64_t const cNsExecutingDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
155 uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
156 uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;
157
158# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
159 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
160 if (cNsExecutingDelta < 5000)
161 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
162 else if (cNsExecutingDelta < 50000)
163 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
164 else
165 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
166 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
167 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
168 if (cNsOtherNewDelta > 0)
169 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before execution) */
170# endif
171
172 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
173 pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
174 pVCpu->tm.s.cNsTotal = cNsTotalNew;
175 pVCpu->tm.s.cNsOther = cNsOtherNew;
176 pVCpu->tm.s.cPeriodsExecuting++;
177 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
178#endif
179}
180
181
182/**
183 * Notification that the cpu is entering the halt state
184 *
185 * This call must always be paired with a TMNotifyEndOfExecution call.
186 *
187 * The function may, depending on the configuration, resume the TSC and future
188 * clocks that only ticks when we're halted.
189 *
190 * @param pVCpu The cross context virtual CPU structure.
191 */
192VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
193{
194 PVM pVM = pVCpu->CTX_SUFF(pVM);
195
196#ifndef VBOX_WITHOUT_NS_ACCOUNTING
197 pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
198#endif
199
200 if ( pVM->tm.s.fTSCTiedToExecution
201 && !pVM->tm.s.fTSCNotTiedToHalt)
202 tmCpuTickResume(pVM, pVCpu);
203}
204
205
206/**
207 * Notification that the cpu is leaving the halt state
208 *
209 * This call must always be paired with a TMNotifyStartOfHalt call.
210 *
211 * The function may, depending on the configuration, suspend the TSC and future
212 * clocks that only ticks when we're halted.
213 *
214 * @param pVCpu The cross context virtual CPU structure.
215 */
216VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
217{
218 PVM pVM = pVCpu->CTX_SUFF(pVM);
219
220 if ( pVM->tm.s.fTSCTiedToExecution
221 && !pVM->tm.s.fTSCNotTiedToHalt)
222 tmCpuTickPause(pVCpu);
223
224#ifndef VBOX_WITHOUT_NS_ACCOUNTING
225 uint64_t const u64NsTs = RTTimeNanoTS();
226 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
227 uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
228 uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
229 uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;
230
231# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
232 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
233 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
234 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
235 if (cNsOtherNewDelta > 0)
236 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before halting) */
237# endif
238
239 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
240 pVCpu->tm.s.cNsHalted = cNsHaltedNew;
241 pVCpu->tm.s.cNsTotal = cNsTotalNew;
242 pVCpu->tm.s.cNsOther = cNsOtherNew;
243 pVCpu->tm.s.cPeriodsHalted++;
244 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
245#endif
246}
247
248
249/**
250 * Raise the timer force action flag and notify the dedicated timer EMT.
251 *
252 * @param pVM The cross context VM structure.
253 */
254DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
255{
256 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
257 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
258 {
259 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
260 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
261#ifdef IN_RING3
262# ifdef VBOX_WITH_REM
263 REMR3NotifyTimerPending(pVM, pVCpuDst);
264# endif
265 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
266#endif
267 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
268 }
269}
270
271
272/**
273 * Schedule the queue which was changed.
274 */
275DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
276{
277 PVMCC pVM = pTimer->CTX_SUFF(pVM);
278 if ( VM_IS_EMT(pVM)
279 && RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
280 {
281 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
282 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
283 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
284#ifdef VBOX_STRICT
285 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
286#endif
287 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
288 TM_UNLOCK_TIMERS(pVM);
289 }
290 else
291 {
292 TMTIMERSTATE enmState = pTimer->enmState;
293 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
294 tmScheduleNotify(pVM);
295 }
296}
297
298
299/**
300 * Try change the state to enmStateNew from enmStateOld
301 * and link the timer into the scheduling queue.
302 *
303 * @returns Success indicator.
304 * @param pTimer Timer in question.
305 * @param enmStateNew The new timer state.
306 * @param enmStateOld The old timer state.
307 */
308DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
309{
310 /*
311 * Attempt state change.
312 */
313 bool fRc;
314 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
315 return fRc;
316}
317
318
319/**
320 * Links the timer onto the scheduling queue.
321 *
322 * @param pQueue The timer queue the timer belongs to.
323 * @param pTimer The timer.
324 *
325 * @todo FIXME: Look into potential race with the thread running the queues
326 * and stuff.
327 */
328DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
329{
330 Assert(!pTimer->offScheduleNext);
331 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
332 int32_t offHead;
333 do
334 {
335 offHead = pQueue->offSchedule;
336 if (offHead)
337 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
338 else
339 pTimer->offScheduleNext = 0;
340 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
341}
342
343
344/**
345 * Try change the state to enmStateNew from enmStateOld
346 * and link the timer into the scheduling queue.
347 *
348 * @returns Success indicator.
349 * @param pTimer Timer in question.
350 * @param enmStateNew The new timer state.
351 * @param enmStateOld The old timer state.
352 */
353DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
354{
355 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
356 {
357 tmTimerLinkSchedule(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
358 return true;
359 }
360 return false;
361}
362
363
364/**
365 * Links a timer into the active list of a timer queue.
366 *
367 * @param pQueue The queue.
368 * @param pTimer The timer.
369 * @param u64Expire The timer expiration time.
370 *
371 * @remarks Called while owning the relevant queue lock.
372 */
373DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
374{
375 Assert(!pTimer->offNext);
376 Assert(!pTimer->offPrev);
377 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */
378
379 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
380 if (pCur)
381 {
382 for (;; pCur = TMTIMER_GET_NEXT(pCur))
383 {
384 if (pCur->u64Expire > u64Expire)
385 {
386 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
387 TMTIMER_SET_NEXT(pTimer, pCur);
388 TMTIMER_SET_PREV(pTimer, pPrev);
389 if (pPrev)
390 TMTIMER_SET_NEXT(pPrev, pTimer);
391 else
392 {
393 TMTIMER_SET_HEAD(pQueue, pTimer);
394 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
395 DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive head", R3STRING(pTimer->pszDesc));
396 }
397 TMTIMER_SET_PREV(pCur, pTimer);
398 return;
399 }
400 if (!pCur->offNext)
401 {
402 TMTIMER_SET_NEXT(pCur, pTimer);
403 TMTIMER_SET_PREV(pTimer, pCur);
404 DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive tail", R3STRING(pTimer->pszDesc));
405 return;
406 }
407 }
408 }
409 else
410 {
411 TMTIMER_SET_HEAD(pQueue, pTimer);
412 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
413 DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive empty", R3STRING(pTimer->pszDesc));
414 }
415}
416
417
418
419/**
420 * Schedules the given timer on the given queue.
421 *
422 * @param pQueue The timer queue.
423 * @param pTimer The timer that needs scheduling.
424 *
425 * @remarks Called while owning the lock.
426 */
427DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
428{
429 Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
430
431 /*
432 * Processing.
433 */
434 unsigned cRetries = 2;
435 do
436 {
437 TMTIMERSTATE enmState = pTimer->enmState;
438 switch (enmState)
439 {
440 /*
441 * Reschedule timer (in the active list).
442 */
443 case TMTIMERSTATE_PENDING_RESCHEDULE:
444 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
445 break; /* retry */
446 tmTimerQueueUnlinkActive(pQueue, pTimer);
447 RT_FALL_THRU();
448
449 /*
450 * Schedule timer (insert into the active list).
451 */
452 case TMTIMERSTATE_PENDING_SCHEDULE:
453 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
454 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
455 break; /* retry */
456 tmTimerQueueLinkActive(pQueue, pTimer, pTimer->u64Expire);
457 return;
458
459 /*
460 * Stop the timer in active list.
461 */
462 case TMTIMERSTATE_PENDING_STOP:
463 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
464 break; /* retry */
465 tmTimerQueueUnlinkActive(pQueue, pTimer);
466 RT_FALL_THRU();
467
468 /*
469 * Stop the timer (not on the active list).
470 */
471 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
472 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
473 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
474 break;
475 return;
476
477 /*
478 * The timer is pending destruction by TMR3TimerDestroy, our caller.
479 * Nothing to do here.
480 */
481 case TMTIMERSTATE_DESTROY:
482 break;
483
484 /*
485 * Postpone these until they get into the right state.
486 */
487 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
488 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
489 tmTimerLinkSchedule(pQueue, pTimer);
490 STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
491 return;
492
493 /*
494 * None of these can be in the schedule.
495 */
496 case TMTIMERSTATE_FREE:
497 case TMTIMERSTATE_STOPPED:
498 case TMTIMERSTATE_ACTIVE:
499 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
500 case TMTIMERSTATE_EXPIRED_DELIVER:
501 default:
502 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
503 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
504 return;
505 }
506 } while (cRetries-- > 0);
507}
508
509
510/**
511 * Schedules the specified timer queue.
512 *
513 * @param pVM The cross context VM structure.
514 * @param pQueue The queue to schedule.
515 *
516 * @remarks Called while owning the lock.
517 */
518void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
519{
520 TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);
521 NOREF(pVM);
522
523 /*
524 * Dequeue the scheduling list and iterate it.
525 */
526 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
527 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
528 if (!offNext)
529 return;
530 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
531 while (pNext)
532 {
533 /*
534 * Unlink the head timer and find the next one.
535 */
536 PTMTIMER pTimer = pNext;
537 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
538 pTimer->offScheduleNext = 0;
539
540 /*
541 * Do the scheduling.
542 */
543 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
544 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
545 tmTimerQueueScheduleOne(pQueue, pTimer);
546 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
547 } /* foreach timer in current schedule batch. */
548 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
549}
550
551
552#ifdef VBOX_STRICT
553/**
554 * Checks that the timer queues are sane.
555 *
556 * @param pVM The cross context VM structure.
557 * @param pszWhere Caller location clue.
558 *
559 * @remarks Called while owning the lock.
560 */
561void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
562{
563 TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);
564
565 /*
566 * Check the linking of the active lists.
567 */
568 bool fHaveVirtualSyncLock = false;
569 for (int i = 0; i < TMCLOCK_MAX; i++)
570 {
571 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
572 Assert((int)pQueue->enmClock == i);
573 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
574 {
575 if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) != VINF_SUCCESS)
576 continue;
577 fHaveVirtualSyncLock = true;
578 }
579 PTMTIMER pPrev = NULL;
580 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
581 {
582 AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
583 AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
584 TMTIMERSTATE enmState = pCur->enmState;
585 switch (enmState)
586 {
587 case TMTIMERSTATE_ACTIVE:
588 AssertMsg( !pCur->offScheduleNext
589 || pCur->enmState != TMTIMERSTATE_ACTIVE,
590 ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
591 break;
592 case TMTIMERSTATE_PENDING_STOP:
593 case TMTIMERSTATE_PENDING_RESCHEDULE:
594 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
595 break;
596 default:
597 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
598 break;
599 }
600 }
601 }
602
603
604# ifdef IN_RING3
605 /*
606 * Do the big list and check that active timers all are in the active lists.
607 */
608 PTMTIMERR3 pPrev = NULL;
609 for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
610 {
611 Assert(pCur->pBigPrev == pPrev);
612 Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);
613
614 TMTIMERSTATE enmState = pCur->enmState;
615 switch (enmState)
616 {
617 case TMTIMERSTATE_ACTIVE:
618 case TMTIMERSTATE_PENDING_STOP:
619 case TMTIMERSTATE_PENDING_RESCHEDULE:
620 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
621 if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
622 {
623 PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
624 Assert(pCur->offPrev || pCur == pCurAct);
625 while (pCurAct && pCurAct != pCur)
626 pCurAct = TMTIMER_GET_NEXT(pCurAct);
627 Assert(pCurAct == pCur);
628 }
629 break;
630
631 case TMTIMERSTATE_PENDING_SCHEDULE:
632 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
633 case TMTIMERSTATE_STOPPED:
634 case TMTIMERSTATE_EXPIRED_DELIVER:
635 if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
636 {
637 Assert(!pCur->offNext);
638 Assert(!pCur->offPrev);
639 for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
640 pCurAct;
641 pCurAct = TMTIMER_GET_NEXT(pCurAct))
642 {
643 Assert(pCurAct != pCur);
644 Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
645 Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
646 }
647 }
648 break;
649
650 /* ignore */
651 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
652 break;
653
654 /* shouldn't get here! */
655 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
656 case TMTIMERSTATE_DESTROY:
657 default:
658 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
659 break;
660 }
661 }
662# endif /* IN_RING3 */
663
664 if (fHaveVirtualSyncLock)
665 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
666}
667#endif /* !VBOX_STRICT */
668
669#ifdef VBOX_HIGH_RES_TIMERS_HACK
670
671/**
672 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
673 * EMT is polling.
674 *
675 * @returns See tmTimerPollInternal.
676 * @param pVM The cross context VM structure.
677 * @param u64Now Current virtual clock timestamp.
678 * @param u64Delta The delta to the next even in ticks of the
679 * virtual clock.
680 * @param pu64Delta Where to return the delta.
681 */
682DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
683{
684 Assert(!(u64Delta & RT_BIT_64(63)));
685
686 if (!pVM->tm.s.fVirtualWarpDrive)
687 {
688 *pu64Delta = u64Delta;
689 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
690 }
691
692 /*
693 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
694 */
695 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
696 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
697
698 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
699 u64GipTime -= u64Start; /* the start is GIP time. */
700 if (u64GipTime >= u64Delta)
701 {
702 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
703 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
704 }
705 else
706 {
707 u64Delta -= u64GipTime;
708 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
709 u64Delta += u64GipTime;
710 }
711 *pu64Delta = u64Delta;
712 u64GipTime += u64Start;
713 return u64GipTime;
714}
715
716
717/**
718 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
719 * than the one dedicated to timer work.
720 *
721 * @returns See tmTimerPollInternal.
722 * @param pVM The cross context VM structure.
723 * @param u64Now Current virtual clock timestamp.
724 * @param pu64Delta Where to return the delta.
725 */
726DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
727{
728 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
729 *pu64Delta = s_u64OtherRet;
730 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
731}
732
733
734/**
735 * Worker for tmTimerPollInternal.
736 *
737 * @returns See tmTimerPollInternal.
738 * @param pVM The cross context VM structure.
739 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
740 * @param pVCpuDst The cross context virtual CPU structure of the dedicated
741 * timer EMT.
742 * @param u64Now Current virtual clock timestamp.
743 * @param pu64Delta Where to return the delta.
744 * @param pCounter The statistics counter to update.
745 */
746DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
747 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
748{
749 STAM_COUNTER_INC(pCounter); NOREF(pCounter);
750 if (pVCpuDst != pVCpu)
751 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
752 *pu64Delta = 0;
753 return 0;
754}
755
756/**
757 * Common worker for TMTimerPollGIP and TMTimerPoll.
758 *
759 * This function is called before FFs are checked in the inner execution EM loops.
760 *
761 * @returns The GIP timestamp of the next event.
762 * 0 if the next event has already expired.
763 *
764 * @param pVM The cross context VM structure.
765 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
766 * @param pu64Delta Where to store the delta.
767 *
768 * @thread The emulation thread.
769 *
770 * @remarks GIP uses ns ticks.
771 */
772DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
773{
774 PVMCPU pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
775 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
776 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
777
778 /*
779 * Return straight away if the timer FF is already set ...
780 */
781 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
782 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
783
784 /*
785 * ... or if timers are being run.
786 */
787 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
788 {
789 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
790 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
791 }
792
793 /*
794 * Check for TMCLOCK_VIRTUAL expiration.
795 */
796 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
797 const int64_t i64Delta1 = u64Expire1 - u64Now;
798 if (i64Delta1 <= 0)
799 {
800 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
801 {
802 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
803 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
804#if defined(IN_RING3) && defined(VBOX_WITH_REM)
805 REMR3NotifyTimerPending(pVM, pVCpuDst);
806#endif
807 }
808 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
809 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
810 }
811
812 /*
813 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
814 * This isn't quite as straight forward if in a catch-up, not only do
815 * we have to adjust the 'now' but when have to adjust the delta as well.
816 */
817
818 /*
819 * Optimistic lockless approach.
820 */
821 uint64_t u64VirtualSyncNow;
822 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
823 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
824 {
825 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
826 {
827 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
828 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
829 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
830 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
831 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
832 {
833 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
834 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
835 if (i64Delta2 > 0)
836 {
837 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
838 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
839
840 if (pVCpu == pVCpuDst)
841 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
842 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
843 }
844
845 if ( !pVM->tm.s.fRunningQueues
846 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
847 {
848 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
849 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
850#if defined(IN_RING3) && defined(VBOX_WITH_REM)
851 REMR3NotifyTimerPending(pVM, pVCpuDst);
852#endif
853 }
854
855 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
856 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
857 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
858 }
859 }
860 }
861 else
862 {
863 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
864 LogFlow(("TMTimerPoll: stopped\n"));
865 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
866 }
867
868 /*
869 * Complicated lockless approach.
870 */
871 uint64_t off;
872 uint32_t u32Pct = 0;
873 bool fCatchUp;
874 int cOuterTries = 42;
875 for (;; cOuterTries--)
876 {
877 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
878 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
879 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
880 if (fCatchUp)
881 {
882 /* No changes allowed, try get a consistent set of parameters. */
883 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
884 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
885 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
886 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
887 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
888 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
889 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
890 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
891 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
892 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
893 || cOuterTries <= 0)
894 {
895 uint64_t u64Delta = u64Now - u64Prev;
896 if (RT_LIKELY(!(u64Delta >> 32)))
897 {
898 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
899 if (off > u64Sub + offGivenUp)
900 off -= u64Sub;
901 else /* we've completely caught up. */
902 off = offGivenUp;
903 }
904 else
905 /* More than 4 seconds since last time (or negative), ignore it. */
906 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
907
908 /* Check that we're still running and in catch up. */
909 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
910 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
911 break;
912 }
913 }
914 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
915 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
916 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
917 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
918 break; /* Got an consistent offset */
919
920 /* Repeat the initial checks before iterating. */
921 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
922 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
923 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
924 {
925 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
926 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
927 }
928 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
929 {
930 LogFlow(("TMTimerPoll: stopped\n"));
931 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
932 }
933 if (cOuterTries <= 0)
934 break; /* that's enough */
935 }
936 if (cOuterTries <= 0)
937 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
938 u64VirtualSyncNow = u64Now - off;
939
940 /* Calc delta and see if we've got a virtual sync hit. */
941 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
942 if (i64Delta2 <= 0)
943 {
944 if ( !pVM->tm.s.fRunningQueues
945 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
946 {
947 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
948 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
949#if defined(IN_RING3) && defined(VBOX_WITH_REM)
950 REMR3NotifyTimerPending(pVM, pVCpuDst);
951#endif
952 }
953 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
954 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
955 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
956 }
957
958 /*
959 * Return the time left to the next event.
960 */
961 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
962 if (pVCpu == pVCpuDst)
963 {
964 if (fCatchUp)
965 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
966 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
967 }
968 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
969}
970
971
972/**
973 * Set FF if we've passed the next virtual event.
974 *
975 * This function is called before FFs are checked in the inner execution EM loops.
976 *
977 * @returns true if timers are pending, false if not.
978 *
979 * @param pVM The cross context VM structure.
980 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
981 * @thread The emulation thread.
982 */
983VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
984{
985 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
986 uint64_t off = 0;
987 tmTimerPollInternal(pVM, pVCpu, &off);
988 return off == 0;
989}
990
991
992/**
993 * Set FF if we've passed the next virtual event.
994 *
995 * This function is called before FFs are checked in the inner execution EM loops.
996 *
997 * @param pVM The cross context VM structure.
998 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
999 * @thread The emulation thread.
1000 */
1001VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
1002{
1003 uint64_t off;
1004 tmTimerPollInternal(pVM, pVCpu, &off);
1005}
1006
1007
1008/**
1009 * Set FF if we've passed the next virtual event.
1010 *
1011 * This function is called before FFs are checked in the inner execution EM loops.
1012 *
1013 * @returns The GIP timestamp of the next event.
1014 * 0 if the next event has already expired.
1015 * @param pVM The cross context VM structure.
1016 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1017 * @param pu64Delta Where to store the delta.
1018 * @thread The emulation thread.
1019 */
1020VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
1021{
1022 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
1023}
1024
1025#endif /* VBOX_HIGH_RES_TIMERS_HACK */
1026
1027/**
1028 * Gets the host context ring-3 pointer of the timer.
1029 *
1030 * @returns HC R3 pointer.
1031 * @param pTimer Timer handle as returned by one of the create functions.
1032 */
1033VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
1034{
1035 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
1036}
1037
1038
1039/**
1040 * Gets the host context ring-0 pointer of the timer.
1041 *
1042 * @returns HC R0 pointer.
1043 * @param pTimer Timer handle as returned by one of the create functions.
1044 */
1045VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
1046{
1047 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
1048}
1049
1050
1051/**
1052 * Gets the RC pointer of the timer.
1053 *
1054 * @returns RC pointer.
1055 * @param pTimer Timer handle as returned by one of the create functions.
1056 */
1057VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
1058{
1059 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
1060}
1061
1062
1063/**
1064 * Locks the timer clock.
1065 *
1066 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
1067 * if the clock does not have a lock.
1068 * @param pTimer The timer which clock lock we wish to take.
1069 * @param rcBusy What to return in ring-0 and raw-mode context
1070 * if the lock is busy. Pass VINF_SUCCESS to
1071 * acquired the critical section thru a ring-3
1072 call if necessary.
1073 *
1074 * @remarks Currently only supported on timers using the virtual sync clock.
1075 */
1076VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
1077{
1078 AssertPtr(pTimer);
1079 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
1080 return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
1081}
1082
1083
1084/**
1085 * Unlocks a timer clock locked by TMTimerLock.
1086 *
1087 * @param pTimer The timer which clock to unlock.
1088 */
1089VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
1090{
1091 AssertPtr(pTimer);
1092 AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
1093 PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
1094}
1095
1096
1097/**
1098 * Checks if the current thread owns the timer clock lock.
1099 *
1100 * @returns @c true if its the owner, @c false if not.
1101 * @param pTimer The timer handle.
1102 */
1103VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
1104{
1105 AssertPtr(pTimer);
1106 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
1107 return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
1108}
1109
1110
1111/**
1112 * Optimized TMTimerSet code path for starting an inactive timer.
1113 *
1114 * @returns VBox status code.
1115 *
1116 * @param pVM The cross context VM structure.
1117 * @param pTimer The timer handle.
1118 * @param u64Expire The new expire time.
1119 */
1120static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
1121{
1122 Assert(!pTimer->offPrev);
1123 Assert(!pTimer->offNext);
1124 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1125
1126 TMCLOCK const enmClock = pTimer->enmClock;
1127
1128 /*
1129 * Calculate and set the expiration time.
1130 */
1131 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1132 {
1133 uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
1134 AssertMsgStmt(u64Expire >= u64Last,
1135 ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
1136 u64Expire = u64Last);
1137 }
1138 ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
1139 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));
1140
1141 /*
1142 * Link the timer into the active list.
1143 */
1144 tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1145
1146 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
1147 TM_UNLOCK_TIMERS(pVM);
1148 return VINF_SUCCESS;
1149}
1150
1151
1152/**
1153 * TMTimerSet for the virtual sync timer queue.
1154 *
1155 * This employs a greatly simplified state machine by always acquiring the
1156 * queue lock and bypassing the scheduling list.
1157 *
1158 * @returns VBox status code
1159 * @param pVM The cross context VM structure.
1160 * @param pTimer The timer handle.
1161 * @param u64Expire The expiration time.
1162 */
1163static int tmTimerVirtualSyncSet(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
1164{
1165 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1166 VM_ASSERT_EMT(pVM);
1167 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1168 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1169 AssertRCReturn(rc, rc);
1170
1171 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1172 TMTIMERSTATE enmState = pTimer->enmState;
1173 switch (enmState)
1174 {
1175 case TMTIMERSTATE_EXPIRED_DELIVER:
1176 case TMTIMERSTATE_STOPPED:
1177 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1178 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
1179 else
1180 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);
1181
1182 AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
1183 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
1184 pTimer->u64Expire = u64Expire;
1185 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1186 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1187 rc = VINF_SUCCESS;
1188 break;
1189
1190 case TMTIMERSTATE_ACTIVE:
1191 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
1192 tmTimerQueueUnlinkActive(pQueue, pTimer);
1193 pTimer->u64Expire = u64Expire;
1194 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1195 rc = VINF_SUCCESS;
1196 break;
1197
1198 case TMTIMERSTATE_PENDING_RESCHEDULE:
1199 case TMTIMERSTATE_PENDING_STOP:
1200 case TMTIMERSTATE_PENDING_SCHEDULE:
1201 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1202 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1203 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1204 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1205 case TMTIMERSTATE_DESTROY:
1206 case TMTIMERSTATE_FREE:
1207 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1208 rc = VERR_TM_INVALID_STATE;
1209 break;
1210
1211 default:
1212 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1213 rc = VERR_TM_UNKNOWN_STATE;
1214 break;
1215 }
1216
1217 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1218 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1219 return rc;
1220}
1221
1222
1223/**
1224 * Arm a timer with a (new) expire time.
1225 *
1226 * @returns VBox status code.
1227 * @param pTimer Timer handle as returned by one of the create functions.
1228 * @param u64Expire New expire time.
1229 */
1230VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
1231{
1232 PVM pVM = pTimer->CTX_SUFF(pVM);
1233
1234 /* Treat virtual sync timers specially. */
1235 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
1236 return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);
1237
1238 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1239 TMTIMER_ASSERT_CRITSECT(pTimer);
1240
1241 DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", R3STRING(pTimer->pszDesc));
1242
1243#ifdef VBOX_WITH_STATISTICS
1244 /*
1245 * Gather optimization info.
1246 */
1247 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
1248 TMTIMERSTATE enmOrgState = pTimer->enmState;
1249 switch (enmOrgState)
1250 {
1251 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
1252 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
1253 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
1254 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
1255 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
1256 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
1257 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
1258 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
1259 }
1260#endif
1261
1262 /*
1263 * The most common case is setting the timer again during the callback.
1264 * The second most common case is starting a timer at some other time.
1265 */
1266#if 1
1267 TMTIMERSTATE enmState1 = pTimer->enmState;
1268 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
1269 || ( enmState1 == TMTIMERSTATE_STOPPED
1270 && pTimer->pCritSect))
1271 {
1272 /* Try take the TM lock and check the state again. */
1273 if (RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM)))
1274 {
1275 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
1276 {
1277 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
1278 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1279 return VINF_SUCCESS;
1280 }
1281 TM_UNLOCK_TIMERS(pVM);
1282 }
1283 }
1284#endif
1285
1286 /*
1287 * Unoptimized code path.
1288 */
1289 int cRetries = 1000;
1290 do
1291 {
1292 /*
1293 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1294 */
1295 TMTIMERSTATE enmState = pTimer->enmState;
1296 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
1297 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
1298 switch (enmState)
1299 {
1300 case TMTIMERSTATE_EXPIRED_DELIVER:
1301 case TMTIMERSTATE_STOPPED:
1302 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1303 {
1304 Assert(!pTimer->offPrev);
1305 Assert(!pTimer->offNext);
1306 pTimer->u64Expire = u64Expire;
1307 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1308 tmSchedule(pTimer);
1309 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1310 return VINF_SUCCESS;
1311 }
1312 break;
1313
1314 case TMTIMERSTATE_PENDING_SCHEDULE:
1315 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1316 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1317 {
1318 pTimer->u64Expire = u64Expire;
1319 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1320 tmSchedule(pTimer);
1321 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1322 return VINF_SUCCESS;
1323 }
1324 break;
1325
1326
1327 case TMTIMERSTATE_ACTIVE:
1328 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1329 {
1330 pTimer->u64Expire = u64Expire;
1331 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1332 tmSchedule(pTimer);
1333 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1334 return VINF_SUCCESS;
1335 }
1336 break;
1337
1338 case TMTIMERSTATE_PENDING_RESCHEDULE:
1339 case TMTIMERSTATE_PENDING_STOP:
1340 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1341 {
1342 pTimer->u64Expire = u64Expire;
1343 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1344 tmSchedule(pTimer);
1345 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1346 return VINF_SUCCESS;
1347 }
1348 break;
1349
1350
1351 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1352 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1353 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1354#ifdef IN_RING3
1355 if (!RTThreadYield())
1356 RTThreadSleep(1);
1357#else
1358/** @todo call host context and yield after a couple of iterations */
1359#endif
1360 break;
1361
1362 /*
1363 * Invalid states.
1364 */
1365 case TMTIMERSTATE_DESTROY:
1366 case TMTIMERSTATE_FREE:
1367 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1368 return VERR_TM_INVALID_STATE;
1369 default:
1370 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1371 return VERR_TM_UNKNOWN_STATE;
1372 }
1373 } while (cRetries-- > 0);
1374
1375 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1376 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1377 return VERR_TM_TIMER_UNSTABLE_STATE;
1378}
1379
1380
1381/**
1382 * Return the current time for the specified clock, setting pu64Now if not NULL.
1383 *
1384 * @returns Current time.
1385 * @param pVM The cross context VM structure.
1386 * @param enmClock The clock to query.
1387 * @param pu64Now Optional pointer where to store the return time
1388 */
1389DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1390{
1391 uint64_t u64Now;
1392 switch (enmClock)
1393 {
1394 case TMCLOCK_VIRTUAL_SYNC:
1395 u64Now = TMVirtualSyncGet(pVM);
1396 break;
1397 case TMCLOCK_VIRTUAL:
1398 u64Now = TMVirtualGet(pVM);
1399 break;
1400 case TMCLOCK_REAL:
1401 u64Now = TMRealGet(pVM);
1402 break;
1403 default:
1404 AssertFatalMsgFailed(("%d\n", enmClock));
1405 }
1406
1407 if (pu64Now)
1408 *pu64Now = u64Now;
1409 return u64Now;
1410}
1411
1412
1413/**
1414 * Optimized TMTimerSetRelative code path.
1415 *
1416 * @returns VBox status code.
1417 *
1418 * @param pVM The cross context VM structure.
1419 * @param pTimer The timer handle.
1420 * @param cTicksToNext Clock ticks until the next time expiration.
1421 * @param pu64Now Where to return the current time stamp used.
1422 * Optional.
1423 */
1424static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1425{
1426 Assert(!pTimer->offPrev);
1427 Assert(!pTimer->offNext);
1428 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1429
1430 /*
1431 * Calculate and set the expiration time.
1432 */
1433 TMCLOCK const enmClock = pTimer->enmClock;
1434 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1435 pTimer->u64Expire = u64Expire;
1436 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));
1437
1438 /*
1439 * Link the timer into the active list.
1440 */
1441 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", R3STRING(pTimer->pszDesc));
1442 tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1443
1444 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1445 TM_UNLOCK_TIMERS(pVM);
1446 return VINF_SUCCESS;
1447}
1448
1449
1450/**
1451 * TMTimerSetRelative for the virtual sync timer queue.
1452 *
1453 * This employs a greatly simplified state machine by always acquiring the
1454 * queue lock and bypassing the scheduling list.
1455 *
1456 * @returns VBox status code
1457 * @param pVM The cross context VM structure.
1458 * @param pTimer The timer to (re-)arm.
1459 * @param cTicksToNext Clock ticks until the next time expiration.
1460 * @param pu64Now Where to return the current time stamp used.
1461 * Optional.
1462 */
1463static int tmTimerVirtualSyncSetRelative(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1464{
1465 STAM_PROFILE_START(pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1466 VM_ASSERT_EMT(pVM);
1467 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1468 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1469 AssertRCReturn(rc, rc);
1470
1471 /* Calculate the expiration tick. */
1472 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1473 if (pu64Now)
1474 *pu64Now = u64Expire;
1475 u64Expire += cTicksToNext;
1476
1477 /* Update the timer. */
1478 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1479 TMTIMERSTATE enmState = pTimer->enmState;
1480 switch (enmState)
1481 {
1482 case TMTIMERSTATE_EXPIRED_DELIVER:
1483 case TMTIMERSTATE_STOPPED:
1484 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1485 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1486 else
1487 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1488 pTimer->u64Expire = u64Expire;
1489 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1490 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1491 rc = VINF_SUCCESS;
1492 break;
1493
1494 case TMTIMERSTATE_ACTIVE:
1495 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1496 tmTimerQueueUnlinkActive(pQueue, pTimer);
1497 pTimer->u64Expire = u64Expire;
1498 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1499 rc = VINF_SUCCESS;
1500 break;
1501
1502 case TMTIMERSTATE_PENDING_RESCHEDULE:
1503 case TMTIMERSTATE_PENDING_STOP:
1504 case TMTIMERSTATE_PENDING_SCHEDULE:
1505 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1506 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1507 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1508 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1509 case TMTIMERSTATE_DESTROY:
1510 case TMTIMERSTATE_FREE:
1511 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1512 rc = VERR_TM_INVALID_STATE;
1513 break;
1514
1515 default:
1516 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1517 rc = VERR_TM_UNKNOWN_STATE;
1518 break;
1519 }
1520
1521 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1522 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1523 return rc;
1524}
1525
1526
1527/**
1528 * Arm a timer with a expire time relative to the current time.
1529 *
1530 * @returns VBox status code.
1531 * @param pTimer Timer handle as returned by one of the create functions.
1532 * @param cTicksToNext Clock ticks until the next time expiration.
1533 * @param pu64Now Where to return the current time stamp used.
1534 * Optional.
1535 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling and we can also be 100% sure that we're not racing the
     * running of the timer queues.  As an additional restraint we require the
     * timer to have a critical section associated with it, so we can be 100%
     * sure there aren't concurrent operations on the timer.  (The latter
     * restraint is no longer strictly necessary, as concurrent operations
     * aren't supported for any timers, critsect or not.)
     *
     * Note! Lock ordering doesn't apply when we only try to
     *       get the innermost locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(  (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                         || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif

    /*
     * Unoptimized path.
     */
    int rc;
    TMCLOCK const enmClock = pTimer->enmClock;
    for (int cRetries = 1000; ; cRetries--)
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_STOPPED:
                if (enmClock == TMCLOCK_VIRTUAL_SYNC)
                {
                    /** @todo To fix the assertion in tmR3TimerQueueRunVirtualSync:
                     *        Figure out a safe way of activating this timer while the queue is
                     *        being run.
                     *        (99.9% sure the assertion is caused by DevAPIC.cpp
                     *        re-starting the timer in response to an initial_count write.) */
                }
                RT_FALL_THRU();
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                rc = VERR_TRY_AGAIN;
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_INVALID_STATE;
                break;

            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_UNKNOWN_STATE;
                break;
        }

        /* switch + loop is tedious to break out of. */
        if (rc == VINF_SUCCESS)
            break;

        if (rc != VERR_TRY_AGAIN)
        {
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }
        if (cRetries <= 0)
        {
            AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_TIMER_UNSTABLE_STATE;
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }

        /*
         * Retry to gain locks.
         */
        if (!fOwnTMLock)
            fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));

    } /* for (;;) */

    /*
     * Clean up and return.
     */
    if (fOwnTMLock)
        TM_UNLOCK_TIMERS(pVM);

    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    return rc;
}
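
/*
 * Non-normative usage sketch: a timer callback re-arming its timer one
 * millisecond after "now", using the pu64Now parameter to avoid a second
 * clock query.  The device and callback names are hypothetical; the callback
 * signature follows FNTMTIMERDEV:
 *
 *     static DECLCALLBACK(void) devHypoTimerCallback(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
 *     {
 *         uint64_t u64Now;
 *         int rc = TMTimerSetRelative(pTimer, TMTimerFromMilli(pTimer, 1), &u64Now);
 *         AssertRC(rc);
 *         // u64Now holds the clock value the new expire time is relative to.
 *     }
 */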


/**
 * Drops a hint about the frequency of the timer.
 *
 * This is used by TM and the VMM to calculate how often guest execution needs
 * to be interrupted.  The hint is automatically cleared by TMTimerStop.
 *
 * @returns VBox status code.
 * @param   pTimer      Timer handle as returned by one of the create
 *                      functions.
 * @param   uHzHint     The frequency hint.  Pass 0 to clear the hint.
 *
 * @remarks We're using an integer hertz value here since anything above 1 Hz
 *          is not going to be any trouble to satisfy scheduling-wise.  The
 *          range where it makes sense is >= 100 Hz.
 */
VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
{
    TMTIMER_ASSERT_CRITSECT(pTimer);

    uint32_t const uHzOldHint = pTimer->uHzHint;
    pTimer->uHzHint = uHzHint;

    PVM pVM = pTimer->CTX_SUFF(pVM);
    uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
    if (   uHzHint > uMaxHzHint
        || uHzOldHint >= uMaxHzHint)
        ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);

    return VINF_SUCCESS;
}
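
/*
 * Non-normative usage sketch: a device (pThis->pTimer is hypothetical) that
 * just programmed a 1 kHz periodic timer tells TM about it so a suitable host
 * timer frequency can be chosen; TMTimerStop clears the hint automatically:
 *
 *     TMTimerSetFrequencyHint(pThis->pTimer, 1000);
 *     TMTimerSetMillies(pThis->pTimer, 1);
 */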


/**
 * TMTimerStop for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM     The cross context VM structure.
 * @param   pTimer  The timer handle.
 */
static int tmTimerVirtualSyncStop(PVM pVM, PTMTIMER pTimer)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    /* Reset the HZ hint. */
    if (pTimer->uHzHint)
    {
        if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
        pTimer->uHzHint = 0;
    }

    /* Update the timer state. */
    PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_ACTIVE:
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_EXPIRED_DELIVER:
            TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_STOPPED:
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Stop the timer.
 * Use TMR3TimerArm() to "un-stop" the timer.
 *
 * @returns VBox status code.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncStop(pVM, pTimer);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    /*
     * Reset the HZ hint.
     */
    if (pTimer->uHzHint)
    {
        if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
        pTimer->uHzHint = 0;
    }

    /** @todo see if this function needs optimizing. */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
                //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
                return VERR_INVALID_PARAMETER;

            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                return VINF_SUCCESS;

            case TMTIMERSTATE_PENDING_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host and yield cpu after a while. */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}
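
/*
 * Non-normative usage sketch: stopping the timer when a hypothetical guest
 * register write disables it.  Stopping an already stopped timer is harmless
 * (VINF_SUCCESS), so no TMTimerIsActive check is needed first:
 *
 *     if (!(uNewCfg & HYPOTHETICAL_CFG_TIMER_ENABLE))
 *     {
 *         int rc = TMTimerStop(pThis->pTimer);
 *         AssertRC(rc);
 *     }
 */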


/**
 * Get the current clock time.
 * Handy for calculating the new expire time.
 *
 * @returns Current clock time.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);

    uint64_t u64;
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            u64 = TMVirtualGet(pVM);
            break;
        case TMCLOCK_VIRTUAL_SYNC:
            u64 = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64 = TMRealGet(pVM);
            break;
        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return UINT64_MAX;
    }
    //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
    //      u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return u64;
}
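
/*
 * Non-normative usage sketch of the "calculating the new expire time" hint in
 * the doc comment above, combined with TMTimerSet (defined earlier in this
 * file) and the TMTimerFromMilli conversion helper below:
 *
 *     uint64_t const u64Now = TMTimerGet(pTimer);
 *     int rc = TMTimerSet(pTimer, u64Now + TMTimerFromMilli(pTimer, 10));
 *     AssertRC(rc);
 */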


/**
 * Get the frequency of the timer clock.
 *
 * @returns Clock frequency (as Hz of course).
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            return TMCLOCK_FREQ_VIRTUAL;

        case TMCLOCK_REAL:
            return TMCLOCK_FREQ_REAL;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Get the expire time of the timer.
 * Only valid for active timers.
 *
 * @returns Expire time of the timer.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
{
    TMTIMER_ASSERT_CRITSECT(pTimer);
    int cRetries = 1000;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;

            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return pTimer->u64Expire;

            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
          pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return ~(uint64_t)0;
}


/**
 * Checks if a timer is active or not.
 *
 * @returns True if active.
 * @returns False if not active.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
{
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_STOPPED:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;

        case TMTIMERSTATE_ACTIVE:
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return true;

        /*
         * Invalid states.
         */
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;
        default:
            AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
            return false;
    }
}
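
/*
 * Non-normative usage sketch combining TMTimerIsActive and TMTimerGetExpire,
 * e.g. when persisting timer state (uSavedExpire is a hypothetical variable;
 * ring-3 code would normally use the dedicated TM saved-state helpers):
 *
 *     if (TMTimerIsActive(pTimer))
 *         uSavedExpire = TMTimerGetExpire(pTimer);  // valid: timer is active
 *     else
 *         uSavedExpire = UINT64_MAX;                // marker for "not armed"
 */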


/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */


/**
 * Arm a timer with a (new) expire time relative to current time.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cMilliesToNext  Number of milliseconds to the next tick.
 */
VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_TM_TIMER_BAD_CLOCK;
    }
}


/**
 * Arm a timer with a (new) expire time relative to current time.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cMicrosToNext   Number of microseconds to the next tick.
 */
VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_TM_TIMER_BAD_CLOCK;
    }
}


/**
 * Arm a timer with a (new) expire time relative to current time.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cNanosToNext    Number of nanoseconds to the next tick.
 */
VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cNanosToNext, NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cNanosToNext, NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_TM_TIMER_BAD_CLOCK;
    }
}
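
/*
 * Non-normative note: the three setters above differ only in the input unit;
 * each converts the delta to the timer clock's ticks and calls
 * TMTimerSetRelative.  These calls are thus equivalent on any clock:
 *
 *     TMTimerSetMillies(pTimer, 5);
 *     TMTimerSetMicro(pTimer, 5000);
 *     TMTimerSetNano(pTimer, 5000000);
 */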


/**
 * Get the current clock time as nanoseconds.
 *
 * @returns The timer clock as nanoseconds.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
{
    return TMTimerToNano(pTimer, TMTimerGet(pTimer));
}


/**
 * Get the current clock time as microseconds.
 *
 * @returns The timer clock as microseconds.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
{
    return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
}


/**
 * Get the current clock time as milliseconds.
 *
 * @returns The timer clock as milliseconds.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
{
    return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
}


/**
 * Converts the specified timer clock time to nanoseconds.
 *
 * @returns nanoseconds.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   u64Ticks    The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks * 1000000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified timer clock time to microseconds.
 *
 * @returns microseconds.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   u64Ticks    The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks / 1000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks * 1000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified timer clock time to milliseconds.
 *
 * @returns milliseconds.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   u64Ticks    The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks / 1000000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}
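
/*
 * Non-normative example of the tick-to-time conversions above for a timer on
 * the TMCLOCK_REAL clock (1000 Hz, i.e. one tick per millisecond):
 *
 *     TMTimerToNano(pTimer, 5);   // 5 ticks -> 5000000 ns
 *     TMTimerToMicro(pTimer, 5);  // 5 ticks -> 5000 us
 *     TMTimerToMilli(pTimer, 5);  // 5 ticks -> 5 ms (identity on this clock)
 */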


/**
 * Converts the specified nanosecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   cNanoSecs   The nanosecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t cNanoSecs)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return cNanoSecs;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return cNanoSecs / 1000000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified microsecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   cMicroSecs  The microsecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t cMicroSecs)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return cMicroSecs * 1000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return cMicroSecs / 1000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified millisecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   cMilliSecs  The millisecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t cMilliSecs)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return cMilliSecs * 1000000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return cMilliSecs;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}
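
/*
 * Non-normative example of the time-to-tick conversions above, again for a
 * TMCLOCK_REAL timer, showing the truncating division the remarks warn about
 * (sub-millisecond fractions are simply dropped):
 *
 *     TMTimerFromNano(pTimer, 1999999);  // -> 1 tick, 0.999999 ms lost
 *     TMTimerFromMicro(pTimer, 1999);    // -> 1 tick
 *     TMTimerFromMilli(pTimer, 2);       // -> 2 ticks
 */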


/**
 * Convert state to string.
 *
 * @returns Read-only state name.
 * @param   enmState    State.
 */
const char *tmTimerState(TMTIMERSTATE enmState)
{
    switch (enmState)
    {
#define CASE(num, state) \
            case TMTIMERSTATE_##state: \
                AssertCompile(TMTIMERSTATE_##state == (num)); \
                return #num "-" #state
        CASE( 1,STOPPED);
        CASE( 2,ACTIVE);
        CASE( 3,EXPIRED_GET_UNLINK);
        CASE( 4,EXPIRED_DELIVER);
        CASE( 5,PENDING_STOP);
        CASE( 6,PENDING_STOP_SCHEDULE);
        CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
        CASE( 8,PENDING_SCHEDULE);
        CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
        CASE(10,PENDING_RESCHEDULE);
        CASE(11,DESTROY);
        CASE(12,FREE);
        default:
            AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
            return "Invalid state!";
#undef CASE
    }
}
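
/*
 * Note: the CASE macro compile-time asserts that the enum values match the
 * embedded numbers, so e.g. the following always logs "state=2-ACTIVE" for an
 * active timer, and a renumbered enum breaks the build rather than the logs:
 *
 *     Log(("state=%s\n", tmTimerState(TMTIMERSTATE_ACTIVE)));
 */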


/**
 * Gets the highest frequency hint for all the important timers.
 *
 * @returns The highest frequency.  0 if no timers care.
 * @param   pVM     The cross context VM structure.
 */
static uint32_t tmGetFrequencyHint(PVM pVM)
{
    /*
     * Query the value, recalculate it if necessary.
     *
     * The "right" highest frequency value isn't so important that we'll block
     * waiting on the timer semaphore.
     */
    uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
    if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
    {
        if (RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
        {
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);

            /*
             * Loop over the timers associated with each clock.
             */
            uMaxHzHint = 0;
            for (int i = 0; i < TMCLOCK_MAX; i++)
            {
                PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
                for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
                {
                    uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
                    if (uHzHint > uMaxHzHint)
                    {
                        switch (pCur->enmState)
                        {
                            case TMTIMERSTATE_ACTIVE:
                            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                            case TMTIMERSTATE_EXPIRED_DELIVER:
                            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_SCHEDULE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE:
                                uMaxHzHint = uHzHint;
                                break;

                            case TMTIMERSTATE_STOPPED:
                            case TMTIMERSTATE_PENDING_STOP:
                            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                            case TMTIMERSTATE_DESTROY:
                            case TMTIMERSTATE_FREE:
                                break;
                            /* no default, want gcc warnings when adding more states. */
                        }
                    }
                }
            }
            ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
            Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
            TM_UNLOCK_TIMERS(pVM);
        }
    }
    return uMaxHzHint;
}
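
/*
 * Non-normative note on the pattern above: the cached hint is re-read without
 * the lock and only recalculated under a try-lock, so a concurrent caller may
 * briefly see a stale value.  That is acceptable here because the consumer
 * (TMCalcHostTimerFrequency below) treats it as a scheduling heuristic only.
 * A minimal generic sketch of the same pattern (all names are hypothetical):
 *
 *     uint32_t uValue = ASMAtomicUoReadU32(&pShared->uCached);
 *     if (   ASMAtomicReadBool(&pShared->fDirty)
 *         && RT_SUCCESS(tryEnterLock(pShared)))
 *     {
 *         ASMAtomicWriteBool(&pShared->fDirty, false);
 *         uValue = recalcValue(pShared);
 *         ASMAtomicWriteU32(&pShared->uCached, uValue);
 *         leaveLock(pShared);
 *     }
 */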


/**
 * Calculates a host timer frequency that would be suitable for the current
 * timer load.
 *
 * This will take the highest timer frequency, adjust for catch-up and warp
 * driver, and finally add a little fudge factor.  The caller (VMM) will use
 * the result to adjust the per-cpu preemption timer.
 *
 * @returns The highest frequency.  0 if no important timers around.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVM pVM, PVMCPU pVCpu)
{
    uint32_t uHz = tmGetFrequencyHint(pVM);

    /* Catch up, we have to be more aggressive than the % indicates at the
       beginning of the effort. */
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
    {
        uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            if (u32Pct <= 100)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
            else if (u32Pct <= 200)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
            else if (u32Pct <= 400)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
            uHz *= u32Pct + 100;
            uHz /= 100;
        }
    }

    /* Warp drive. */
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
    {
        uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
        {
            uHz *= u32Pct;
            uHz /= 100;
        }
    }

    /* Fudge factor. */
    if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
        uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
    else
        uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
    uHz /= 100;

    /* Make sure it isn't too high. */
    if (uHz > pVM->tm.s.cHostHzMax)
        uHz = pVM->tm.s.cHostHzMax;

    return uHz;
}
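
/*
 * Non-normative worked example of the math above, with assumed configuration
 * values (the fudge factors are configurable, so treat these numbers as
 * illustrative only): with uHz = 1000 from the hints, catch-up active at
 * u32Pct = 100 and cPctHostHzFudgeFactorCatchUp100 = 300, we get
 * u32Pct = 100 * 300 / 100 = 300 and uHz = 1000 * (300 + 100) / 100 = 4000,
 * i.e. the preemption timer runs four times as fast while catching up,
 * before the per-CPU fudge factor and the final cHostHzMax clamp are applied.
 */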


/**
 * Whether the guest virtual clock is ticking.
 *
 * @returns true if ticking, false otherwise.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
{
    return RT_BOOL(pVM->tm.s.cVirtualTicking);
}
