VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 82333

Last change on this file since 82333 was 82333, checked in by vboxsync, 5 years ago

TM: Added timer statistics. bugref:9218

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 93.1 KB
Line 
1/* $Id: TMAll.cpp 82333 2019-12-03 01:03:18Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#ifdef DEBUG_bird
24# define DBGFTRACE_DISABLED /* annoying */
25#endif
26#include <VBox/vmm/tm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/dbgftrace.h>
29#ifdef IN_RING3
30#endif
31#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
32#include "TMInternal.h"
33#include <VBox/vmm/vmcc.h>
34
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <VBox/sup.h>
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-math.h>
43#ifdef IN_RING3
44# include <iprt/thread.h>
45#endif
46
47#include "TMInline.h"
48
49
/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#ifdef VBOX_STRICT
/** @def TMTIMER_GET_CRITSECT
 * Helper for safely resolving the critical section for a timer belonging to a
 * device instance.  In ring-3 the R3 pointer stored in the timer can be used
 * directly; in ring-0 it has to be translated first (see tmRZTimerGetCritSect).
 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
# ifdef IN_RING3
#  define TMTIMER_GET_CRITSECT(pTimer)  ((pTimer)->pCritSect)
# else
#  define TMTIMER_GET_CRITSECT(pTimer)  tmRZTimerGetCritSect(pTimer)
# endif
#endif

/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer.  Ownership is not insisted upon while the VM is being created,
 * reset or live-reset (see the VMSTATE checks below). */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pTimer); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner(pCritSect) \
                          || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif

/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
 * Checks for lock order trouble between the timer critsect and the critical
 * section critsect.  The virtual sync critsect must always be entered before
 * the one associated with the timer (see TMR3TimerQueuesDo).  It is OK if there
 * isn't any critical section associated with the timer or if the calling thread
 * doesn't own it, ASSUMING of course that the thread using this macro is going
 * to enter the virtual sync critical section anyway.
 *
 * @remarks This is a slightly relaxed timer locking attitude compared to
 *          TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
 *          should know what it's doing if it's stopping or starting a timer
 *          without taking the device lock.
 */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pTimer); \
            AssertMsg(   pCritSect \
                      && (   !PDMCritSectIsOwner(pCritSect) \
                          || PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock) \
                          || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
#endif
121
122
#if defined(VBOX_STRICT) && defined(IN_RING0)
/**
 * Helper for TMTIMER_GET_CRITSECT: translates the ring-3 critical section
 * pointer stored in the timer into something usable in ring-0.
 *
 * @returns Ring-0 critical section pointer.
 * @param   pTimer      The timer.
 * @todo This needs a redo!
 */
DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PTMTIMER pTimer)
{
    if (pTimer->enmType == TMTIMERTYPE_DEV)
    {
        /* Hop from the ring-3 device instance to the ring-0 one.  The
           pDevInsR0RemoveMe member is read from ring-3 data, hence the @todo. */
        PPDMDEVINSR0        pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
        struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
        /* Common case: the timer uses the device instance's own critsect. */
        if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
            return pDevInsR0->pCritSectRoR0;
        /* If the critsect lives within the shared instance data, apply the
           same offset to the ring-0 mapping of that data. */
        uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
        if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
            return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
    }
    /* Fall back on translating the pointer via the hyper heap mapping. */
    return (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), pTimer->pCritSect);
}
#endif /* VBOX_STRICT && IN_RING0 */
143
144
/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only ticks when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Record the start timestamp first; TMNotifyEndOfExecution uses it to
       compute the time spent executing guest code. */
    pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
#endif
    /* Resume the TSC if it is configured to only tick while executing. */
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}
164
165
/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Update the ns accounting: total elapsed, time spent executing, and the
       'other' remainder (whatever is neither executing nor halted). */
    uint64_t const u64NsTs           = RTTimeNanoTS();
    uint64_t const cNsTotalNew       = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    uint64_t const cNsExecutingDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
    uint64_t const cNsExecutingNew   = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    uint64_t const cNsOtherNew       = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    /* Bucket the execution period by length: < 5us, < 50us, or longer. */
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
    int64_t  const cNsOtherNewDelta  = cNsOtherNew - pVCpu->tm.s.cNsOther;
    if (cNsOtherNewDelta > 0)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before execution) */
# endif

    /* Publish the new values under an odd/even generation counter: the
       counter is odd while we're updating, so lockless readers can detect
       torn reads and retry. */
    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cNsTotal     = cNsTotalNew;
    pVCpu->tm.s.cNsOther     = cNsOtherNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}
211
212
/**
 * Notification that the cpu is entering the halt state
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Record the start timestamp; TMNotifyEndOfHalt uses it to compute the
       time spent halted. */
    pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
#endif

    /* Resume the TSC if it ticks during execution and halting alike. */
    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}
235
236
237/**
238 * Notification that the cpu is leaving the halt state
239 *
240 * This call must always be paired with a TMNotifyStartOfHalt call.
241 *
242 * The function may, depending on the configuration, suspend the TSC and future
243 * clocks that only ticks when we're halted.
244 *
245 * @param pVCpu The cross context virtual CPU structure.
246 */
247VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
248{
249 PVM pVM = pVCpu->CTX_SUFF(pVM);
250
251 if ( pVM->tm.s.fTSCTiedToExecution
252 && !pVM->tm.s.fTSCNotTiedToHalt)
253 tmCpuTickPause(pVCpu);
254
255#ifndef VBOX_WITHOUT_NS_ACCOUNTING
256 uint64_t const u64NsTs = RTTimeNanoTS();
257 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
258 uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
259 uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
260 uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;
261
262# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
263 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
264 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
265 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
266 if (cNsOtherNewDelta > 0)
267 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before halting) */
268# endif
269
270 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
271 pVCpu->tm.s.cNsHalted = cNsHaltedNew;
272 pVCpu->tm.s.cNsTotal = cNsTotalNew;
273 pVCpu->tm.s.cNsOther = cNsOtherNew;
274 pVCpu->tm.s.cPeriodsHalted++;
275 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
276#endif
277}
278
279
/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * @param   pVM     The cross context VM structure.
 */
DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
{
    PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
    /* Only set the FF and wake the EMT if it isn't set already. */
    if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
    {
        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
        /* In ring-3 we can poke the EMT out of whatever wait it is in. */
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    }
}
298
299
/**
 * Schedule the queue which was changed.
 *
 * When called on the EMT with the timer lock available, the queue is
 * processed right away; otherwise the dedicated timer EMT is notified via
 * the timer force action flag (if the timer is pending scheduling).
 *
 * @param   pTimer      The timer whose queue needs scheduling.
 */
DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);
    if (    VM_IS_EMT(pVM)
        &&  RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
    {
        STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
        tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
#ifdef VBOX_STRICT
        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
        STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        TM_UNLOCK_TIMERS(pVM);
    }
    else
    {
        /* Couldn't do it here and now: poke the dedicated timer EMT if the
           timer is still waiting to be scheduled. */
        TMTIMERSTATE enmState = pTimer->enmState;
        if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
            tmScheduleNotify(pVM);
    }
}
325
326
/**
 * Try change the state to enmStateNew from enmStateOld.
 *
 * Unlike tmTimerTryWithLink, this does NOT link the timer into the
 * scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}
345
346
/**
 * Links the timer onto the scheduling queue.
 *
 * The schedule list is a lockless LIFO whose links are offsets: the head
 * offset is relative to the queue, each next-offset relative to the entry.
 *
 * @param   pQueue  The timer queue the timer belongs to.
 * @param   pTimer  The timer.
 *
 * @todo FIXME: Look into potential race with the thread running the queues
 *       and stuff.
 */
DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(!pTimer->offScheduleNext);
    const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
    int32_t offHead;
    do
    {
        /* Point the timer at the current head (if any), then try make the
           timer the new head; retry if the list changed under us. */
        offHead = pQueue->offSchedule;
        if (offHead)
            pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
        else
            pTimer->offScheduleNext = 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
}
370
371
/**
 * Try change the state to enmStateNew from enmStateOld
 * and link the timer into the scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
    {
        /* Queue the timer for scheduling on the queue matching its clock. */
        tmTimerLinkSchedule(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
        return true;
    }
    return false;
}
390
391
/**
 * Links a timer into the active list of a timer queue.
 *
 * The active list is kept sorted by ascending expiration time, and the
 * queue's u64Expire mirror is updated whenever the list head changes.
 *
 * @param   pQueue      The queue.
 * @param   pTimer      The timer.
 * @param   u64Expire   The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offNext);
    Assert(!pTimer->offPrev);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */

    PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
    if (pCur)
    {
        /* Walk the list and insert in front of the first timer expiring later. */
        for (;; pCur = TMTIMER_GET_NEXT(pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                TMTIMER_SET_NEXT(pTimer, pCur);
                TMTIMER_SET_PREV(pTimer, pPrev);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pTimer);
                else
                {
                    /* New head: publish the earlier expiration time for pollers. */
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive head", R3STRING(pTimer->pszDesc));
                }
                TMTIMER_SET_PREV(pCur, pTimer);
                return;
            }
            if (!pCur->offNext)
            {
                /* Reached the tail without finding a later timer: append. */
                TMTIMER_SET_NEXT(pCur, pTimer);
                TMTIMER_SET_PREV(pTimer, pCur);
                DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive tail", R3STRING(pTimer->pszDesc));
                return;
            }
        }
    }
    else
    {
        /* Empty list: the timer becomes both head and tail. */
        TMTIMER_SET_HEAD(pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive empty", R3STRING(pTimer->pszDesc));
    }
}
444
445
446
/**
 * Schedules the given timer on the given queue.
 *
 * Drives the timer through its pending-* states into a stable state (active
 * or stopped), retrying a couple of times when racing concurrent state
 * changes.
 *
 * @param   pQueue      The timer queue.
 * @param   pTimer      The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);

    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break; /* retry */
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                /* Push the timer back onto the schedule list for a later pass. */
                tmTimerLinkSchedule(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
536
537
/**
 * Schedules the specified timer queue.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pQueue  The queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);
    NOREF(pVM);

    /*
     * Dequeue the scheduling list and iterate it.
     */
    int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0); /* atomically claim the whole pending list */
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
    if (!offNext)
        return;
    PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext); /* the head offset is relative to the queue */
    while (pNext)
    {
        /*
         * Unlink the head timer and find the next one.
         */
        PTMTIMER pTimer = pNext;
        /* offScheduleNext links are relative to each entry; zero terminates. */
        pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
        pTimer->offScheduleNext = 0;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
        tmTimerQueueScheduleOne(pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    } /* foreach timer in current schedule batch. */
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}
578
579
#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszWhere    Caller location clue.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);

    /*
     * Check the linking of the active lists.
     */
    bool fHaveVirtualSyncLock = false;
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
        {
            /* The virtual sync queue is guarded by its own lock; skip its
               checks rather than block if we can't get that lock now. */
            if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) != VINF_SUCCESS)
                continue;
            fHaveVirtualSyncLock = true;
        }
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            /* Each active timer must belong to this clock and be back-linked
               to its predecessor. */
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    /* Note: deliberately re-reads enmState so a concurrent
                       state change between the two reads doesn't trigger. */
                    AssertMsg(  !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                /* These states must be linked in their clock's active list. */
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                    Assert(pCur->offPrev || pCur == pCurAct);
                    while (pCurAct && pCurAct != pCur)
                        pCurAct = TMTIMER_GET_NEXT(pCurAct);
                    Assert(pCurAct == pCur);
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
                /* These states must NOT be linked in any active list. */
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    Assert(!pCur->offNext);
                    Assert(!pCur->offPrev);
                    for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                         pCurAct;
                         pCurAct = TMTIMER_GET_NEXT(pCurAct))
                    {
                        Assert(pCurAct != pCur);
                        Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                        Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                    }
                }
                break;

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */

    if (fHaveVirtualSyncLock)
        PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}
#endif /* VBOX_STRICT */
696
697#ifdef VBOX_HIGH_RES_TIMERS_HACK
698
/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63))); /* delta must be non-negative when viewed as signed */

    /* Without warp drive the virtual clock maps to GIP time by just adding
       the virtual offset. */
    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    /* NOTE(review): the return values of ASMMultU64ByU32DivByU32 below appear
       to be discarded; iprt/asm-math.h declares it as returning the result
       rather than modifying its argument - confirm the scaling takes effect. */
    if (u64GipTime >= u64Delta)
    {
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
    }
    else
    {
        /* Only the portion after the warp drive start gets scaled. */
        u64Delta -= u64GipTime;
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}
743
744
745/**
746 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
747 * than the one dedicated to timer work.
748 *
749 * @returns See tmTimerPollInternal.
750 * @param pVM The cross context VM structure.
751 * @param u64Now Current virtual clock timestamp.
752 * @param pu64Delta Where to return the delta.
753 */
754DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
755{
756 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
757 *pu64Delta = s_u64OtherRet;
758 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
759}
760
761
/**
 * Worker for tmTimerPollInternal.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pVCpuDst    The cross context virtual CPU structure of the dedicated
 *                      timer EMT.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 * @param   pCounter    The statistics counter to update.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
                                                 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
{
    STAM_COUNTER_INC(pCounter); NOREF(pCounter);
    /* Only the dedicated timer EMT gets the zero ("already expired") return;
       every other EMT is told to come back much later. */
    if (pVCpuDst != pVCpu)
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    *pu64Delta = 0;
    return 0;
}
783
784/**
785 * Common worker for TMTimerPollGIP and TMTimerPoll.
786 *
787 * This function is called before FFs are checked in the inner execution EM loops.
788 *
789 * @returns The GIP timestamp of the next event.
790 * 0 if the next event has already expired.
791 *
792 * @param pVM The cross context VM structure.
793 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
794 * @param pu64Delta Where to store the delta.
795 *
796 * @thread The emulation thread.
797 *
798 * @remarks GIP uses ns ticks.
799 */
800DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
801{
802 PVMCPU pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
803 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
804 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
805
806 /*
807 * Return straight away if the timer FF is already set ...
808 */
809 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
810 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
811
812 /*
813 * ... or if timers are being run.
814 */
815 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
816 {
817 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
818 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
819 }
820
821 /*
822 * Check for TMCLOCK_VIRTUAL expiration.
823 */
824 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
825 const int64_t i64Delta1 = u64Expire1 - u64Now;
826 if (i64Delta1 <= 0)
827 {
828 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
829 {
830 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
831 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
832 }
833 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
834 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
835 }
836
837 /*
838 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
839 * This isn't quite as straight forward if in a catch-up, not only do
840 * we have to adjust the 'now' but when have to adjust the delta as well.
841 */
842
843 /*
844 * Optimistic lockless approach.
845 */
846 uint64_t u64VirtualSyncNow;
847 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
848 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
849 {
850 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
851 {
852 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
853 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
854 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
855 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
856 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
857 {
858 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
859 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
860 if (i64Delta2 > 0)
861 {
862 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
863 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
864
865 if (pVCpu == pVCpuDst)
866 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
867 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
868 }
869
870 if ( !pVM->tm.s.fRunningQueues
871 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
872 {
873 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
874 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
875 }
876
877 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
878 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
879 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
880 }
881 }
882 }
883 else
884 {
885 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
886 LogFlow(("TMTimerPoll: stopped\n"));
887 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
888 }
889
890 /*
891 * Complicated lockless approach.
892 */
893 uint64_t off;
894 uint32_t u32Pct = 0;
895 bool fCatchUp;
896 int cOuterTries = 42;
897 for (;; cOuterTries--)
898 {
899 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
900 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
901 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
902 if (fCatchUp)
903 {
904 /* No changes allowed, try get a consistent set of parameters. */
905 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
906 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
907 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
908 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
909 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
910 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
911 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
912 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
913 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
914 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
915 || cOuterTries <= 0)
916 {
917 uint64_t u64Delta = u64Now - u64Prev;
918 if (RT_LIKELY(!(u64Delta >> 32)))
919 {
920 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
921 if (off > u64Sub + offGivenUp)
922 off -= u64Sub;
923 else /* we've completely caught up. */
924 off = offGivenUp;
925 }
926 else
927 /* More than 4 seconds since last time (or negative), ignore it. */
928 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
929
930 /* Check that we're still running and in catch up. */
931 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
932 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
933 break;
934 }
935 }
936 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
937 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
938 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
939 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
940 break; /* Got an consistent offset */
941
942 /* Repeat the initial checks before iterating. */
943 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
944 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
945 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
946 {
947 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
948 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
949 }
950 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
951 {
952 LogFlow(("TMTimerPoll: stopped\n"));
953 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
954 }
955 if (cOuterTries <= 0)
956 break; /* that's enough */
957 }
958 if (cOuterTries <= 0)
959 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
960 u64VirtualSyncNow = u64Now - off;
961
962 /* Calc delta and see if we've got a virtual sync hit. */
963 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
964 if (i64Delta2 <= 0)
965 {
966 if ( !pVM->tm.s.fRunningQueues
967 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
968 {
969 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
970 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
971 }
972 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
973 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
974 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
975 }
976
977 /*
978 * Return the time left to the next event.
979 */
980 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
981 if (pVCpu == pVCpuDst)
982 {
983 if (fCatchUp)
984 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
985 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
986 }
987 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
988}
989
990
991/**
992 * Set FF if we've passed the next virtual event.
993 *
994 * This function is called before FFs are checked in the inner execution EM loops.
995 *
996 * @returns true if timers are pending, false if not.
997 *
998 * @param pVM The cross context VM structure.
999 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1000 * @thread The emulation thread.
1001 */
1002VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
1003{
1004 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1005 uint64_t off = 0;
1006 tmTimerPollInternal(pVM, pVCpu, &off);
1007 return off == 0;
1008}
1009
1010
1011/**
1012 * Set FF if we've passed the next virtual event.
1013 *
1014 * This function is called before FFs are checked in the inner execution EM loops.
1015 *
1016 * @param pVM The cross context VM structure.
1017 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1018 * @thread The emulation thread.
1019 */
1020VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
1021{
1022 uint64_t off;
1023 tmTimerPollInternal(pVM, pVCpu, &off);
1024}
1025
1026
1027/**
1028 * Set FF if we've passed the next virtual event.
1029 *
1030 * This function is called before FFs are checked in the inner execution EM loops.
1031 *
1032 * @returns The GIP timestamp of the next event.
1033 * 0 if the next event has already expired.
1034 * @param pVM The cross context VM structure.
1035 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1036 * @param pu64Delta Where to store the delta.
1037 * @thread The emulation thread.
1038 */
1039VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
1040{
1041 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
1042}
1043
1044#endif /* VBOX_HIGH_RES_TIMERS_HACK */
1045
1046/**
1047 * Gets the host context ring-3 pointer of the timer.
1048 *
1049 * @returns HC R3 pointer.
1050 * @param pTimer Timer handle as returned by one of the create functions.
1051 */
1052VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
1053{
1054 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
1055}
1056
1057
1058/**
1059 * Gets the host context ring-0 pointer of the timer.
1060 *
1061 * @returns HC R0 pointer.
1062 * @param pTimer Timer handle as returned by one of the create functions.
1063 */
1064VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
1065{
1066 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
1067}
1068
1069
1070/**
1071 * Gets the RC pointer of the timer.
1072 *
1073 * @returns RC pointer.
1074 * @param pTimer Timer handle as returned by one of the create functions.
1075 */
1076VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
1077{
1078 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
1079}
1080
1081
1082/**
1083 * Locks the timer clock.
1084 *
1085 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
1086 * if the clock does not have a lock.
1087 * @param pTimer The timer which clock lock we wish to take.
1088 * @param rcBusy What to return in ring-0 and raw-mode context
1089 * if the lock is busy. Pass VINF_SUCCESS to
1090 * acquired the critical section thru a ring-3
1091 call if necessary.
1092 *
1093 * @remarks Currently only supported on timers using the virtual sync clock.
1094 */
1095VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
1096{
1097 AssertPtr(pTimer);
1098 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
1099 return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
1100}
1101
1102
1103/**
1104 * Unlocks a timer clock locked by TMTimerLock.
1105 *
1106 * @param pTimer The timer which clock to unlock.
1107 */
1108VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
1109{
1110 AssertPtr(pTimer);
1111 AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
1112 PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
1113}
1114
1115
1116/**
1117 * Checks if the current thread owns the timer clock lock.
1118 *
1119 * @returns @c true if its the owner, @c false if not.
1120 * @param pTimer The timer handle.
1121 */
1122VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
1123{
1124 AssertPtr(pTimer);
1125 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
1126 return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
1127}
1128
1129
1130/**
1131 * Optimized TMTimerSet code path for starting an inactive timer.
1132 *
1133 * @returns VBox status code.
1134 *
1135 * @param pVM The cross context VM structure.
1136 * @param pTimer The timer handle.
1137 * @param u64Expire The new expire time.
1138 */
1139static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
1140{
1141 Assert(!pTimer->offPrev);
1142 Assert(!pTimer->offNext);
1143 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1144
1145 TMCLOCK const enmClock = pTimer->enmClock;
1146
1147 /*
1148 * Calculate and set the expiration time.
1149 */
1150 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1151 {
1152 uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
1153 AssertMsgStmt(u64Expire >= u64Last,
1154 ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
1155 u64Expire = u64Last);
1156 }
1157 ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
1158 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));
1159
1160 /*
1161 * Link the timer into the active list.
1162 */
1163 tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1164
1165 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
1166 TM_UNLOCK_TIMERS(pVM);
1167 return VINF_SUCCESS;
1168}
1169
1170
1171/**
1172 * TMTimerSet for the virtual sync timer queue.
1173 *
1174 * This employs a greatly simplified state machine by always acquiring the
1175 * queue lock and bypassing the scheduling list.
1176 *
1177 * @returns VBox status code
1178 * @param pVM The cross context VM structure.
1179 * @param pTimer The timer handle.
1180 * @param u64Expire The expiration time.
1181 */
1182static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
1183{
1184 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1185 VM_ASSERT_EMT(pVM);
1186 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1187 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1188 AssertRCReturn(rc, rc);
1189
1190 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1191 TMTIMERSTATE enmState = pTimer->enmState;
1192 switch (enmState)
1193 {
1194 case TMTIMERSTATE_EXPIRED_DELIVER:
1195 case TMTIMERSTATE_STOPPED:
1196 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1197 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
1198 else
1199 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);
1200
1201 AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
1202 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
1203 pTimer->u64Expire = u64Expire;
1204 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1205 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1206 rc = VINF_SUCCESS;
1207 break;
1208
1209 case TMTIMERSTATE_ACTIVE:
1210 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
1211 tmTimerQueueUnlinkActive(pQueue, pTimer);
1212 pTimer->u64Expire = u64Expire;
1213 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1214 rc = VINF_SUCCESS;
1215 break;
1216
1217 case TMTIMERSTATE_PENDING_RESCHEDULE:
1218 case TMTIMERSTATE_PENDING_STOP:
1219 case TMTIMERSTATE_PENDING_SCHEDULE:
1220 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1221 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1222 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1223 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1224 case TMTIMERSTATE_DESTROY:
1225 case TMTIMERSTATE_FREE:
1226 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1227 rc = VERR_TM_INVALID_STATE;
1228 break;
1229
1230 default:
1231 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1232 rc = VERR_TM_UNKNOWN_STATE;
1233 break;
1234 }
1235
1236 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1237 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1238 return rc;
1239}
1240
1241
1242/**
1243 * Arm a timer with a (new) expire time.
1244 *
1245 * @returns VBox status code.
1246 * @param pTimer Timer handle as returned by one of the create functions.
1247 * @param u64Expire New expire time.
1248 */
1249VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
1250{
1251 PVMCC pVM = pTimer->CTX_SUFF(pVM);
1252 STAM_COUNTER_INC(&pTimer->StatSetAbsolute);
1253
1254 /* Treat virtual sync timers specially. */
1255 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
1256 return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);
1257
1258 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1259 TMTIMER_ASSERT_CRITSECT(pTimer);
1260
1261 DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", R3STRING(pTimer->pszDesc));
1262
1263#ifdef VBOX_WITH_STATISTICS
1264 /*
1265 * Gather optimization info.
1266 */
1267 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
1268 TMTIMERSTATE enmOrgState = pTimer->enmState;
1269 switch (enmOrgState)
1270 {
1271 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
1272 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
1273 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
1274 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
1275 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
1276 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
1277 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
1278 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
1279 }
1280#endif
1281
1282 /*
1283 * The most common case is setting the timer again during the callback.
1284 * The second most common case is starting a timer at some other time.
1285 */
1286#if 1
1287 TMTIMERSTATE enmState1 = pTimer->enmState;
1288 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
1289 || ( enmState1 == TMTIMERSTATE_STOPPED
1290 && pTimer->pCritSect))
1291 {
1292 /* Try take the TM lock and check the state again. */
1293 if (RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM)))
1294 {
1295 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
1296 {
1297 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
1298 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1299 return VINF_SUCCESS;
1300 }
1301 TM_UNLOCK_TIMERS(pVM);
1302 }
1303 }
1304#endif
1305
1306 /*
1307 * Unoptimized code path.
1308 */
1309 int cRetries = 1000;
1310 do
1311 {
1312 /*
1313 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1314 */
1315 TMTIMERSTATE enmState = pTimer->enmState;
1316 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
1317 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
1318 switch (enmState)
1319 {
1320 case TMTIMERSTATE_EXPIRED_DELIVER:
1321 case TMTIMERSTATE_STOPPED:
1322 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1323 {
1324 Assert(!pTimer->offPrev);
1325 Assert(!pTimer->offNext);
1326 pTimer->u64Expire = u64Expire;
1327 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1328 tmSchedule(pTimer);
1329 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1330 return VINF_SUCCESS;
1331 }
1332 break;
1333
1334 case TMTIMERSTATE_PENDING_SCHEDULE:
1335 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1336 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1337 {
1338 pTimer->u64Expire = u64Expire;
1339 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1340 tmSchedule(pTimer);
1341 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1342 return VINF_SUCCESS;
1343 }
1344 break;
1345
1346
1347 case TMTIMERSTATE_ACTIVE:
1348 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1349 {
1350 pTimer->u64Expire = u64Expire;
1351 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1352 tmSchedule(pTimer);
1353 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1354 return VINF_SUCCESS;
1355 }
1356 break;
1357
1358 case TMTIMERSTATE_PENDING_RESCHEDULE:
1359 case TMTIMERSTATE_PENDING_STOP:
1360 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1361 {
1362 pTimer->u64Expire = u64Expire;
1363 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1364 tmSchedule(pTimer);
1365 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1366 return VINF_SUCCESS;
1367 }
1368 break;
1369
1370
1371 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1372 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1373 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1374#ifdef IN_RING3
1375 if (!RTThreadYield())
1376 RTThreadSleep(1);
1377#else
1378/** @todo call host context and yield after a couple of iterations */
1379#endif
1380 break;
1381
1382 /*
1383 * Invalid states.
1384 */
1385 case TMTIMERSTATE_DESTROY:
1386 case TMTIMERSTATE_FREE:
1387 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1388 return VERR_TM_INVALID_STATE;
1389 default:
1390 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1391 return VERR_TM_UNKNOWN_STATE;
1392 }
1393 } while (cRetries-- > 0);
1394
1395 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1396 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1397 return VERR_TM_TIMER_UNSTABLE_STATE;
1398}
1399
1400
1401/**
1402 * Return the current time for the specified clock, setting pu64Now if not NULL.
1403 *
1404 * @returns Current time.
1405 * @param pVM The cross context VM structure.
1406 * @param enmClock The clock to query.
1407 * @param pu64Now Optional pointer where to store the return time
1408 */
1409DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1410{
1411 uint64_t u64Now;
1412 switch (enmClock)
1413 {
1414 case TMCLOCK_VIRTUAL_SYNC:
1415 u64Now = TMVirtualSyncGet(pVM);
1416 break;
1417 case TMCLOCK_VIRTUAL:
1418 u64Now = TMVirtualGet(pVM);
1419 break;
1420 case TMCLOCK_REAL:
1421 u64Now = TMRealGet(pVM);
1422 break;
1423 default:
1424 AssertFatalMsgFailed(("%d\n", enmClock));
1425 }
1426
1427 if (pu64Now)
1428 *pu64Now = u64Now;
1429 return u64Now;
1430}
1431
1432
1433/**
1434 * Optimized TMTimerSetRelative code path.
1435 *
1436 * @returns VBox status code.
1437 *
1438 * @param pVM The cross context VM structure.
1439 * @param pTimer The timer handle.
1440 * @param cTicksToNext Clock ticks until the next time expiration.
1441 * @param pu64Now Where to return the current time stamp used.
1442 * Optional.
1443 */
1444static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1445{
1446 Assert(!pTimer->offPrev);
1447 Assert(!pTimer->offNext);
1448 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1449
1450 /*
1451 * Calculate and set the expiration time.
1452 */
1453 TMCLOCK const enmClock = pTimer->enmClock;
1454 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1455 pTimer->u64Expire = u64Expire;
1456 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));
1457
1458 /*
1459 * Link the timer into the active list.
1460 */
1461 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", R3STRING(pTimer->pszDesc));
1462 tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1463
1464 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1465 TM_UNLOCK_TIMERS(pVM);
1466 return VINF_SUCCESS;
1467}
1468
1469
1470/**
1471 * TMTimerSetRelative for the virtual sync timer queue.
1472 *
1473 * This employs a greatly simplified state machine by always acquiring the
1474 * queue lock and bypassing the scheduling list.
1475 *
1476 * @returns VBox status code
1477 * @param pVM The cross context VM structure.
1478 * @param pTimer The timer to (re-)arm.
1479 * @param cTicksToNext Clock ticks until the next time expiration.
1480 * @param pu64Now Where to return the current time stamp used.
1481 * Optional.
1482 */
1483static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1484{
1485 STAM_PROFILE_START(pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1486 VM_ASSERT_EMT(pVM);
1487 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1488 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1489 AssertRCReturn(rc, rc);
1490
1491 /* Calculate the expiration tick. */
1492 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1493 if (pu64Now)
1494 *pu64Now = u64Expire;
1495 u64Expire += cTicksToNext;
1496
1497 /* Update the timer. */
1498 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1499 TMTIMERSTATE enmState = pTimer->enmState;
1500 switch (enmState)
1501 {
1502 case TMTIMERSTATE_EXPIRED_DELIVER:
1503 case TMTIMERSTATE_STOPPED:
1504 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1505 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1506 else
1507 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1508 pTimer->u64Expire = u64Expire;
1509 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1510 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1511 rc = VINF_SUCCESS;
1512 break;
1513
1514 case TMTIMERSTATE_ACTIVE:
1515 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1516 tmTimerQueueUnlinkActive(pQueue, pTimer);
1517 pTimer->u64Expire = u64Expire;
1518 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1519 rc = VINF_SUCCESS;
1520 break;
1521
1522 case TMTIMERSTATE_PENDING_RESCHEDULE:
1523 case TMTIMERSTATE_PENDING_STOP:
1524 case TMTIMERSTATE_PENDING_SCHEDULE:
1525 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1526 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1527 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1528 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1529 case TMTIMERSTATE_DESTROY:
1530 case TMTIMERSTATE_FREE:
1531 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1532 rc = VERR_TM_INVALID_STATE;
1533 break;
1534
1535 default:
1536 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1537 rc = VERR_TM_UNKNOWN_STATE;
1538 break;
1539 }
1540
1541 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1542 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1543 return rc;
1544}
1545
1546
1547/**
1548 * Arm a timer with a expire time relative to the current time.
1549 *
1550 * @returns VBox status code.
1551 * @param pTimer Timer handle as returned by one of the create functions.
1552 * @param cTicksToNext Clock ticks until the next time expiration.
1553 * @param pu64Now Where to return the current time stamp used.
1554 * Optional.
1555 */
1556VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1557{
1558 PVMCC pVM = pTimer->CTX_SUFF(pVM);
1559 STAM_COUNTER_INC(&pTimer->StatSetRelative);
1560
1561 /* Treat virtual sync timers specially. */
1562 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
1563 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
1564
1565 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1566 TMTIMER_ASSERT_CRITSECT(pTimer);
1567
1568 DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", R3STRING(pTimer->pszDesc));
1569
1570#ifdef VBOX_WITH_STATISTICS
1571 /*
1572 * Gather optimization info.
1573 */
1574 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1575 TMTIMERSTATE enmOrgState = pTimer->enmState;
1576 switch (enmOrgState)
1577 {
1578 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1579 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1580 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1581 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1582 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1583 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1584 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1585 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1586 }
1587#endif
1588
1589 /*
1590 * Try to take the TM lock and optimize the common cases.
1591 *
1592 * With the TM lock we can safely make optimizations like immediate
1593 * scheduling and we can also be 100% sure that we're not racing the
1594 * running of the timer queues. As an additional restraint we require the
1595 * timer to have a critical section associated with to be 100% there aren't
1596 * concurrent operations on the timer. (This latter isn't necessary any
1597 * longer as this isn't supported for any timers, critsect or not.)
1598 *
1599 * Note! Lock ordering doesn't apply when we only tries to
1600 * get the innermost locks.
1601 */
1602 bool fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
1603#if 1
1604 if ( fOwnTMLock
1605 && pTimer->pCritSect)
1606 {
1607 TMTIMERSTATE enmState = pTimer->enmState;
1608 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1609 || enmState == TMTIMERSTATE_STOPPED)
1610 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1611 {
1612 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
1613 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1614 return VINF_SUCCESS;
1615 }
1616
1617 /* Optimize other states when it becomes necessary. */
1618 }
1619#endif
1620
1621 /*
1622 * Unoptimized path.
1623 */
1624 int rc;
1625 TMCLOCK const enmClock = pTimer->enmClock;
1626 for (int cRetries = 1000; ; cRetries--)
1627 {
1628 /*
1629 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1630 */
1631 TMTIMERSTATE enmState = pTimer->enmState;
1632 switch (enmState)
1633 {
1634 case TMTIMERSTATE_STOPPED:
1635 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1636 {
1637 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1638 * Figure a safe way of activating this timer while the queue is
1639 * being run.
1640 * (99.9% sure this that the assertion is caused by DevAPIC.cpp
1641 * re-starting the timer in response to a initial_count write.) */
1642 }
1643 RT_FALL_THRU();
1644 case TMTIMERSTATE_EXPIRED_DELIVER:
1645 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1646 {
1647 Assert(!pTimer->offPrev);
1648 Assert(!pTimer->offNext);
1649 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1650 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1651 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1652 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1653 tmSchedule(pTimer);
1654 rc = VINF_SUCCESS;
1655 break;
1656 }
1657 rc = VERR_TRY_AGAIN;
1658 break;
1659
1660 case TMTIMERSTATE_PENDING_SCHEDULE:
1661 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1662 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1663 {
1664 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1665 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1666 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1667 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1668 tmSchedule(pTimer);
1669 rc = VINF_SUCCESS;
1670 break;
1671 }
1672 rc = VERR_TRY_AGAIN;
1673 break;
1674
1675
1676 case TMTIMERSTATE_ACTIVE:
1677 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1678 {
1679 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1680 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1681 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1682 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1683 tmSchedule(pTimer);
1684 rc = VINF_SUCCESS;
1685 break;
1686 }
1687 rc = VERR_TRY_AGAIN;
1688 break;
1689
1690 case TMTIMERSTATE_PENDING_RESCHEDULE:
1691 case TMTIMERSTATE_PENDING_STOP:
1692 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1693 {
1694 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1695 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1696 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1697 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1698 tmSchedule(pTimer);
1699 rc = VINF_SUCCESS;
1700 break;
1701 }
1702 rc = VERR_TRY_AGAIN;
1703 break;
1704
1705
1706 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1707 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1708 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1709#ifdef IN_RING3
1710 if (!RTThreadYield())
1711 RTThreadSleep(1);
1712#else
1713/** @todo call host context and yield after a couple of iterations */
1714#endif
1715 rc = VERR_TRY_AGAIN;
1716 break;
1717
1718 /*
1719 * Invalid states.
1720 */
1721 case TMTIMERSTATE_DESTROY:
1722 case TMTIMERSTATE_FREE:
1723 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1724 rc = VERR_TM_INVALID_STATE;
1725 break;
1726
1727 default:
1728 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1729 rc = VERR_TM_UNKNOWN_STATE;
1730 break;
1731 }
1732
1733 /* switch + loop is tedious to break out of. */
1734 if (rc == VINF_SUCCESS)
1735 break;
1736
1737 if (rc != VERR_TRY_AGAIN)
1738 {
1739 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1740 break;
1741 }
1742 if (cRetries <= 0)
1743 {
1744 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1745 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1746 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1747 break;
1748 }
1749
1750 /*
1751 * Retry to gain locks.
1752 */
1753 if (!fOwnTMLock)
1754 fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
1755
1756 } /* for (;;) */
1757
1758 /*
1759 * Clean up and return.
1760 */
1761 if (fOwnTMLock)
1762 TM_UNLOCK_TIMERS(pVM);
1763
1764 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1765 return rc;
1766}
1767
1768
1769/**
1770 * Drops a hint about the frequency of the timer.
1771 *
1772 * This is used by TM and the VMM to calculate how often guest execution needs
1773 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1774 *
1775 * @returns VBox status code.
1776 * @param pTimer Timer handle as returned by one of the create
1777 * functions.
1778 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1779 *
1780 * @remarks We're using an integer hertz value here since anything above 1 HZ
1781 * is not going to be any trouble satisfying scheduling wise. The
1782 * range where it makes sense is >= 100 HZ.
1783 */
1784VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
1785{
1786 TMTIMER_ASSERT_CRITSECT(pTimer);
1787
1788 uint32_t const uHzOldHint = pTimer->uHzHint;
1789 pTimer->uHzHint = uHzHint;
1790
1791 PVM pVM = pTimer->CTX_SUFF(pVM);
1792 uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
1793 if ( uHzHint > uMaxHzHint
1794 || uHzOldHint >= uMaxHzHint)
1795 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1796
1797 return VINF_SUCCESS;
1798}
1799
1800
1801/**
1802 * TMTimerStop for the virtual sync timer queue.
1803 *
1804 * This employs a greatly simplified state machine by always acquiring the
1805 * queue lock and bypassing the scheduling list.
1806 *
1807 * @returns VBox status code
1808 * @param pVM The cross context VM structure.
1809 * @param pTimer The timer handle.
1810 */
1811static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
1812{
1813 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1814 VM_ASSERT_EMT(pVM);
1815 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1816 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1817 AssertRCReturn(rc, rc);
1818
1819 /* Reset the HZ hint. */
1820 if (pTimer->uHzHint)
1821 {
1822 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1823 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1824 pTimer->uHzHint = 0;
1825 }
1826
1827 /* Update the timer state. */
1828 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1829 TMTIMERSTATE enmState = pTimer->enmState;
1830 switch (enmState)
1831 {
1832 case TMTIMERSTATE_ACTIVE:
1833 tmTimerQueueUnlinkActive(pQueue, pTimer);
1834 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1835 rc = VINF_SUCCESS;
1836 break;
1837
1838 case TMTIMERSTATE_EXPIRED_DELIVER:
1839 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1840 rc = VINF_SUCCESS;
1841 break;
1842
1843 case TMTIMERSTATE_STOPPED:
1844 rc = VINF_SUCCESS;
1845 break;
1846
1847 case TMTIMERSTATE_PENDING_RESCHEDULE:
1848 case TMTIMERSTATE_PENDING_STOP:
1849 case TMTIMERSTATE_PENDING_SCHEDULE:
1850 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1851 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1852 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1853 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1854 case TMTIMERSTATE_DESTROY:
1855 case TMTIMERSTATE_FREE:
1856 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1857 rc = VERR_TM_INVALID_STATE;
1858 break;
1859
1860 default:
1861 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1862 rc = VERR_TM_UNKNOWN_STATE;
1863 break;
1864 }
1865
1866 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1867 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1868 return rc;
1869}
1870
1871
/**
 * Stop the timer.
 * Use TMR3TimerArm() to "un-stop" the timer.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if stopped (or already stopped / stop pending).
 * @retval  VERR_INVALID_PARAMETER if the timer has expired and its callback
 *          delivery is pending.
 * @retval  VERR_TM_TIMER_UNSTABLE_STATE if the state never stabilized within
 *          the retry budget.
 *
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);
    STAM_COUNTER_INC(&pTimer->StatStop);

    /* Treat virtual sync timers specially: they use a lock-based path. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncStop(pVM, pTimer);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    /*
     * Reset the HZ hint.
     */
    if (pTimer->uHzHint)
    {
        /* This timer's hint may define the cached maximum; flag it for lazy
           recalculation (see tmGetFrequencyHint). */
        if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
        pTimer->uHzHint = 0;
    }

    /** @todo see if this function needs optimizing. */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         *
         * Lock-free: read the state once, then attempt an atomic transition
         * (tmTimerTry*); on contention, fall out of the switch and retry.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
                /* Stopping a timer whose callback delivery is pending is rejected. */
                //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
                return VERR_INVALID_PARAMETER;

            /* Already stopped, or a stop is pending: nothing more to do. */
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                return VINF_SUCCESS;

            /* Scheduling pending: try flip to stop-and-unschedule. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            /* On the active list: use the *WithLink variant (presumably it
               also validates the queue linkage - see tmTimerTryWithLink). */
            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            /* Transient states owned by another thread: yield and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host and yield cpu after a while. */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    /* Retry budget exhausted without reaching a stable state. */
    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}
1978
1979
1980/**
1981 * Get the current clock time.
1982 * Handy for calculating the new expire time.
1983 *
1984 * @returns Current clock time.
1985 * @param pTimer Timer handle as returned by one of the create functions.
1986 */
1987VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1988{
1989 PVMCC pVM = pTimer->CTX_SUFF(pVM);
1990 STAM_COUNTER_INC(&pTimer->StatGet);
1991
1992 uint64_t u64;
1993 switch (pTimer->enmClock)
1994 {
1995 case TMCLOCK_VIRTUAL:
1996 u64 = TMVirtualGet(pVM);
1997 break;
1998 case TMCLOCK_VIRTUAL_SYNC:
1999 u64 = TMVirtualSyncGet(pVM);
2000 break;
2001 case TMCLOCK_REAL:
2002 u64 = TMRealGet(pVM);
2003 break;
2004 default:
2005 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2006 return UINT64_MAX;
2007 }
2008 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2009 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2010 return u64;
2011}
2012
2013
2014/**
2015 * Get the frequency of the timer clock.
2016 *
2017 * @returns Clock frequency (as Hz of course).
2018 * @param pTimer Timer handle as returned by one of the create functions.
2019 */
2020VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
2021{
2022 switch (pTimer->enmClock)
2023 {
2024 case TMCLOCK_VIRTUAL:
2025 case TMCLOCK_VIRTUAL_SYNC:
2026 return TMCLOCK_FREQ_VIRTUAL;
2027
2028 case TMCLOCK_REAL:
2029 return TMCLOCK_FREQ_REAL;
2030
2031 default:
2032 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2033 return 0;
2034 }
2035}
2036
2037
/**
 * Get the expire time of the timer.
 * Only valid for active timers.
 *
 * @returns Expire time of the timer; ~0 (UINT64_MAX) when the timer isn't
 *          active, its state is invalid, or the state never stabilized.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
{
    TMTIMER_ASSERT_CRITSECT(pTimer);
    /* The state may be in flux on another thread; retry a bounded number of
       times before giving up. */
    int cRetries = 1000;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /* Not active (or on its way to stopped): no meaningful expire time. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;

            /* Active or (re)scheduling with its expire time already set. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return pTimer->u64Expire;

            /* Another thread is in the middle of setting the expire time;
               yield and retry. */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
        }
    } while (cRetries-- > 0);

    /* Gave up waiting for a stable state. */
    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
          pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return ~(uint64_t)0;
}
2098
2099
/**
 * Checks if a timer is active or not.
 *
 * @returns True if active.
 * @returns False if not active.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 *
 * @note    The state is read once without taking the timer critical section,
 *          so the answer may be stale if another thread is (re)scheduling the
 *          timer concurrently.
 */
VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
{
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        /* Stopped, expired or stop pending: not active. */
        case TMTIMERSTATE_STOPPED:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;

        /* Active now, or a (re)schedule is in progress: counts as active. */
        case TMTIMERSTATE_ACTIVE:
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return true;

        /*
         * Invalid states.
         */
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;
        default:
            AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
            return false;
    }
}
2144
2145
2146/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2147
2148
2149/**
2150 * Arm a timer with a (new) expire time relative to current time.
2151 *
2152 * @returns VBox status code.
2153 * @param pTimer Timer handle as returned by one of the create functions.
2154 * @param cMilliesToNext Number of milliseconds to the next tick.
2155 */
2156VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
2157{
2158 switch (pTimer->enmClock)
2159 {
2160 case TMCLOCK_VIRTUAL:
2161 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2162 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2163
2164 case TMCLOCK_VIRTUAL_SYNC:
2165 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2166 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2167
2168 case TMCLOCK_REAL:
2169 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2170 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
2171
2172 default:
2173 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2174 return VERR_TM_TIMER_BAD_CLOCK;
2175 }
2176}
2177
2178
2179/**
2180 * Arm a timer with a (new) expire time relative to current time.
2181 *
2182 * @returns VBox status code.
2183 * @param pTimer Timer handle as returned by one of the create functions.
2184 * @param cMicrosToNext Number of microseconds to the next tick.
2185 */
2186VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
2187{
2188 switch (pTimer->enmClock)
2189 {
2190 case TMCLOCK_VIRTUAL:
2191 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2192 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2193
2194 case TMCLOCK_VIRTUAL_SYNC:
2195 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2196 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2197
2198 case TMCLOCK_REAL:
2199 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2200 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
2201
2202 default:
2203 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2204 return VERR_TM_TIMER_BAD_CLOCK;
2205 }
2206}
2207
2208
2209/**
2210 * Arm a timer with a (new) expire time relative to current time.
2211 *
2212 * @returns VBox status code.
2213 * @param pTimer Timer handle as returned by one of the create functions.
2214 * @param cNanosToNext Number of nanoseconds to the next tick.
2215 */
2216VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
2217{
2218 switch (pTimer->enmClock)
2219 {
2220 case TMCLOCK_VIRTUAL:
2221 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2222 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2223
2224 case TMCLOCK_VIRTUAL_SYNC:
2225 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2226 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2227
2228 case TMCLOCK_REAL:
2229 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2230 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
2231
2232 default:
2233 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2234 return VERR_TM_TIMER_BAD_CLOCK;
2235 }
2236}
2237
2238
2239/**
2240 * Get the current clock time as nanoseconds.
2241 *
2242 * @returns The timer clock as nanoseconds.
2243 * @param pTimer Timer handle as returned by one of the create functions.
2244 */
2245VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
2246{
2247 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
2248}
2249
2250
2251/**
2252 * Get the current clock time as microseconds.
2253 *
2254 * @returns The timer clock as microseconds.
2255 * @param pTimer Timer handle as returned by one of the create functions.
2256 */
2257VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
2258{
2259 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
2260}
2261
2262
2263/**
2264 * Get the current clock time as milliseconds.
2265 *
2266 * @returns The timer clock as milliseconds.
2267 * @param pTimer Timer handle as returned by one of the create functions.
2268 */
2269VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
2270{
2271 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
2272}
2273
2274
2275/**
2276 * Converts the specified timer clock time to nanoseconds.
2277 *
2278 * @returns nanoseconds.
2279 * @param pTimer Timer handle as returned by one of the create functions.
2280 * @param u64Ticks The clock ticks.
2281 * @remark There could be rounding errors here. We just do a simple integer divide
2282 * without any adjustments.
2283 */
2284VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
2285{
2286 switch (pTimer->enmClock)
2287 {
2288 case TMCLOCK_VIRTUAL:
2289 case TMCLOCK_VIRTUAL_SYNC:
2290 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2291 return u64Ticks;
2292
2293 case TMCLOCK_REAL:
2294 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2295 return u64Ticks * 1000000;
2296
2297 default:
2298 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2299 return 0;
2300 }
2301}
2302
2303
2304/**
2305 * Converts the specified timer clock time to microseconds.
2306 *
2307 * @returns microseconds.
2308 * @param pTimer Timer handle as returned by one of the create functions.
2309 * @param u64Ticks The clock ticks.
2310 * @remark There could be rounding errors here. We just do a simple integer divide
2311 * without any adjustments.
2312 */
2313VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
2314{
2315 switch (pTimer->enmClock)
2316 {
2317 case TMCLOCK_VIRTUAL:
2318 case TMCLOCK_VIRTUAL_SYNC:
2319 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2320 return u64Ticks / 1000;
2321
2322 case TMCLOCK_REAL:
2323 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2324 return u64Ticks * 1000;
2325
2326 default:
2327 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2328 return 0;
2329 }
2330}
2331
2332
2333/**
2334 * Converts the specified timer clock time to milliseconds.
2335 *
2336 * @returns milliseconds.
2337 * @param pTimer Timer handle as returned by one of the create functions.
2338 * @param u64Ticks The clock ticks.
2339 * @remark There could be rounding errors here. We just do a simple integer divide
2340 * without any adjustments.
2341 */
2342VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
2343{
2344 switch (pTimer->enmClock)
2345 {
2346 case TMCLOCK_VIRTUAL:
2347 case TMCLOCK_VIRTUAL_SYNC:
2348 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2349 return u64Ticks / 1000000;
2350
2351 case TMCLOCK_REAL:
2352 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2353 return u64Ticks;
2354
2355 default:
2356 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2357 return 0;
2358 }
2359}
2360
2361
2362/**
2363 * Converts the specified nanosecond timestamp to timer clock ticks.
2364 *
2365 * @returns timer clock ticks.
2366 * @param pTimer Timer handle as returned by one of the create functions.
2367 * @param cNanoSecs The nanosecond value ticks to convert.
2368 * @remark There could be rounding and overflow errors here.
2369 */
2370VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t cNanoSecs)
2371{
2372 switch (pTimer->enmClock)
2373 {
2374 case TMCLOCK_VIRTUAL:
2375 case TMCLOCK_VIRTUAL_SYNC:
2376 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2377 return cNanoSecs;
2378
2379 case TMCLOCK_REAL:
2380 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2381 return cNanoSecs / 1000000;
2382
2383 default:
2384 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2385 return 0;
2386 }
2387}
2388
2389
2390/**
2391 * Converts the specified microsecond timestamp to timer clock ticks.
2392 *
2393 * @returns timer clock ticks.
2394 * @param pTimer Timer handle as returned by one of the create functions.
2395 * @param cMicroSecs The microsecond value ticks to convert.
2396 * @remark There could be rounding and overflow errors here.
2397 */
2398VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t cMicroSecs)
2399{
2400 switch (pTimer->enmClock)
2401 {
2402 case TMCLOCK_VIRTUAL:
2403 case TMCLOCK_VIRTUAL_SYNC:
2404 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2405 return cMicroSecs * 1000;
2406
2407 case TMCLOCK_REAL:
2408 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2409 return cMicroSecs / 1000;
2410
2411 default:
2412 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2413 return 0;
2414 }
2415}
2416
2417
2418/**
2419 * Converts the specified millisecond timestamp to timer clock ticks.
2420 *
2421 * @returns timer clock ticks.
2422 * @param pTimer Timer handle as returned by one of the create functions.
2423 * @param cMilliSecs The millisecond value ticks to convert.
2424 * @remark There could be rounding and overflow errors here.
2425 */
2426VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t cMilliSecs)
2427{
2428 switch (pTimer->enmClock)
2429 {
2430 case TMCLOCK_VIRTUAL:
2431 case TMCLOCK_VIRTUAL_SYNC:
2432 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2433 return cMilliSecs * 1000000;
2434
2435 case TMCLOCK_REAL:
2436 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2437 return cMilliSecs;
2438
2439 default:
2440 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2441 return 0;
2442 }
2443}
2444
2445
2446/**
2447 * Convert state to string.
2448 *
2449 * @returns Readonly status name.
2450 * @param enmState State.
2451 */
2452const char *tmTimerState(TMTIMERSTATE enmState)
2453{
2454 switch (enmState)
2455 {
2456#define CASE(num, state) \
2457 case TMTIMERSTATE_##state: \
2458 AssertCompile(TMTIMERSTATE_##state == (num)); \
2459 return #num "-" #state
2460 CASE( 1,STOPPED);
2461 CASE( 2,ACTIVE);
2462 CASE( 3,EXPIRED_GET_UNLINK);
2463 CASE( 4,EXPIRED_DELIVER);
2464 CASE( 5,PENDING_STOP);
2465 CASE( 6,PENDING_STOP_SCHEDULE);
2466 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2467 CASE( 8,PENDING_SCHEDULE);
2468 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2469 CASE(10,PENDING_RESCHEDULE);
2470 CASE(11,DESTROY);
2471 CASE(12,FREE);
2472 default:
2473 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2474 return "Invalid state!";
2475#undef CASE
2476 }
2477}
2478
2479
/**
 * Gets the highest frequency hint for all the important timers.
 *
 * @returns The highest frequency.  0 if no timers care.
 * @param   pVM         The cross context VM structure.
 */
static uint32_t tmGetFrequencyHint(PVM pVM)
{
    /*
     * Query the value, recalculate it if necessary.
     *
     * The "right" highest frequency value isn't so important that we'll block
     * waiting on the timer semaphore.
     */
    uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
    if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
    {
        /* Try-lock only: if someone else holds the lock we keep the possibly
           stale cached value and let them (or a later caller) recalculate. */
        if (RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
        {
            /* Clear the flag before scanning; a racing hint change will set
               it again and trigger another recalculation later. */
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);

            /*
             * Loop over the timers associated with each clock.
             */
            uMaxHzHint = 0;
            for (int i = 0; i < TMCLOCK_MAX; i++)
            {
                PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
                for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
                {
                    uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
                    if (uHzHint > uMaxHzHint)
                    {
                        /* Only timers in (or heading towards) a running state
                           contribute to the maximum. */
                        switch (pCur->enmState)
                        {
                            case TMTIMERSTATE_ACTIVE:
                            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                            case TMTIMERSTATE_EXPIRED_DELIVER:
                            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_SCHEDULE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE:
                                uMaxHzHint = uHzHint;
                                break;

                            case TMTIMERSTATE_STOPPED:
                            case TMTIMERSTATE_PENDING_STOP:
                            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                            case TMTIMERSTATE_DESTROY:
                            case TMTIMERSTATE_FREE:
                                break;
                            /* no default, want gcc warnings when adding more states. */
                        }
                    }
                }
            }
            ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
            Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
            TM_UNLOCK_TIMERS(pVM);
        }
    }
    return uMaxHzHint;
}
2543
2544
/**
 * Calculates a host timer frequency that would be suitable for the current
 * timer load.
 *
 * This will take the highest timer frequency, adjust for catch-up and warp
 * driver, and finally add a little fudge factor.  The caller (VMM) will use
 * the result to adjust the per-cpu preemption timer.
 *
 * @returns The highest frequency.  0 if no important timers around.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
{
    uint32_t uHz = tmGetFrequencyHint(pVM);

    /* Catch up, we have to be more aggressive than the % indicates at the
       beginning of the effort. */
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
    {
        uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
        /* Flag re-checked after reading the percentage - presumably to avoid
           applying a stale value when catch-up just ended; confirm intent. */
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            /* Boost the percentage by a fudge factor keyed to how far behind
               the virtual sync clock is. */
            if (u32Pct <= 100)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
            else if (u32Pct <= 200)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
            else if (u32Pct <= 400)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
            uHz *= u32Pct + 100;
            uHz /= 100;
        }
    }

    /* Warp drive. */
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
    {
        uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
        /* Same double-check pattern as for catch-up above. */
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
        {
            uHz *= u32Pct;
            uHz /= 100;
        }
    }

    /* Fudge factor.  The CPU driving the TM timers gets a different factor
       than the other CPUs. */
    if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
        uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
    else
        uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
    uHz /= 100;

    /* Make sure it isn't too high. */
    if (uHz > pVM->tm.s.cHostHzMax)
        uHz = pVM->tm.s.cHostHzMax;

    return uHz;
}
2603
2604
2605/**
2606 * Whether the guest virtual clock is ticking.
2607 *
2608 * @returns true if ticking, false otherwise.
2609 * @param pVM The cross context VM structure.
2610 */
2611VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2612{
2613 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2614}
2615
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette