/* $Id: TMAll.cpp 20784 2009-06-22 14:37:39Z vboxsync $ */ /** @file * TM - Timeout Manager, all contexts. */ /* * Copyright (C) 2006-2007 Sun Microsystems, Inc. * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; * you can redistribute it and/or modify it under the terms of the GNU * General Public License (GPL) as published by the Free Software * Foundation, in version 2 as it comes in the "COPYING" file of the * VirtualBox OSE distribution. VirtualBox OSE is distributed in the * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa * Clara, CA 95054 USA or visit http://www.sun.com if you need * additional information or have any questions. */ /******************************************************************************* * Header Files * *******************************************************************************/ #define LOG_GROUP LOG_GROUP_TM #include #include #ifdef IN_RING3 # include #endif #include "TMInternal.h" #include #include #include #include #include #include #include #include #ifdef IN_RING3 # include #endif /******************************************************************************* * Defined Constants And Macros * *******************************************************************************/ /** @def TMTIMER_ASSERT_CRITSECT * Checks that the caller owns the critical section if one is associated with * the timer. 
*/
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
            AssertMsg(pCritSect && PDMCritSectIsOwner(pCritSect), \
                      ("pTimer=%p (%s) pCritSect=%p\n", pTimer, R3STRING(pTimer->pszDesc), (pTimer)->pCritSect)); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif


#ifndef tmTimerLock

/**
 * Try take the timer lock, wait in ring-3, return VERR_SEM_BUSY in R0/RC.
 *
 * @retval  VINF_SUCCESS on success (always in ring-3).
 * @retval  VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
 *
 * @param   pVM         The VM handle.
 *
 * @thread  EMTs for the time being.
 */
int tmTimerLock(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    int rc = PDMCritSectEnter(&pVM->tm.s.TimerCritSect, VERR_SEM_BUSY);
    return rc;
}


/**
 * Try take the timer lock, no waiting.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if busy.
 *
 * @param   pVM         The VM handle.
 */
int tmTimerTryLock(PVM pVM)
{
    int rc = PDMCritSectTryEnter(&pVM->tm.s.TimerCritSect);
    return rc;
}


/**
 * Release the EMT/TM lock.
 *
 * @param   pVM         The VM handle.
 */
void tmTimerUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->tm.s.TimerCritSect);
}


/**
 * Try take the VirtualSync lock, wait in ring-3, return VERR_SEM_BUSY in R0/RC.
 *
 * @retval  VINF_SUCCESS on success (always in ring-3).
 * @retval  VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
 *
 * @param   pVM         The VM handle.
 */
int tmVirtualSyncLock(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
    return rc;
}


/**
 * Try take the VirtualSync lock, no waiting.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if busy.
 *
 * @param   pVM         The VM handle.
 */
int tmVirtualSyncTryLock(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Release the VirtualSync lock.
 *
 * @param   pVM         The VM handle.
 */
void tmVirtualSyncUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}

#endif /* ! macros */


/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only ticks when we're executing guest code.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're executing guest code.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVM, pVCpu);
}


/**
 * Notification that the cpu is entering the halt state.
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMMDECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    /* Only resume if the TSC is execution-tied AND it is also meant to
       tick while halted (i.e. fTSCNotTiedToHalt is clear). */
    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that the cpu is leaving the halt state.
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param   pVCpu       The VMCPU to operate on.
*/ VMMDECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu) { PVM pVM = pVCpu->CTX_SUFF(pVM); if ( pVM->tm.s.fTSCTiedToExecution && !pVM->tm.s.fTSCNotTiedToHalt) tmCpuTickPause(pVM, pVCpu); } /** * Raise the timer force action flag and notify the dedicated timer EMT. * * @param pVM The VM handle. */ DECLINLINE(void) tmScheduleNotify(PVM pVM) { PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu]; if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)) { Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__)); VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER); #ifdef IN_RING3 REMR3NotifyTimerPending(pVM, pVCpuDst); VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM); #endif STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF); } } /** * Schedule the queue which was changed. */ DECLINLINE(void) tmSchedule(PTMTIMER pTimer) { PVM pVM = pTimer->CTX_SUFF(pVM); if ( VM_IS_EMT(pVM) && RT_SUCCESS(tmTimerTryLock(pVM))) { STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a); Log3(("tmSchedule: tmTimerQueueSchedule\n")); tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]); #ifdef VBOX_STRICT tmTimerQueuesSanityChecks(pVM, "tmSchedule"); #endif STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a); tmTimerUnlock(pVM); } else { TMTIMERSTATE enmState = pTimer->enmState; if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState)) tmScheduleNotify(pVM); } } /** * Try change the state to enmStateNew from enmStateOld * and link the timer into the scheduling queue. * * @returns Success indicator. * @param pTimer Timer in question. * @param enmStateNew The new timer state. * @param enmStateOld The old timer state. */ DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld) { /* * Attempt state change. */ bool fRc; TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc); return fRc; } /** * Links the timer onto the scheduling queue. * * @param pQueue The timer queue the timer belongs to. * @param pTimer The timer. 
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(!pTimer->offScheduleNext);
    /* Offsets are relative self-relative (timer <-> queue) so the list works
       in all three contexts (R3/R0/RC); push onto the head with CAS. */
    const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
    int32_t offHead;
    do
    {
        offHead = pQueue->offSchedule;
        if (offHead)
            pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
        else
            pTimer->offScheduleNext = 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
}


/**
 * Try change the state to enmStateNew from enmStateOld
 * and link the timer into the scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
    {
        tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
        return true;
    }
    return false;
}


#ifdef VBOX_HIGH_RES_TIMERS_HACK

/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63)));

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    if (u64GipTime >= u64Delta)
    {
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
    }
    else
    {
        u64Delta -= u64GipTime;
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}


/**
 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
 * than the one dedicated to timer work.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
{
    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
    *pu64Delta = s_u64OtherRet;
    return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
}


/**
 * Worker for tmTimerPollInternal.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the
 *                      caller.
 * @param   pVCpuDst    Pointer to the shared VMCPU structure of the
 *                      dedicated timer EMT.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 * @param   pCounter    The statistics counter to update.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
                                                 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
{
    STAM_COUNTER_INC(pCounter);
    if (pVCpuDst != pVCpu)
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    *pu64Delta = 0;
    return 0;
}


/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @param   pu64Delta   Where to store the delta.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    PVMCPU                  pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    const uint64_t          u64Now   = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t  u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t   i64Delta1  = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straightforward if in a catch-up, not only do
     * we have to adjust the 'now' but we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            /* Re-read everything to check nothing changed under us. */
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (    !pVM->tm.s.fRunningQueues
                    &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
                    REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     */
    uint64_t    off;
    uint32_t    u32Pct = 0;
    bool        fCatchUp;
    int         cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (    !pVM->tm.s.fRunningQueues
            &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns true if timers are pending, false if not.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @thread  The emulation thread.
 */
VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t off = 0;
    tmTimerPollInternal(pVM, pVCpu, &off);
    return off == 0;
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @thread  The emulation thread.
 */
VMMDECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
{
    uint64_t off;
    tmTimerPollInternal(pVM, pVCpu, &off);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @param   pu64Delta   Where to store the delta.
 * @thread  The emulation thread.
 */
VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
}

#endif /* VBOX_HIGH_RES_TIMERS_HACK */


/**
 * Gets the host context ring-3 pointer of the timer.
 *
 * @returns HC R3 pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
*/ VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer) { return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer); } /** * Gets the host context ring-0 pointer of the timer. * * @returns HC R0 pointer. * @param pTimer Timer handle as returned by one of the create functions. */ VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer) { return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer); } /** * Gets the RC pointer of the timer. * * @returns RC pointer. * @param pTimer Timer handle as returned by one of the create functions. */ VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer) { return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer); } /** * Links a timer into the active list of a timer queue. * * The caller must have taken the TM semaphore before calling this function. * * @param pQueue The queue. * @param pTimer The timer. * @param u64Expire The timer expiration time. */ DECL_FORCE_INLINE(void) tmTimerActiveLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire) { PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); if (pCur) { for (;; pCur = TMTIMER_GET_NEXT(pCur)) { if (pCur->u64Expire > u64Expire) { const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur); TMTIMER_SET_NEXT(pTimer, pCur); TMTIMER_SET_PREV(pTimer, pPrev); if (pPrev) TMTIMER_SET_NEXT(pPrev, pTimer); else { TMTIMER_SET_HEAD(pQueue, pTimer); pQueue->u64Expire = u64Expire; } TMTIMER_SET_PREV(pCur, pTimer); return; } if (!pCur->offNext) { TMTIMER_SET_NEXT(pCur, pTimer); TMTIMER_SET_PREV(pTimer, pCur); return; } } } else { TMTIMER_SET_HEAD(pQueue, pTimer); pQueue->u64Expire = u64Expire; } } /** * Optimized TMTimerSet code path for starting an inactive timer. * * @returns VBox status code. * * @param pVM The VM handle. * @param pTimer The timer handle. * @param u64Expire The new expire time. 
*/ static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire) { Assert(!pTimer->offPrev); Assert(!pTimer->offNext); Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE); /* * Calculate and set the expiration time. */ pTimer->u64Expire = u64Expire; Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire)); /* * Link the timer into the active list. */ TMCLOCK const enmClock = pTimer->enmClock; tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire); STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt); tmTimerUnlock(pVM); return VINF_SUCCESS; } /** * Arm a timer with a (new) expire time. * * @returns VBox status. * @param pTimer Timer handle as returned by one of the create functions. * @param u64Expire New expire time. */ VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire) { PVM pVM = pTimer->CTX_SUFF(pVM); STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a); TMTIMER_ASSERT_CRITSECT(pTimer); #ifdef VBOX_WITH_STATISTICS /* Gather optimization info. 
*/ STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet); TMTIMERSTATE enmOrgState = pTimer->enmState; switch (enmOrgState) { case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break; case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break; case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break; case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break; case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break; case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break; case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break; default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break; } #endif /* * The most common case is setting the timer again during the callback. * The second most common case is starting a timer at some other time. */ #if 1 TMTIMERSTATE enmState = pTimer->enmState; if ( enmState == TMTIMERSTATE_EXPIRED_DELIVER || ( enmState == TMTIMERSTATE_STOPPED && pTimer->pCritSect)) { /* Try take the TM lock and check the state again. */ if (RT_SUCCESS_NP(tmTimerTryLock(pVM))) { if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState))) { tmTimerSetOptimizedStart(pVM, pTimer, u64Expire); STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a); return VINF_SUCCESS; } tmTimerUnlock(pVM); } } #endif /* * Unoptimized code path. */ int cRetries = 1000; do { /* * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE. 
*/ TMTIMERSTATE enmState = pTimer->enmState; Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n", pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire)); switch (enmState) { case TMTIMERSTATE_EXPIRED_DELIVER: case TMTIMERSTATE_STOPPED: if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState)) { Assert(!pTimer->offPrev); Assert(!pTimer->offNext); AssertMsg( pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC || pVM->tm.s.fVirtualSyncTicking || u64Expire >= pVM->tm.s.u64VirtualSync, ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc))); pTimer->u64Expire = u64Expire; TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE); tmSchedule(pTimer); STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a); return VINF_SUCCESS; } break; case TMTIMERSTATE_PENDING_SCHEDULE: case TMTIMERSTATE_PENDING_STOP_SCHEDULE: if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState)) { pTimer->u64Expire = u64Expire; TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE); tmSchedule(pTimer); STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a); return VINF_SUCCESS; } break; case TMTIMERSTATE_ACTIVE: if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState)) { pTimer->u64Expire = u64Expire; TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE); tmSchedule(pTimer); STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a); return VINF_SUCCESS; } break; case TMTIMERSTATE_PENDING_RESCHEDULE: case TMTIMERSTATE_PENDING_STOP: if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState)) { pTimer->u64Expire = u64Expire; TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE); tmSchedule(pTimer); STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a); return VINF_SUCCESS; } break; case TMTIMERSTATE_EXPIRED_GET_UNLINK: case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE: case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE: #ifdef IN_RING3 if 
(!RTThreadYield()) RTThreadSleep(1); #else /** @todo call host context and yield after a couple of iterations */ #endif break; /* * Invalid states. */ case TMTIMERSTATE_DESTROY: case TMTIMERSTATE_FREE: AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc))); return VERR_TM_INVALID_STATE; default: AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc))); return VERR_TM_UNKNOWN_STATE; } } while (cRetries-- > 0); AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc))); STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a); return VERR_INTERNAL_ERROR; } /** * Return the current time for the specified clock, setting pu64Now if not NULL. * * @returns Current time. * @param pVM The VM handle. * @param enmClock The clock to query. * @param pu64Now Optional pointer where to store the return time */ DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now) { uint64_t u64Now; switch (enmClock) { case TMCLOCK_VIRTUAL_SYNC: u64Now = TMVirtualSyncGet(pVM); break; case TMCLOCK_VIRTUAL: u64Now = TMVirtualGet(pVM); break; case TMCLOCK_REAL: u64Now = TMRealGet(pVM); break; default: AssertFatalMsgFailed(("%d\n", enmClock)); } if (pu64Now) *pu64Now = u64Now; return u64Now; } /** * Optimized TMTimerSetRelative code path. * * @returns VBox status code. * * @param pVM The VM handle. * @param pTimer The timer handle. * @param cTicksToNext Clock ticks until the next time expiration. * @param pu64Now Where to return the current time stamp used. * Optional. */ static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now) { Assert(!pTimer->offPrev); Assert(!pTimer->offNext); Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE); /* * Calculate and set the expiration time. 
*/ TMCLOCK const enmClock = pTimer->enmClock; uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now); pTimer->u64Expire = u64Expire; Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext)); /* * Link the timer into the active list. */ tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire); STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt); tmTimerUnlock(pVM); return VINF_SUCCESS; } /** * Arm a timer with a expire time relative to the current time. * * @returns VBox status. * @param pTimer Timer handle as returned by one of the create functions. * @param cTicksToNext Clock ticks until the next time expiration. * @param pu64Now Where to return the current time stamp used. * Optional. */ VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now) { STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a); TMTIMER_ASSERT_CRITSECT(pTimer); PVM pVM = pTimer->CTX_SUFF(pVM); int rc; #ifdef VBOX_WITH_STATISTICS /* Gather optimization info. 
*/ STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative); TMTIMERSTATE enmOrgState = pTimer->enmState; switch (enmOrgState) { case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break; case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break; case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break; case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break; case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break; case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break; case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break; default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break; } #endif /* * Try to take the TM lock and optimize the common cases. * * With the TM lock we can safely make optimizations like immediate * scheduling and we can also be 100% sure that we're not racing the * running of the timer queues. As an additional restraint we require the * timer to have a critical section associated with to be 100% there aren't * concurrent operations on the timer. (This latter isn't necessary any * longer as this isn't supported for any timers, critsect or not.) * * Note! Lock ordering doesn't apply when we only tries to * get the innermost locks. 
*/ bool fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM)); #if 1 if ( fOwnTMLock && pTimer->pCritSect) { TMTIMERSTATE enmState = pTimer->enmState; if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER || enmState == TMTIMERSTATE_STOPPED) && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState))) { tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now); STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a); return VINF_SUCCESS; } /* Optimize other states when it becomes necessary. */ } #endif /* * Unoptimized path. */ TMCLOCK const enmClock = pTimer->enmClock; bool fOwnVirtSyncLock; fOwnVirtSyncLock = !fOwnTMLock && enmClock == TMCLOCK_VIRTUAL_SYNC && RT_SUCCESS(tmVirtualSyncTryLock(pVM)); for (int cRetries = 1000; ; cRetries--) { /* * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE. */ TMTIMERSTATE enmState = pTimer->enmState; switch (enmState) { case TMTIMERSTATE_STOPPED: if (enmClock == TMCLOCK_VIRTUAL_SYNC) { /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync: * Figure a safe way of activating this timer while the queue is * being run. * (99.9% sure this that the assertion is caused by DevAPIC.cpp * re-starting the timer in respons to a initial_count write.) 
*/ } /* fall thru */ case TMTIMERSTATE_EXPIRED_DELIVER: if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState)) { Assert(!pTimer->offPrev); Assert(!pTimer->offNext); pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now); Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n", pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries)); TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE); tmSchedule(pTimer); rc = VINF_SUCCESS; break; } rc = VERR_TRY_AGAIN; break; case TMTIMERSTATE_PENDING_SCHEDULE: case TMTIMERSTATE_PENDING_STOP_SCHEDULE: if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState)) { pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now); Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n", pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries)); TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE); tmSchedule(pTimer); rc = VINF_SUCCESS; break; } rc = VERR_TRY_AGAIN; break; case TMTIMERSTATE_ACTIVE: if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState)) { pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now); Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n", pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries)); TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE); tmSchedule(pTimer); rc = VINF_SUCCESS; break; } rc = VERR_TRY_AGAIN; break; case TMTIMERSTATE_PENDING_RESCHEDULE: case TMTIMERSTATE_PENDING_STOP: if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState)) { pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now); Log2(("TMTimerSetRelative: %p:{.enmState=%s, 
.pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n", pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries)); TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE); tmSchedule(pTimer); rc = VINF_SUCCESS; break; } rc = VERR_TRY_AGAIN; break; case TMTIMERSTATE_EXPIRED_GET_UNLINK: case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE: case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE: #ifdef IN_RING3 if (!RTThreadYield()) RTThreadSleep(1); #else /** @todo call host context and yield after a couple of iterations */ #endif rc = VERR_TRY_AGAIN; break; /* * Invalid states. */ case TMTIMERSTATE_DESTROY: case TMTIMERSTATE_FREE: AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc))); rc = VERR_TM_INVALID_STATE; break; default: AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc))); rc = VERR_TM_UNKNOWN_STATE; break; } /* switch + loop is tedious to break out of. */ if (rc == VINF_SUCCESS) break; if (rc != VERR_TRY_AGAIN) { tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now); break; } if (cRetries <= 0) { AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc))); rc = VERR_INTERNAL_ERROR; tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now); break; } /* * Retry to gain locks. */ if (!fOwnTMLock) { fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM)); if ( !fOwnTMLock && enmClock == TMCLOCK_VIRTUAL_SYNC && !fOwnVirtSyncLock) fOwnVirtSyncLock = RT_SUCCESS_NP(tmVirtualSyncTryLock(pVM)); } } /* for (;;) */ /* * Clean up and return. */ if (fOwnVirtSyncLock) tmVirtualSyncUnlock(pVM); if (fOwnTMLock) tmTimerUnlock(pVM); if ( !fOwnTMLock && !fOwnVirtSyncLock && enmClock == TMCLOCK_VIRTUAL_SYNC) STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeRacyVirtSync); STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a); return rc; } /** * Arm a timer with a (new) expire time relative to current time. 
* * @returns VBox status. * @param pTimer Timer handle as returned by one of the create functions. * @param cMilliesToNext Number of millieseconds to the next tick. */ VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext) { PVM pVM = pTimer->CTX_SUFF(pVM); PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */ switch (pTimer->enmClock) { case TMCLOCK_VIRTUAL: AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000); return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL); case TMCLOCK_VIRTUAL_SYNC: AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000); return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL); case TMCLOCK_REAL: AssertCompile(TMCLOCK_FREQ_REAL == 1000); return TMTimerSetRelative(pTimer, cMilliesToNext, NULL); default: AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock)); return VERR_INTERNAL_ERROR; } } /** * Arm a timer with a (new) expire time relative to current time. * * @returns VBox status. * @param pTimer Timer handle as returned by one of the create functions. * @param cMicrosToNext Number of microseconds to the next tick. */ VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext) { PVM pVM = pTimer->CTX_SUFF(pVM); PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */ switch (pTimer->enmClock) { case TMCLOCK_VIRTUAL: AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000); return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL); case TMCLOCK_VIRTUAL_SYNC: AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000); return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL); case TMCLOCK_REAL: AssertCompile(TMCLOCK_FREQ_REAL == 1000); return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL); default: AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock)); return VERR_INTERNAL_ERROR; } } /** * Arm a timer with a (new) expire time relative to current time. * * @returns VBox status. * @param pTimer Timer handle as returned by one of the create functions. 
* @param cNanosToNext Number of nanoseconds to the next tick. */ VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext) { PVM pVM = pTimer->CTX_SUFF(pVM); PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */ switch (pTimer->enmClock) { case TMCLOCK_VIRTUAL: AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000); return TMTimerSetRelative(pTimer, cNanosToNext, NULL); case TMCLOCK_VIRTUAL_SYNC: AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000); return TMTimerSetRelative(pTimer, cNanosToNext, NULL); case TMCLOCK_REAL: AssertCompile(TMCLOCK_FREQ_REAL == 1000); return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL); default: AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock)); return VERR_INTERNAL_ERROR; } } /** * Stop the timer. * Use TMR3TimerArm() to "un-stop" the timer. * * @returns VBox status. * @param pTimer Timer handle as returned by one of the create functions. */ VMMDECL(int) TMTimerStop(PTMTIMER pTimer) { STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a); TMTIMER_ASSERT_CRITSECT(pTimer); /** @todo see if this function needs optimizing. */ int cRetries = 1000; do { /* * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE. 
*/ TMTIMERSTATE enmState = pTimer->enmState; Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n", pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries)); switch (enmState) { case TMTIMERSTATE_EXPIRED_DELIVER: //AssertMsgFailed(("You don't stop an expired timer dude!\n")); return VERR_INVALID_PARAMETER; case TMTIMERSTATE_STOPPED: case TMTIMERSTATE_PENDING_STOP: case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a); return VINF_SUCCESS; case TMTIMERSTATE_PENDING_SCHEDULE: if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState)) { tmSchedule(pTimer); STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a); return VINF_SUCCESS; } case TMTIMERSTATE_PENDING_RESCHEDULE: if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState)) { tmSchedule(pTimer); STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a); return VINF_SUCCESS; } break; case TMTIMERSTATE_ACTIVE: if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState)) { tmSchedule(pTimer); STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a); return VINF_SUCCESS; } break; case TMTIMERSTATE_EXPIRED_GET_UNLINK: case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE: case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE: #ifdef IN_RING3 if (!RTThreadYield()) RTThreadSleep(1); #else /**@todo call host and yield cpu after a while. */ #endif break; /* * Invalid states. */ case TMTIMERSTATE_DESTROY: case TMTIMERSTATE_FREE: AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc))); return VERR_TM_INVALID_STATE; default: AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc))); return VERR_TM_UNKNOWN_STATE; } } while (cRetries-- > 0); AssertMsgFailed(("Failed waiting for stable state. 
state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc))); STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a); return VERR_INTERNAL_ERROR; } /** * Get the current clock time. * Handy for calculating the new expire time. * * @returns Current clock time. * @param pTimer Timer handle as returned by one of the create functions. */ VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer) { uint64_t u64; PVM pVM = pTimer->CTX_SUFF(pVM); switch (pTimer->enmClock) { case TMCLOCK_VIRTUAL: u64 = TMVirtualGet(pVM); break; case TMCLOCK_VIRTUAL_SYNC: u64 = TMVirtualSyncGet(pVM); break; case TMCLOCK_REAL: u64 = TMRealGet(pVM); break; default: AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock)); return ~(uint64_t)0; } //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n", // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc))); return u64; } /** * Get the freqency of the timer clock. * * @returns Clock frequency (as Hz of course). * @param pTimer Timer handle as returned by one of the create functions. */ VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer) { switch (pTimer->enmClock) { case TMCLOCK_VIRTUAL: case TMCLOCK_VIRTUAL_SYNC: return TMCLOCK_FREQ_VIRTUAL; case TMCLOCK_REAL: return TMCLOCK_FREQ_REAL; default: AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock)); return 0; } } /** * Get the current clock time as nanoseconds. * * @returns The timer clock as nanoseconds. * @param pTimer Timer handle as returned by one of the create functions. */ VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer) { return TMTimerToNano(pTimer, TMTimerGet(pTimer)); } /** * Get the current clock time as microseconds. * * @returns The timer clock as microseconds. * @param pTimer Timer handle as returned by one of the create functions. */ VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer) { return TMTimerToMicro(pTimer, TMTimerGet(pTimer)); } /** * Get the current clock time as milliseconds. 
 *
 * @returns The timer clock as milliseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
{
    /* Read the clock and convert in one go. */
    return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
}


/**
 * Converts the specified timer clock time to nanoseconds.
 *
 * @returns nanoseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Ticks        The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            /* The virtual clocks already tick in nanoseconds. */
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks;

        case TMCLOCK_REAL:
            /* The real clock ticks in milliseconds: ms -> ns. */
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks * 1000000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified timer clock time to microseconds.
 *
 * @returns microseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Ticks        The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            /* ns -> us. */
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks / 1000;

        case TMCLOCK_REAL:
            /* ms -> us. */
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks * 1000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified timer clock time to milliseconds.
 *
 * @returns milliseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Ticks        The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
*/ VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks) { switch (pTimer->enmClock) { case TMCLOCK_VIRTUAL: case TMCLOCK_VIRTUAL_SYNC: AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000); return u64Ticks / 1000000; case TMCLOCK_REAL: AssertCompile(TMCLOCK_FREQ_REAL == 1000); return u64Ticks; default: AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock)); return 0; } } /** * Converts the specified nanosecond timestamp to timer clock ticks. * * @returns timer clock ticks. * @param pTimer Timer handle as returned by one of the create functions. * @param u64NanoTS The nanosecond value ticks to convert. * @remark There could be rounding and overflow errors here. */ VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS) { switch (pTimer->enmClock) { case TMCLOCK_VIRTUAL: case TMCLOCK_VIRTUAL_SYNC: AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000); return u64NanoTS; case TMCLOCK_REAL: AssertCompile(TMCLOCK_FREQ_REAL == 1000); return u64NanoTS / 1000000; default: AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock)); return 0; } } /** * Converts the specified microsecond timestamp to timer clock ticks. * * @returns timer clock ticks. * @param pTimer Timer handle as returned by one of the create functions. * @param u64MicroTS The microsecond value ticks to convert. * @remark There could be rounding and overflow errors here. */ VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS) { switch (pTimer->enmClock) { case TMCLOCK_VIRTUAL: case TMCLOCK_VIRTUAL_SYNC: AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000); return u64MicroTS * 1000; case TMCLOCK_REAL: AssertCompile(TMCLOCK_FREQ_REAL == 1000); return u64MicroTS / 1000; default: AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock)); return 0; } } /** * Converts the specified millisecond timestamp to timer clock ticks. * * @returns timer clock ticks. * @param pTimer Timer handle as returned by one of the create functions. 
 * @param   u64MilliTS      The millisecond value ticks to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            /* ms -> ns. */
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64MilliTS * 1000000;

        case TMCLOCK_REAL:
            /* Already in ms. */
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64MilliTS;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Get the expire time of the timer.
 * Only valid for active timers.
 *
 * @returns Expire time of the timer.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
{
    TMTIMER_ASSERT_CRITSECT(pTimer);
    /* Spin (bounded) until the timer is in a stable state. */
    int cRetries = 1000;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /* Not (or no longer) active - there is no expire time, return ~0. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;

            /* Active (or about to be) - u64Expire is valid. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return pTimer->u64Expire;

            /* The expire time is being updated - yield and retry. */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#endif
                break;

            /*
             * Invalid states.
 */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
        }
    } while (cRetries-- > 0);

    /* Gave up waiting for a stable state. */
    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
          pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return ~(uint64_t)0;
}


/**
 * Checks if a timer is active or not.
 *
 * @returns True if active.
 * @returns False if not active.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
{
    /* A single read of the state fully determines activeness - no retry loop needed. */
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_STOPPED:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;

        case TMTIMERSTATE_ACTIVE:
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return true;

        /*
         * Invalid states.
 */
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;
        default:
            AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
            return false;
    }
}


/**
 * Convert state to string.
 *
 * @returns Readonly status name.
 * @param   enmState    State.
 */
const char *tmTimerState(TMTIMERSTATE enmState)
{
    switch (enmState)
    {
        /* CASE maps the enum value to a "<num>-<name>" string and compile-time
           checks that the enum member really has the value <num>. */
#define CASE(num, state) \
        case TMTIMERSTATE_##state: \
            AssertCompile(TMTIMERSTATE_##state == (num)); \
            return #num "-" #state
        CASE( 1,STOPPED);
        CASE( 2,ACTIVE);
        CASE( 3,EXPIRED_GET_UNLINK);
        CASE( 4,EXPIRED_DELIVER);
        CASE( 5,PENDING_STOP);
        CASE( 6,PENDING_STOP_SCHEDULE);
        CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
        CASE( 8,PENDING_SCHEDULE);
        CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
        CASE(10,PENDING_RESCHEDULE);
        CASE(11,DESTROY);
        CASE(12,FREE);
        default:
            AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
            return "Invalid state!";
#undef CASE
    }
}


/**
 * Schedules the given timer on the given queue.
 *
 * @param   pQueue      The timer queue.
 * @param   pTimer      The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            {
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */

                /* Unlink from the active list; if it was the head, update the
                   queue head and the cached queue expire time. */
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
                const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pNext);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pNext);
                    pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
                }
                if (pNext)
                    TMTIMER_SET_PREV(pNext, pPrev);
                pTimer->offNext = 0;
                pTimer->offPrev = 0;
                /* fall thru */
            }

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            {
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */

                /* Walk the expire-time sorted list and insert before the first
                   timer expiring later; else append at the tail. */
                PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
                if (pCur)
                {
                    const uint64_t u64Expire = pTimer->u64Expire;
                    for (;; pCur = TMTIMER_GET_NEXT(pCur))
                    {
                        if (pCur->u64Expire > u64Expire)
                        {
                            const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                            TMTIMER_SET_NEXT(pTimer, pCur);
                            TMTIMER_SET_PREV(pTimer, pPrev);
                            if (pPrev)
                                TMTIMER_SET_NEXT(pPrev, pTimer);
                            else
                            {
                                /* New head - update cached queue expire time. */
                                TMTIMER_SET_HEAD(pQueue, pTimer);
                                pQueue->u64Expire = u64Expire;
                            }
                            TMTIMER_SET_PREV(pCur, pTimer);
                            return;
                        }
                        if (!pCur->offNext)
                        {
                            /* Reached the tail - append. */
                            TMTIMER_SET_NEXT(pCur, pTimer);
                            TMTIMER_SET_PREV(pTimer, pCur);
                            return;
                        }
                    }
                }
                else
                {
                    /* Empty list - this timer becomes the head. */
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    pQueue->u64Expire = pTimer->u64Expire;
                }
                return;
            }

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
            {
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */

                /* Unlink from the active list (same procedure as for reschedule). */
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
                const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pNext);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pNext);
                    pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
                }
                if (pNext)
                    TMTIMER_SET_PREV(pNext, pPrev);
                pTimer->offNext = 0;
                pTimer->offPrev = 0;
                /* fall thru */
            }

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
 */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                /* Re-link onto the schedule list so a later pass picks it up. */
                tmTimerLink(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}


/**
 * Schedules the specified timer queue.
 *
 * @param   pVM             The VM to run the timers for.
 * @param   pQueue          The queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
{
    TM_ASSERT_LOCK(pVM);

    /*
     * Dequeue the scheduling list and iterate it.
     * The list is kept as self-relative offsets; an atomic xchg detaches the
     * whole pending batch in one go.
     */
    int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
    if (!offNext)
        return;
    PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
    while (pNext)
    {
        /*
         * Unlink the head timer and find the next one.
         */
        PTMTIMER pTimer = pNext;
        pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
        pTimer->offScheduleNext = 0;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
        tmTimerQueueScheduleOne(pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    } /* foreach timer in current schedule batch.
 */
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}


#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * @param   pVM         VM handle.
 * @param   pszWhere    Caller identification used in the assertion messages.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_LOCK(pVM);

    /*
     * Check the linking of the active lists.
     */
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            /* Each node must belong to this clock and have a consistent back link. */
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    /* An ACTIVE timer must not sit on the schedule list
                       (the state is re-read to tolerate a concurrent change). */
                    AssertMsg(  !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }

# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
 */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            /* These states must be linked into their clock's active list. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            {
                PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                Assert(pCur->offPrev || pCur == pCurAct);
                while (pCurAct && pCurAct != pCur)
                    pCurAct = TMTIMER_GET_NEXT(pCurAct);
                Assert(pCurAct == pCur);
                break;
            }

            /* These states must NOT appear anywhere in the active list. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            {
                Assert(!pCur->offNext);
                Assert(!pCur->offPrev);
                for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                     pCurAct;
                     pCurAct = TMTIMER_GET_NEXT(pCurAct))
                {
                    Assert(pCurAct != pCur);
                    Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                    Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                }
                break;
            }

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */
}
#endif /* VBOX_STRICT */


/**
 * Gets the current warp drive percent.
 *
 * @returns The warp drive percent.
 * @param   pVM         The VM handle.
 */
VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
{
    return pVM->tm.s.u32VirtualWarpDrivePercentage;
}