VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 28800

Last change on this file since 28800 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 73.9 KB
Line 
1/* $Id: TMAll.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/tm.h>
24#include <VBox/mm.h>
25#ifdef IN_RING3
26# include <VBox/rem.h>
27#endif
28#include "TMInternal.h"
29#include <VBox/vm.h>
30
31#include <VBox/param.h>
32#include <VBox/err.h>
33#include <VBox/log.h>
34#include <VBox/sup.h>
35#include <iprt/time.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#ifdef IN_RING3
39# include <iprt/thread.h>
40#endif
41
42
43/*******************************************************************************
44* Defined Constants And Macros *
45*******************************************************************************/
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer.
 *
 * Only does real work in strict builds; expands to an empty statement
 * otherwise.  The timer stores its critical section as a ring-3 pointer,
 * hence the MMHyperR3ToCC conversion to the current context before the
 * ownership check. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
            AssertMsg(pCritSect && PDMCritSectIsOwner(pCritSect), \
                      ("pTimer=%p (%s) pCritSect=%p\n", pTimer, R3STRING(pTimer->pszDesc), (pTimer)->pCritSect)); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif
62
63
64#ifndef tmTimerLock
65
66/**
67 * Try take the timer lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
68 *
69 * @retval VINF_SUCCESS on success (always in ring-3).
70 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
71 *
72 * @param pVM The VM handle.
73 *
74 * @thread EMTs for the time being.
75 */
76int tmTimerLock(PVM pVM)
77{
78 VM_ASSERT_EMT(pVM);
79 int rc = PDMCritSectEnter(&pVM->tm.s.TimerCritSect, VERR_SEM_BUSY);
80 return rc;
81}
82
83
84/**
85 * Try take the timer lock, no waiting.
86 *
87 * @retval VINF_SUCCESS on success.
88 * @retval VERR_SEM_BUSY if busy.
89 *
90 * @param pVM The VM handle.
91 */
92int tmTimerTryLock(PVM pVM)
93{
94 int rc = PDMCritSectTryEnter(&pVM->tm.s.TimerCritSect);
95 return rc;
96}
97
98
99/**
100 * Release the EMT/TM lock.
101 *
102 * @param pVM The VM handle.
103 */
104void tmTimerUnlock(PVM pVM)
105{
106 PDMCritSectLeave(&pVM->tm.s.TimerCritSect);
107}
108
109
110/**
111 * Try take the VirtualSync lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
112 *
113 * @retval VINF_SUCCESS on success (always in ring-3).
114 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
115 *
116 * @param pVM The VM handle.
117 */
118int tmVirtualSyncLock(PVM pVM)
119{
120 VM_ASSERT_EMT(pVM);
121 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
122 return rc;
123}
124
125
126/**
127 * Try take the VirtualSync lock, no waiting.
128 *
129 * @retval VINF_SUCCESS on success.
130 * @retval VERR_SEM_BUSY if busy.
131 *
132 * @param pVM The VM handle.
133 */
134int tmVirtualSyncTryLock(PVM pVM)
135{
136 VM_ASSERT_EMT(pVM);
137 int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
138 return rc;
139}
140
141
142/**
143 * Release the VirtualSync lock.
144 *
145 * @param pVM The VM handle.
146 */
147void tmVirtualSyncUnlock(PVM pVM)
148{
149 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
150}
151
152#endif /* ! macros */
153
154/**
155 * Notification that execution is about to start.
156 *
157 * This call must always be paired with a TMNotifyEndOfExecution call.
158 *
159 * The function may, depending on the configuration, resume the TSC and future
160 * clocks that only ticks when we're executing guest code.
161 *
162 * @param pVCpu The VMCPU to operate on.
163 */
164VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
165{
166 PVM pVM = pVCpu->CTX_SUFF(pVM);
167
168 if (pVM->tm.s.fTSCTiedToExecution)
169 tmCpuTickResume(pVM, pVCpu);
170}
171
172
173/**
174 * Notification that execution is about to start.
175 *
176 * This call must always be paired with a TMNotifyStartOfExecution call.
177 *
178 * The function may, depending on the configuration, suspend the TSC and future
179 * clocks that only ticks when we're executing guest code.
180 *
181 * @param pVCpu The VMCPU to operate on.
182 */
183VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
184{
185 PVM pVM = pVCpu->CTX_SUFF(pVM);
186
187 if (pVM->tm.s.fTSCTiedToExecution)
188 tmCpuTickPause(pVM, pVCpu);
189}
190
191
192/**
193 * Notification that the cpu is entering the halt state
194 *
195 * This call must always be paired with a TMNotifyEndOfExecution call.
196 *
197 * The function may, depending on the configuration, resume the TSC and future
198 * clocks that only ticks when we're halted.
199 *
200 * @param pVCpu The VMCPU to operate on.
201 */
202VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
203{
204 PVM pVM = pVCpu->CTX_SUFF(pVM);
205
206 if ( pVM->tm.s.fTSCTiedToExecution
207 && !pVM->tm.s.fTSCNotTiedToHalt)
208 tmCpuTickResume(pVM, pVCpu);
209}
210
211
212/**
213 * Notification that the cpu is leaving the halt state
214 *
215 * This call must always be paired with a TMNotifyStartOfHalt call.
216 *
217 * The function may, depending on the configuration, suspend the TSC and future
218 * clocks that only ticks when we're halted.
219 *
220 * @param pVCpu The VMCPU to operate on.
221 */
222VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
223{
224 PVM pVM = pVCpu->CTX_SUFF(pVM);
225
226 if ( pVM->tm.s.fTSCTiedToExecution
227 && !pVM->tm.s.fTSCNotTiedToHalt)
228 tmCpuTickPause(pVM, pVCpu);
229}
230
231
232/**
233 * Raise the timer force action flag and notify the dedicated timer EMT.
234 *
235 * @param pVM The VM handle.
236 */
237DECLINLINE(void) tmScheduleNotify(PVM pVM)
238{
239 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
240 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
241 {
242 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
243 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
244#ifdef IN_RING3
245 REMR3NotifyTimerPending(pVM, pVCpuDst);
246 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
247#endif
248 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
249 }
250}
251
252
253/**
254 * Schedule the queue which was changed.
255 */
256DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
257{
258 PVM pVM = pTimer->CTX_SUFF(pVM);
259 if ( VM_IS_EMT(pVM)
260 && RT_SUCCESS(tmTimerTryLock(pVM)))
261 {
262 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
263 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
264 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
265#ifdef VBOX_STRICT
266 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
267#endif
268 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
269 tmTimerUnlock(pVM);
270 }
271 else
272 {
273 TMTIMERSTATE enmState = pTimer->enmState;
274 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
275 tmScheduleNotify(pVM);
276 }
277}
278
279
280/**
281 * Try change the state to enmStateNew from enmStateOld
282 * and link the timer into the scheduling queue.
283 *
284 * @returns Success indicator.
285 * @param pTimer Timer in question.
286 * @param enmStateNew The new timer state.
287 * @param enmStateOld The old timer state.
288 */
289DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
290{
291 /*
292 * Attempt state change.
293 */
294 bool fRc;
295 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
296 return fRc;
297}
298
299
300/**
301 * Links the timer onto the scheduling queue.
302 *
303 * @param pQueue The timer queue the timer belongs to.
304 * @param pTimer The timer.
305 *
306 * @todo FIXME: Look into potential race with the thread running the queues
307 * and stuff.
308 */
309DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
310{
311 Assert(!pTimer->offScheduleNext);
312 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
313 int32_t offHead;
314 do
315 {
316 offHead = pQueue->offSchedule;
317 if (offHead)
318 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
319 else
320 pTimer->offScheduleNext = 0;
321 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
322}
323
324
325/**
326 * Try change the state to enmStateNew from enmStateOld
327 * and link the timer into the scheduling queue.
328 *
329 * @returns Success indicator.
330 * @param pTimer Timer in question.
331 * @param enmStateNew The new timer state.
332 * @param enmStateOld The old timer state.
333 */
334DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
335{
336 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
337 {
338 tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
339 return true;
340 }
341 return false;
342}
343
344
345#ifdef VBOX_HIGH_RES_TIMERS_HACK
346
347/**
348 * Worker for tmTimerPollInternal that handles misses when the decidate timer
349 * EMT is polling.
350 *
351 * @returns See tmTimerPollInternal.
352 * @param pVM Pointer to the shared VM structure.
353 * @param u64Now Current virtual clock timestamp.
354 * @param u64Delta The delta to the next even in ticks of the
355 * virtual clock.
356 * @param pu64Delta Where to return the delta.
357 * @param pCounter The statistics counter to update.
358 */
359DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
360{
361 Assert(!(u64Delta & RT_BIT_64(63)));
362
363 if (!pVM->tm.s.fVirtualWarpDrive)
364 {
365 *pu64Delta = u64Delta;
366 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
367 }
368
369 /*
370 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
371 */
372 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
373 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
374
375 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
376 u64GipTime -= u64Start; /* the start is GIP time. */
377 if (u64GipTime >= u64Delta)
378 {
379 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
380 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
381 }
382 else
383 {
384 u64Delta -= u64GipTime;
385 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
386 u64Delta += u64GipTime;
387 }
388 *pu64Delta = u64Delta;
389 u64GipTime += u64Start;
390 return u64GipTime;
391}
392
393
394/**
395 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
396 * than the one dedicated to timer work.
397 *
398 * @returns See tmTimerPollInternal.
399 * @param pVM Pointer to the shared VM structure.
400 * @param u64Now Current virtual clock timestamp.
401 * @param pu64Delta Where to return the delta.
402 */
403DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
404{
405 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
406 *pu64Delta = s_u64OtherRet;
407 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
408}
409
410
411/**
412 * Worker for tmTimerPollInternal.
413 *
414 * @returns See tmTimerPollInternal.
415 * @param pVM Pointer to the shared VM structure.
416 * @param pVCpu Pointer to the shared VMCPU structure of the
417 * caller.
418 * @param pVCpuDst Pointer to the shared VMCPU structure of the
419 * dedicated timer EMT.
420 * @param u64Now Current virtual clock timestamp.
421 * @param pu64Delta Where to return the delta.
422 * @param pCounter The statistics counter to update.
423 */
424DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
425 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
426{
427 STAM_COUNTER_INC(pCounter);
428 if (pVCpuDst != pVCpu)
429 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
430 *pu64Delta = 0;
431 return 0;
432}
433
434/**
435 * Common worker for TMTimerPollGIP and TMTimerPoll.
436 *
437 * This function is called before FFs are checked in the inner execution EM loops.
438 *
439 * @returns The GIP timestamp of the next event.
440 * 0 if the next event has already expired.
441 *
442 * @param pVM Pointer to the shared VM structure.
443 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
444 * @param pu64Delta Where to store the delta.
445 *
446 * @thread The emulation thread.
447 *
448 * @remarks GIP uses ns ticks.
449 */
450DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
451{
452 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
453 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
454 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
455
456 /*
457 * Return straight away if the timer FF is already set ...
458 */
459 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
460 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
461
462 /*
463 * ... or if timers are being run.
464 */
465 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
466 {
467 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
468 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
469 }
470
471 /*
472 * Check for TMCLOCK_VIRTUAL expiration.
473 */
474 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
475 const int64_t i64Delta1 = u64Expire1 - u64Now;
476 if (i64Delta1 <= 0)
477 {
478 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
479 {
480 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
481 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
482#ifdef IN_RING3
483 REMR3NotifyTimerPending(pVM, pVCpuDst);
484#endif
485 }
486 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
487 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
488 }
489
490 /*
491 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
492 * This isn't quite as stright forward if in a catch-up, not only do
493 * we have to adjust the 'now' but when have to adjust the delta as well.
494 */
495
496 /*
497 * Optimistic lockless approach.
498 */
499 uint64_t u64VirtualSyncNow;
500 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
501 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
502 {
503 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
504 {
505 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
506 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
507 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
508 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
509 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
510 {
511 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
512 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
513 if (i64Delta2 > 0)
514 {
515 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
516 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
517
518 if (pVCpu == pVCpuDst)
519 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
520 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
521 }
522
523 if ( !pVM->tm.s.fRunningQueues
524 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
525 {
526 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
527 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
528#ifdef IN_RING3
529 REMR3NotifyTimerPending(pVM, pVCpuDst);
530#endif
531 }
532
533 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
534 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
535 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
536 }
537 }
538 }
539 else
540 {
541 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
542 LogFlow(("TMTimerPoll: stopped\n"));
543 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
544 }
545
546 /*
547 * Complicated lockless approach.
548 */
549 uint64_t off;
550 uint32_t u32Pct = 0;
551 bool fCatchUp;
552 int cOuterTries = 42;
553 for (;; cOuterTries--)
554 {
555 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
556 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
557 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
558 if (fCatchUp)
559 {
560 /* No changes allowed, try get a consistent set of parameters. */
561 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
562 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
563 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
564 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
565 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
566 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
567 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
568 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
569 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
570 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
571 || cOuterTries <= 0)
572 {
573 uint64_t u64Delta = u64Now - u64Prev;
574 if (RT_LIKELY(!(u64Delta >> 32)))
575 {
576 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
577 if (off > u64Sub + offGivenUp)
578 off -= u64Sub;
579 else /* we've completely caught up. */
580 off = offGivenUp;
581 }
582 else
583 /* More than 4 seconds since last time (or negative), ignore it. */
584 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
585
586 /* Check that we're still running and in catch up. */
587 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
588 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
589 break;
590 }
591 }
592 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
593 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
594 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
595 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
596 break; /* Got an consistent offset */
597
598 /* Repeat the initial checks before iterating. */
599 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
600 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
601 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
602 {
603 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
604 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
605 }
606 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
607 {
608 LogFlow(("TMTimerPoll: stopped\n"));
609 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
610 }
611 if (cOuterTries <= 0)
612 break; /* that's enough */
613 }
614 if (cOuterTries <= 0)
615 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
616 u64VirtualSyncNow = u64Now - off;
617
618 /* Calc delta and see if we've got a virtual sync hit. */
619 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
620 if (i64Delta2 <= 0)
621 {
622 if ( !pVM->tm.s.fRunningQueues
623 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
624 {
625 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
626 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
627#ifdef IN_RING3
628 REMR3NotifyTimerPending(pVM, pVCpuDst);
629#endif
630 }
631 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
632 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
633 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
634 }
635
636 /*
637 * Return the time left to the next event.
638 */
639 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
640 if (pVCpu == pVCpuDst)
641 {
642 if (fCatchUp)
643 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
644 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
645 }
646 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
647}
648
649
650/**
651 * Set FF if we've passed the next virtual event.
652 *
653 * This function is called before FFs are checked in the inner execution EM loops.
654 *
655 * @returns true if timers are pending, false if not.
656 *
657 * @param pVM Pointer to the shared VM structure.
658 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
659 * @thread The emulation thread.
660 */
661VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
662{
663 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
664 uint64_t off = 0;
665 tmTimerPollInternal(pVM, pVCpu, &off);
666 return off == 0;
667}
668
669
670/**
671 * Set FF if we've passed the next virtual event.
672 *
673 * This function is called before FFs are checked in the inner execution EM loops.
674 *
675 * @param pVM Pointer to the shared VM structure.
676 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
677 * @thread The emulation thread.
678 */
679VMM_INT_DECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
680{
681 uint64_t off;
682 tmTimerPollInternal(pVM, pVCpu, &off);
683}
684
685
686/**
687 * Set FF if we've passed the next virtual event.
688 *
689 * This function is called before FFs are checked in the inner execution EM loops.
690 *
691 * @returns The GIP timestamp of the next event.
692 * 0 if the next event has already expired.
693 * @param pVM Pointer to the shared VM structure.
694 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
695 * @param pu64Delta Where to store the delta.
696 * @thread The emulation thread.
697 */
698VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
699{
700 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
701}
702
703#endif /* VBOX_HIGH_RES_TIMERS_HACK */
704
705/**
706 * Gets the host context ring-3 pointer of the timer.
707 *
708 * @returns HC R3 pointer.
709 * @param pTimer Timer handle as returned by one of the create functions.
710 */
711VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
712{
713 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
714}
715
716
717/**
718 * Gets the host context ring-0 pointer of the timer.
719 *
720 * @returns HC R0 pointer.
721 * @param pTimer Timer handle as returned by one of the create functions.
722 */
723VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
724{
725 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
726}
727
728
729/**
730 * Gets the RC pointer of the timer.
731 *
732 * @returns RC pointer.
733 * @param pTimer Timer handle as returned by one of the create functions.
734 */
735VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
736{
737 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
738}
739
740
741/**
742 * Links a timer into the active list of a timer queue.
743 *
744 * The caller must have taken the TM semaphore before calling this function.
745 *
746 * @param pQueue The queue.
747 * @param pTimer The timer.
748 * @param u64Expire The timer expiration time.
749 */
750DECL_FORCE_INLINE(void) tmTimerActiveLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
751{
752 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
753 if (pCur)
754 {
755 for (;; pCur = TMTIMER_GET_NEXT(pCur))
756 {
757 if (pCur->u64Expire > u64Expire)
758 {
759 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
760 TMTIMER_SET_NEXT(pTimer, pCur);
761 TMTIMER_SET_PREV(pTimer, pPrev);
762 if (pPrev)
763 TMTIMER_SET_NEXT(pPrev, pTimer);
764 else
765 {
766 TMTIMER_SET_HEAD(pQueue, pTimer);
767 pQueue->u64Expire = u64Expire;
768 }
769 TMTIMER_SET_PREV(pCur, pTimer);
770 return;
771 }
772 if (!pCur->offNext)
773 {
774 TMTIMER_SET_NEXT(pCur, pTimer);
775 TMTIMER_SET_PREV(pTimer, pCur);
776 return;
777 }
778 }
779 }
780 else
781 {
782 TMTIMER_SET_HEAD(pQueue, pTimer);
783 pQueue->u64Expire = u64Expire;
784 }
785}
786
787
788/**
789 * Optimized TMTimerSet code path for starting an inactive timer.
790 *
791 * @returns VBox status code.
792 *
793 * @param pVM The VM handle.
794 * @param pTimer The timer handle.
795 * @param u64Expire The new expire time.
796 */
797static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
798{
799 Assert(!pTimer->offPrev);
800 Assert(!pTimer->offNext);
801 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
802
803 /*
804 * Calculate and set the expiration time.
805 */
806 pTimer->u64Expire = u64Expire;
807 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));
808
809 /*
810 * Link the timer into the active list.
811 */
812 TMCLOCK const enmClock = pTimer->enmClock;
813 tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
814
815 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
816 tmTimerUnlock(pVM);
817 return VINF_SUCCESS;
818}
819
820
821
822
823
824/**
825 * Arm a timer with a (new) expire time.
826 *
827 * @returns VBox status.
828 * @param pTimer Timer handle as returned by one of the create functions.
829 * @param u64Expire New expire time.
830 */
831VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
832{
833 PVM pVM = pTimer->CTX_SUFF(pVM);
834 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
835 TMTIMER_ASSERT_CRITSECT(pTimer);
836
837#ifdef VBOX_WITH_STATISTICS
838 /* Gather optimization info. */
839 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
840 TMTIMERSTATE enmOrgState = pTimer->enmState;
841 switch (enmOrgState)
842 {
843 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
844 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
845 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
846 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
847 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
848 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
849 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
850 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
851 }
852#endif
853
854 /*
855 * The most common case is setting the timer again during the callback.
856 * The second most common case is starting a timer at some other time.
857 */
858#if 1
859 TMTIMERSTATE enmState1 = pTimer->enmState;
860 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
861 || ( enmState1 == TMTIMERSTATE_STOPPED
862 && pTimer->pCritSect))
863 {
864 /* Try take the TM lock and check the state again. */
865 if (RT_SUCCESS_NP(tmTimerTryLock(pVM)))
866 {
867 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
868 {
869 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
870 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
871 return VINF_SUCCESS;
872 }
873 tmTimerUnlock(pVM);
874 }
875 }
876#endif
877
878 /*
879 * Unoptimized code path.
880 */
881 int cRetries = 1000;
882 do
883 {
884 /*
885 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
886 */
887 TMTIMERSTATE enmState = pTimer->enmState;
888 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
889 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
890 switch (enmState)
891 {
892 case TMTIMERSTATE_EXPIRED_DELIVER:
893 case TMTIMERSTATE_STOPPED:
894 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
895 {
896 Assert(!pTimer->offPrev);
897 Assert(!pTimer->offNext);
898 AssertMsg( pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
899 || pVM->tm.s.fVirtualSyncTicking
900 || u64Expire >= pVM->tm.s.u64VirtualSync,
901 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
902 pTimer->u64Expire = u64Expire;
903 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
904 tmSchedule(pTimer);
905 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
906 return VINF_SUCCESS;
907 }
908 break;
909
910 case TMTIMERSTATE_PENDING_SCHEDULE:
911 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
912 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
913 {
914 pTimer->u64Expire = u64Expire;
915 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
916 tmSchedule(pTimer);
917 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
918 return VINF_SUCCESS;
919 }
920 break;
921
922
923 case TMTIMERSTATE_ACTIVE:
924 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
925 {
926 pTimer->u64Expire = u64Expire;
927 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
928 tmSchedule(pTimer);
929 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
930 return VINF_SUCCESS;
931 }
932 break;
933
934 case TMTIMERSTATE_PENDING_RESCHEDULE:
935 case TMTIMERSTATE_PENDING_STOP:
936 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
937 {
938 pTimer->u64Expire = u64Expire;
939 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
940 tmSchedule(pTimer);
941 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
942 return VINF_SUCCESS;
943 }
944 break;
945
946
947 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
948 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
949 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
950#ifdef IN_RING3
951 if (!RTThreadYield())
952 RTThreadSleep(1);
953#else
954/** @todo call host context and yield after a couple of iterations */
955#endif
956 break;
957
958 /*
959 * Invalid states.
960 */
961 case TMTIMERSTATE_DESTROY:
962 case TMTIMERSTATE_FREE:
963 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
964 return VERR_TM_INVALID_STATE;
965 default:
966 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
967 return VERR_TM_UNKNOWN_STATE;
968 }
969 } while (cRetries-- > 0);
970
971 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
972 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
973 return VERR_INTERNAL_ERROR;
974}
975
976
977/**
978 * Return the current time for the specified clock, setting pu64Now if not NULL.
979 *
980 * @returns Current time.
981 * @param pVM The VM handle.
982 * @param enmClock The clock to query.
983 * @param pu64Now Optional pointer where to store the return time
984 */
985DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
986{
987 uint64_t u64Now;
988 switch (enmClock)
989 {
990 case TMCLOCK_VIRTUAL_SYNC:
991 u64Now = TMVirtualSyncGet(pVM);
992 break;
993 case TMCLOCK_VIRTUAL:
994 u64Now = TMVirtualGet(pVM);
995 break;
996 case TMCLOCK_REAL:
997 u64Now = TMRealGet(pVM);
998 break;
999 default:
1000 AssertFatalMsgFailed(("%d\n", enmClock));
1001 }
1002
1003 if (pu64Now)
1004 *pu64Now = u64Now;
1005 return u64Now;
1006}
1007
1008
1009/**
1010 * Optimized TMTimerSetRelative code path.
1011 *
1012 * @returns VBox status code.
1013 *
1014 * @param pVM The VM handle.
1015 * @param pTimer The timer handle.
1016 * @param cTicksToNext Clock ticks until the next time expiration.
1017 * @param pu64Now Where to return the current time stamp used.
1018 * Optional.
1019 */
1020static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1021{
1022 Assert(!pTimer->offPrev);
1023 Assert(!pTimer->offNext);
1024 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1025
1026 /*
1027 * Calculate and set the expiration time.
1028 */
1029 TMCLOCK const enmClock = pTimer->enmClock;
1030 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1031 pTimer->u64Expire = u64Expire;
1032 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));
1033
1034 /*
1035 * Link the timer into the active list.
1036 */
1037 tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1038
1039 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1040 tmTimerUnlock(pVM);
1041 return VINF_SUCCESS;
1042}
1043
1044
/**
 * Arm a timer with an expire time relative to the current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);
    PVM             pVM = pTimer->CTX_SUFF(pVM);
    int             rc;

#ifdef VBOX_WITH_STATISTICS
    /* Gather optimization info. */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling and we can also be 100% sure that we're not racing the
     * running of the timer queues. As an additional restraint we require the
     * timer to have a critical section associated with to be 100% there aren't
     * concurrent operations on the timer. (This latter isn't necessary any
     * longer as this isn't supported for any timers, critsect or not.)
     *
     * Note! Lock ordering doesn't apply when we only tries to
     * get the innermost locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        /* Fast path: timer is idle (stopped/just delivered), claim it directly. */
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(  (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                         || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            /* Note: the optimized worker also drops the TM lock for us. */
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif

    /*
     * Unoptimized path.
     *
     * Retry loop: each case below either succeeds (VINF_SUCCESS), fails hard
     * (invalid/unknown state), or loses a state-change race and reports
     * VERR_TRY_AGAIN to go around again.
     */
    TMCLOCK const   enmClock = pTimer->enmClock;
    bool            fOwnVirtSyncLock;
    fOwnVirtSyncLock = !fOwnTMLock
                    && enmClock == TMCLOCK_VIRTUAL_SYNC
                    && RT_SUCCESS(tmVirtualSyncTryLock(pVM));
    for (int cRetries = 1000; ; cRetries--)
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_STOPPED:
                if (enmClock == TMCLOCK_VIRTUAL_SYNC)
                {
                    /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
                     *  Figure a safe way of activating this timer while the queue is
                     *  being run.
                     *  (99.9% sure this that the assertion is caused by DevAPIC.cpp
                     *  re-starting the timer in response to an initial_count write.) */
                }
                /* fall thru */
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    /* Not on the active list in these states, so links must be clear. */
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            /* Transient states: another thread is mid-update; yield and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                rc = VERR_TRY_AGAIN;
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_INVALID_STATE;
                break;

            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_UNKNOWN_STATE;
                break;
        }

        /* switch + loop is tedious to break out of. */
        if (rc == VINF_SUCCESS)
            break;

        if (rc != VERR_TRY_AGAIN)
        {
            /* Hard failure: still report the current time to the caller. */
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }
        if (cRetries <= 0)
        {
            AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_INTERNAL_ERROR;
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }

        /*
         * Retry to gain locks.
         */
        if (!fOwnTMLock)
        {
            fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
            if (    !fOwnTMLock
                &&  enmClock == TMCLOCK_VIRTUAL_SYNC
                &&  !fOwnVirtSyncLock)
                fOwnVirtSyncLock = RT_SUCCESS_NP(tmVirtualSyncTryLock(pVM));
        }

    } /* for (;;) */

    /*
     * Clean up and return.
     */
    if (fOwnVirtSyncLock)
        tmVirtualSyncUnlock(pVM);
    if (fOwnTMLock)
        tmTimerUnlock(pVM);

    /* Statistics: count virtual-sync operations that ran without either lock. */
    if (    !fOwnTMLock
        &&  !fOwnVirtSyncLock
        &&  enmClock == TMCLOCK_VIRTUAL_SYNC)
        STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeRacyVirtSync);

    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    return rc;
}
1272
1273
1274/**
1275 * Arm a timer with a (new) expire time relative to current time.
1276 *
1277 * @returns VBox status.
1278 * @param pTimer Timer handle as returned by one of the create functions.
1279 * @param cMilliesToNext Number of millieseconds to the next tick.
1280 */
1281VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
1282{
1283 PVM pVM = pTimer->CTX_SUFF(pVM);
1284 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1285
1286 switch (pTimer->enmClock)
1287 {
1288 case TMCLOCK_VIRTUAL:
1289 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1290 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1291
1292 case TMCLOCK_VIRTUAL_SYNC:
1293 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1294 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1295
1296 case TMCLOCK_REAL:
1297 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1298 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
1299
1300 default:
1301 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1302 return VERR_INTERNAL_ERROR;
1303 }
1304}
1305
1306
1307/**
1308 * Arm a timer with a (new) expire time relative to current time.
1309 *
1310 * @returns VBox status.
1311 * @param pTimer Timer handle as returned by one of the create functions.
1312 * @param cMicrosToNext Number of microseconds to the next tick.
1313 */
1314VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
1315{
1316 PVM pVM = pTimer->CTX_SUFF(pVM);
1317 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1318
1319 switch (pTimer->enmClock)
1320 {
1321 case TMCLOCK_VIRTUAL:
1322 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1323 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1324
1325 case TMCLOCK_VIRTUAL_SYNC:
1326 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1327 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1328
1329 case TMCLOCK_REAL:
1330 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1331 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
1332
1333 default:
1334 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1335 return VERR_INTERNAL_ERROR;
1336 }
1337}
1338
1339
1340/**
1341 * Arm a timer with a (new) expire time relative to current time.
1342 *
1343 * @returns VBox status.
1344 * @param pTimer Timer handle as returned by one of the create functions.
1345 * @param cNanosToNext Number of nanoseconds to the next tick.
1346 */
1347VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
1348{
1349 PVM pVM = pTimer->CTX_SUFF(pVM);
1350 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1351
1352 switch (pTimer->enmClock)
1353 {
1354 case TMCLOCK_VIRTUAL:
1355 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1356 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1357
1358 case TMCLOCK_VIRTUAL_SYNC:
1359 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1360 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1361
1362 case TMCLOCK_REAL:
1363 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1364 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
1365
1366 default:
1367 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1368 return VERR_INTERNAL_ERROR;
1369 }
1370}
1371
1372
1373/**
1374 * Stop the timer.
1375 * Use TMR3TimerArm() to "un-stop" the timer.
1376 *
1377 * @returns VBox status.
1378 * @param pTimer Timer handle as returned by one of the create functions.
1379 */
1380VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
1381{
1382 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1383 TMTIMER_ASSERT_CRITSECT(pTimer);
1384
1385 /** @todo see if this function needs optimizing. */
1386 int cRetries = 1000;
1387 do
1388 {
1389 /*
1390 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1391 */
1392 TMTIMERSTATE enmState = pTimer->enmState;
1393 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1394 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
1395 switch (enmState)
1396 {
1397 case TMTIMERSTATE_EXPIRED_DELIVER:
1398 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
1399 return VERR_INVALID_PARAMETER;
1400
1401 case TMTIMERSTATE_STOPPED:
1402 case TMTIMERSTATE_PENDING_STOP:
1403 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1404 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1405 return VINF_SUCCESS;
1406
1407 case TMTIMERSTATE_PENDING_SCHEDULE:
1408 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
1409 {
1410 tmSchedule(pTimer);
1411 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1412 return VINF_SUCCESS;
1413 }
1414
1415 case TMTIMERSTATE_PENDING_RESCHEDULE:
1416 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1417 {
1418 tmSchedule(pTimer);
1419 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1420 return VINF_SUCCESS;
1421 }
1422 break;
1423
1424 case TMTIMERSTATE_ACTIVE:
1425 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1426 {
1427 tmSchedule(pTimer);
1428 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1429 return VINF_SUCCESS;
1430 }
1431 break;
1432
1433 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1434 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1435 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1436#ifdef IN_RING3
1437 if (!RTThreadYield())
1438 RTThreadSleep(1);
1439#else
1440/**@todo call host and yield cpu after a while. */
1441#endif
1442 break;
1443
1444 /*
1445 * Invalid states.
1446 */
1447 case TMTIMERSTATE_DESTROY:
1448 case TMTIMERSTATE_FREE:
1449 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1450 return VERR_TM_INVALID_STATE;
1451 default:
1452 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1453 return VERR_TM_UNKNOWN_STATE;
1454 }
1455 } while (cRetries-- > 0);
1456
1457 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1458 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1459 return VERR_INTERNAL_ERROR;
1460}
1461
1462
1463/**
1464 * Get the current clock time.
1465 * Handy for calculating the new expire time.
1466 *
1467 * @returns Current clock time.
1468 * @param pTimer Timer handle as returned by one of the create functions.
1469 */
1470VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1471{
1472 uint64_t u64;
1473 PVM pVM = pTimer->CTX_SUFF(pVM);
1474
1475 switch (pTimer->enmClock)
1476 {
1477 case TMCLOCK_VIRTUAL:
1478 u64 = TMVirtualGet(pVM);
1479 break;
1480 case TMCLOCK_VIRTUAL_SYNC:
1481 u64 = TMVirtualSyncGet(pVM);
1482 break;
1483 case TMCLOCK_REAL:
1484 u64 = TMRealGet(pVM);
1485 break;
1486 default:
1487 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1488 return ~(uint64_t)0;
1489 }
1490 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1491 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1492 return u64;
1493}
1494
1495
1496/**
1497 * Get the freqency of the timer clock.
1498 *
1499 * @returns Clock frequency (as Hz of course).
1500 * @param pTimer Timer handle as returned by one of the create functions.
1501 */
1502VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
1503{
1504 switch (pTimer->enmClock)
1505 {
1506 case TMCLOCK_VIRTUAL:
1507 case TMCLOCK_VIRTUAL_SYNC:
1508 return TMCLOCK_FREQ_VIRTUAL;
1509
1510 case TMCLOCK_REAL:
1511 return TMCLOCK_FREQ_REAL;
1512
1513 default:
1514 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1515 return 0;
1516 }
1517}
1518
1519
1520/**
1521 * Get the current clock time as nanoseconds.
1522 *
1523 * @returns The timer clock as nanoseconds.
1524 * @param pTimer Timer handle as returned by one of the create functions.
1525 */
1526VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
1527{
1528 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
1529}
1530
1531
1532/**
1533 * Get the current clock time as microseconds.
1534 *
1535 * @returns The timer clock as microseconds.
1536 * @param pTimer Timer handle as returned by one of the create functions.
1537 */
1538VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
1539{
1540 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
1541}
1542
1543
1544/**
1545 * Get the current clock time as milliseconds.
1546 *
1547 * @returns The timer clock as milliseconds.
1548 * @param pTimer Timer handle as returned by one of the create functions.
1549 */
1550VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
1551{
1552 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
1553}
1554
1555
1556/**
1557 * Converts the specified timer clock time to nanoseconds.
1558 *
1559 * @returns nanoseconds.
1560 * @param pTimer Timer handle as returned by one of the create functions.
1561 * @param u64Ticks The clock ticks.
1562 * @remark There could be rounding errors here. We just do a simple integere divide
1563 * without any adjustments.
1564 */
1565VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
1566{
1567 switch (pTimer->enmClock)
1568 {
1569 case TMCLOCK_VIRTUAL:
1570 case TMCLOCK_VIRTUAL_SYNC:
1571 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1572 return u64Ticks;
1573
1574 case TMCLOCK_REAL:
1575 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1576 return u64Ticks * 1000000;
1577
1578 default:
1579 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1580 return 0;
1581 }
1582}
1583
1584
1585/**
1586 * Converts the specified timer clock time to microseconds.
1587 *
1588 * @returns microseconds.
1589 * @param pTimer Timer handle as returned by one of the create functions.
1590 * @param u64Ticks The clock ticks.
1591 * @remark There could be rounding errors here. We just do a simple integere divide
1592 * without any adjustments.
1593 */
1594VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
1595{
1596 switch (pTimer->enmClock)
1597 {
1598 case TMCLOCK_VIRTUAL:
1599 case TMCLOCK_VIRTUAL_SYNC:
1600 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1601 return u64Ticks / 1000;
1602
1603 case TMCLOCK_REAL:
1604 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1605 return u64Ticks * 1000;
1606
1607 default:
1608 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1609 return 0;
1610 }
1611}
1612
1613
1614/**
1615 * Converts the specified timer clock time to milliseconds.
1616 *
1617 * @returns milliseconds.
1618 * @param pTimer Timer handle as returned by one of the create functions.
1619 * @param u64Ticks The clock ticks.
1620 * @remark There could be rounding errors here. We just do a simple integere divide
1621 * without any adjustments.
1622 */
1623VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
1624{
1625 switch (pTimer->enmClock)
1626 {
1627 case TMCLOCK_VIRTUAL:
1628 case TMCLOCK_VIRTUAL_SYNC:
1629 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1630 return u64Ticks / 1000000;
1631
1632 case TMCLOCK_REAL:
1633 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1634 return u64Ticks;
1635
1636 default:
1637 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1638 return 0;
1639 }
1640}
1641
1642
1643/**
1644 * Converts the specified nanosecond timestamp to timer clock ticks.
1645 *
1646 * @returns timer clock ticks.
1647 * @param pTimer Timer handle as returned by one of the create functions.
1648 * @param u64NanoTS The nanosecond value ticks to convert.
1649 * @remark There could be rounding and overflow errors here.
1650 */
1651VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
1652{
1653 switch (pTimer->enmClock)
1654 {
1655 case TMCLOCK_VIRTUAL:
1656 case TMCLOCK_VIRTUAL_SYNC:
1657 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1658 return u64NanoTS;
1659
1660 case TMCLOCK_REAL:
1661 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1662 return u64NanoTS / 1000000;
1663
1664 default:
1665 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1666 return 0;
1667 }
1668}
1669
1670
1671/**
1672 * Converts the specified microsecond timestamp to timer clock ticks.
1673 *
1674 * @returns timer clock ticks.
1675 * @param pTimer Timer handle as returned by one of the create functions.
1676 * @param u64MicroTS The microsecond value ticks to convert.
1677 * @remark There could be rounding and overflow errors here.
1678 */
1679VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
1680{
1681 switch (pTimer->enmClock)
1682 {
1683 case TMCLOCK_VIRTUAL:
1684 case TMCLOCK_VIRTUAL_SYNC:
1685 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1686 return u64MicroTS * 1000;
1687
1688 case TMCLOCK_REAL:
1689 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1690 return u64MicroTS / 1000;
1691
1692 default:
1693 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1694 return 0;
1695 }
1696}
1697
1698
1699/**
1700 * Converts the specified millisecond timestamp to timer clock ticks.
1701 *
1702 * @returns timer clock ticks.
1703 * @param pTimer Timer handle as returned by one of the create functions.
1704 * @param u64MilliTS The millisecond value ticks to convert.
1705 * @remark There could be rounding and overflow errors here.
1706 */
1707VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
1708{
1709 switch (pTimer->enmClock)
1710 {
1711 case TMCLOCK_VIRTUAL:
1712 case TMCLOCK_VIRTUAL_SYNC:
1713 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1714 return u64MilliTS * 1000000;
1715
1716 case TMCLOCK_REAL:
1717 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1718 return u64MilliTS;
1719
1720 default:
1721 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1722 return 0;
1723 }
1724}
1725
1726
/**
 * Get the expire time of the timer.
 * Only valid for active timers.
 *
 * @returns Expire time of the timer, ~0 when not active (or on error).
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
{
    TMTIMER_ASSERT_CRITSECT(pTimer);
    int cRetries = 1000;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /* Stopped or being stopped/delivered: no meaningful expire time. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;

            /* Active (or about to be (re)scheduled): u64Expire is valid. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return pTimer->u64Expire;

            /* Transient states: u64Expire is being written; yield and retry. */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
          pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return ~(uint64_t)0;
}
1787
1788
/**
 * Checks if a timer is active or not.
 *
 * @returns True if active.
 * @returns False if not active.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
{
    /* Single snapshot of the state; no retry loop needed since every state
       maps unambiguously to active/inactive. */
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        /* Stopped, delivered, or on the way to being stopped. */
        case TMTIMERSTATE_STOPPED:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;

        /* Armed, or in the middle of being (re)armed. */
        case TMTIMERSTATE_ACTIVE:
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return true;

        /*
         * Invalid states.
         */
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;
        default:
            AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
            return false;
    }
}
1833
1834
1835/**
1836 * Convert state to string.
1837 *
1838 * @returns Readonly status name.
1839 * @param enmState State.
1840 */
1841const char *tmTimerState(TMTIMERSTATE enmState)
1842{
1843 switch (enmState)
1844 {
1845#define CASE(num, state) \
1846 case TMTIMERSTATE_##state: \
1847 AssertCompile(TMTIMERSTATE_##state == (num)); \
1848 return #num "-" #state
1849 CASE( 1,STOPPED);
1850 CASE( 2,ACTIVE);
1851 CASE( 3,EXPIRED_GET_UNLINK);
1852 CASE( 4,EXPIRED_DELIVER);
1853 CASE( 5,PENDING_STOP);
1854 CASE( 6,PENDING_STOP_SCHEDULE);
1855 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
1856 CASE( 8,PENDING_SCHEDULE);
1857 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
1858 CASE(10,PENDING_RESCHEDULE);
1859 CASE(11,DESTROY);
1860 CASE(12,FREE);
1861 default:
1862 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
1863 return "Invalid state!";
1864#undef CASE
1865 }
1866}
1867
1868
/**
 * Schedules the given timer on the given queue.
 *
 * Consumes one pending state transition (schedule/reschedule/stop) and
 * performs the corresponding active-list insert/unlink.
 *
 * @param   pQueue  The timer queue.
 * @param   pTimer  The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            {
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */

                /* Unlink from the active list; a head unlink also refreshes
                   the queue's cached expire time. */
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
                const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pNext);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pNext);
                    pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
                }
                if (pNext)
                    TMTIMER_SET_PREV(pNext, pPrev);
                pTimer->offNext = 0;
                pTimer->offPrev = 0;
                /* fall thru */
            }

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            {
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */

                /* Walk the expire-sorted list; insert before the first entry
                   that expires later, else append at the tail. */
                PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
                if (pCur)
                {
                    const uint64_t u64Expire = pTimer->u64Expire;
                    for (;; pCur = TMTIMER_GET_NEXT(pCur))
                    {
                        if (pCur->u64Expire > u64Expire)
                        {
                            const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                            TMTIMER_SET_NEXT(pTimer, pCur);
                            TMTIMER_SET_PREV(pTimer, pPrev);
                            if (pPrev)
                                TMTIMER_SET_NEXT(pPrev, pTimer);
                            else
                            {
                                /* New head: update the cached queue expire time. */
                                TMTIMER_SET_HEAD(pQueue, pTimer);
                                pQueue->u64Expire = u64Expire;
                            }
                            TMTIMER_SET_PREV(pCur, pTimer);
                            return;
                        }
                        if (!pCur->offNext)
                        {
                            /* Reached the tail: append. */
                            TMTIMER_SET_NEXT(pCur, pTimer);
                            TMTIMER_SET_PREV(pTimer, pCur);
                            return;
                        }
                    }
                }
                else
                {
                    /* Empty queue: timer becomes the head and defines the expire time. */
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    pQueue->u64Expire = pTimer->u64Expire;
                }
                return;
            }

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
            {
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */

                /* Unlink from the active list, then fall into the plain stop case. */
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
                const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pNext);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pNext);
                    pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
                }
                if (pNext)
                    TMTIMER_SET_PREV(pNext, pPrev);
                pTimer->offNext = 0;
                pTimer->offPrev = 0;
                /* fall thru */
            }

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLink(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
2022
2023
2024/**
2025 * Schedules the specified timer queue.
2026 *
2027 * @param pVM The VM to run the timers for.
2028 * @param pQueue The queue to schedule.
2029 *
2030 * @remarks Called while owning the lock.
2031 */
2032void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
2033{
2034 TM_ASSERT_LOCK(pVM);
2035
2036 /*
2037 * Dequeue the scheduling list and iterate it.
2038 */
2039 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
2040 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
2041 if (!offNext)
2042 return;
2043 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
2044 while (pNext)
2045 {
2046 /*
2047 * Unlink the head timer and find the next one.
2048 */
2049 PTMTIMER pTimer = pNext;
2050 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
2051 pTimer->offScheduleNext = 0;
2052
2053 /*
2054 * Do the scheduling.
2055 */
2056 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
2057 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
2058 tmTimerQueueScheduleOne(pQueue, pTimer);
2059 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
2060 } /* foreach timer in current schedule batch. */
2061 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
2062}
2063
2064
2065#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * @param   pVM         VM handle.
 * @param   pszWhere    Caller identification string, included in the
 *                      assertion messages so a failure can be traced back
 *                      to the call site that triggered the check.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_LOCK(pVM);

    /*
     * Check the linking of the active lists.
     */
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            /* Each entry must carry this queue's clock and link back to its predecessor. */
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    /* An active timer may not sit on the schedule list, unless its
                       state changed concurrently after enmState was sampled above
                       (hence the deliberate re-read of pCur->enmState here). */
                    AssertMsg( !pCur->offScheduleNext
                               || pCur->enmState != TMTIMERSTATE_ACTIVE,
                               ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            /* Timers in these states must be linked onto the active list of their clock. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            {
                PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                Assert(pCur->offPrev || pCur == pCurAct);
                /* Walk the active list until we find this timer. */
                while (pCurAct && pCurAct != pCur)
                    pCurAct = TMTIMER_GET_NEXT(pCurAct);
                Assert(pCurAct == pCur);
                break;
            }

            /* Timers in these states must NOT appear anywhere on an active list. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            {
                Assert(!pCur->offNext);
                Assert(!pCur->offPrev);
                for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                      pCurAct;
                      pCurAct = TMTIMER_GET_NEXT(pCurAct))
                {
                    Assert(pCurAct != pCur);
                    Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                    Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                }
                break;
            }

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */
}
2167#endif /* !VBOX_STRICT */
2168
2169
2170/**
2171 * Gets the current warp drive percent.
2172 *
2173 * @returns The warp drive percent.
2174 * @param pVM The VM handle.
2175 */
2176VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
2177{
2178 return pVM->tm.s.u32VirtualWarpDrivePercentage;
2179}
2180
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette