VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@19454

Last change on this file since 19454 was 19444, checked in by vboxsync on 2009-05-06

TM: Serialize EMT access using a critsect.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 27.1 KB
1/* $Id: TMAllVirtual.cpp 19444 2009-05-06 16:21:00Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_TM
27#include <VBox/tm.h>
28#ifdef IN_RING3
29# include <VBox/rem.h>
30# include <iprt/thread.h>
31#endif
32#include "TMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/vmm.h>
35#include <VBox/err.h>
36#include <VBox/log.h>
37#include <VBox/sup.h>
38
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47static DECLCALLBACK(int) tmVirtualSetWarpDrive(PVM pVM, uint32_t u32Percent);
48
49
50/**
51 * Helper function that's used by the assembly routines when something goes bust.
52 *
53 * @param pData Pointer to the data structure.
54 * @param u64NanoTS The calculated nano ts.
55 * @param u64DeltaPrev The delta relative to the previously returned timestamp.
56 * @param u64PrevNanoTS The previously returned timestamp (as it was read).
57 */
58DECLEXPORT(void) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev, uint64_t u64PrevNanoTS)
59{
60 //PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
61 pData->cBadPrev++;
62 if ((int64_t)u64DeltaPrev < 0)
63 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64\n",
64 u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
65 else
66 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 (debugging?)\n",
67 u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
68}
69
70
71/**
72 * Called the first time somebody asks for the time or when the GIP
73 * is mapped/unmapped.
74 *
75 * This should never ever happen.
76 */
77DECLEXPORT(uint64_t) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData)
78{
79 //PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
80 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
81 AssertFatalMsgFailed(("pGip=%p u32Magic=%#x\n", pGip, VALID_PTR(pGip) ? pGip->u32Magic : 0));
82}
83
84
85#if 1
86
87/**
88 * Wrapper around the IPRT GIP time methods.
89 */
90DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVM pVM)
91{
92#ifdef IN_RING3
93 return CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData));
94# else /* !IN_RING3 */
95 uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
96 uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData));
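    /* If the 1ns-step counter changed during the call above, the GIP reader had to
       resort to 1ns stepping; in that case request a switch back to ring-3 (VMCPU_FF_TO_R3). */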
97 if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
98 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
99 return u64;
100# endif /* !IN_RING3 */
101}
102
103#else
104
105/**
106 * This is (mostly) the same as rtTimeNanoTSInternal() except
107 * for the two globals which live in TM.
108 *
109 * @returns Nanosecond timestamp.
110 * @param pVM The VM handle.
111 */
112static uint64_t tmVirtualGetRawNanoTS(PVM pVM)
113{
114 uint64_t u64Delta;
115 uint32_t u32NanoTSFactor0;
116 uint64_t u64TSC;
117 uint64_t u64NanoTS;
118 uint32_t u32UpdateIntervalTSC;
119 uint64_t u64PrevNanoTS;
120
121 /*
122 * Read the GIP data and the previous value.
123 */
124 for (;;)
125 {
126 uint32_t u32TransactionId;
127 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
128#ifdef IN_RING3
129 if (RT_UNLIKELY(!pGip || pGip->u32Magic != SUPGLOBALINFOPAGE_MAGIC))
130 return RTTimeSystemNanoTS();
131#endif
132
133 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
134 {
135 u32TransactionId = pGip->aCPUs[0].u32TransactionId;
136#ifdef RT_OS_L4
137 Assert((u32TransactionId & 1) == 0);
138#endif
139 u32UpdateIntervalTSC = pGip->aCPUs[0].u32UpdateIntervalTSC;
140 u64NanoTS = pGip->aCPUs[0].u64NanoTS;
141 u64TSC = pGip->aCPUs[0].u64TSC;
142 u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
143 u64Delta = ASMReadTSC();
144 u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
145 if (RT_UNLIKELY( pGip->aCPUs[0].u32TransactionId != u32TransactionId
146 || (u32TransactionId & 1)))
147 continue;
148 }
149 else
150 {
151 /* SUPGIPMODE_ASYNC_TSC */
152 PSUPGIPCPU pGipCpu;
153
154 uint8_t u8ApicId = ASMGetApicId();
155 if (RT_LIKELY(u8ApicId < RT_ELEMENTS(pGip->aCPUs)))
156 pGipCpu = &pGip->aCPUs[u8ApicId];
157 else
158 {
159 AssertMsgFailed(("%x\n", u8ApicId));
160 pGipCpu = &pGip->aCPUs[0];
161 }
162
163 u32TransactionId = pGipCpu->u32TransactionId;
164#ifdef RT_OS_L4
165 Assert((u32TransactionId & 1) == 0);
166#endif
167 u32UpdateIntervalTSC = pGipCpu->u32UpdateIntervalTSC;
168 u64NanoTS = pGipCpu->u64NanoTS;
169 u64TSC = pGipCpu->u64TSC;
170 u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
171 u64Delta = ASMReadTSC();
172 u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
173#ifdef IN_RC
174 Assert(!(ASMGetFlags() & X86_EFL_IF));
175#else
176 if (RT_UNLIKELY(u8ApicId != ASMGetApicId()))
177 continue;
178 if (RT_UNLIKELY( pGipCpu->u32TransactionId != u32TransactionId
179 || (u32TransactionId & 1)))
180 continue;
181#endif
182 }
183 break;
184 }
185
186 /*
187 * Calc NanoTS delta.
188 */
189 u64Delta -= u64TSC;
190 if (u64Delta > u32UpdateIntervalTSC)
191 {
192 /*
193 * We've expired the interval, cap it. If we're here for the 2nd
194 * time without any GIP update in between, the checks against
195 * pVM->tm.s.u64VirtualRawPrev below will force 1ns stepping.
196 */
197 u64Delta = u32UpdateIntervalTSC;
198 }
199#if !defined(_MSC_VER) || defined(RT_ARCH_AMD64) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
200 u64Delta = ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0);
201 u64Delta = ASMDivU64ByU32RetU32(u64Delta, u32UpdateIntervalTSC);
202#else
203 __asm
204 {
205 mov eax, dword ptr [u64Delta]
206 mul dword ptr [u32NanoTSFactor0]
207 div dword ptr [u32UpdateIntervalTSC]
208 mov dword ptr [u64Delta], eax
209 xor edx, edx
210 mov dword ptr [u64Delta + 4], edx
211 }
212#endif
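    /* u64Delta now holds the elapsed time in nanoseconds:
       tsc-ticks-since-gip-update * u32NanoTSFactor0 / u32UpdateIntervalTSC. */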
213
214 /*
215 * Calculate the time and compare it with the previously returned value.
216 *
217 * Since this function is called *very* frequently when the VM is running
218 * and then mostly on EMT, we can restrict the valid range of the delta
219 * (-1s to 2*GipUpdates) and simplify/optimize the default path.
220 */
221 u64NanoTS += u64Delta;
222 uint64_t u64DeltaPrev = u64NanoTS - u64PrevNanoTS;
223 if (RT_LIKELY(u64DeltaPrev < 1000000000 /* 1s */))
224 /* frequent - less than 1s since last call. */;
225 else if ( (int64_t)u64DeltaPrev < 0
226 && (int64_t)u64DeltaPrev + u32NanoTSFactor0 * 2 > 0)
227 {
228 /* occasional - u64NanoTS is in the 'past' relative to previous returns. */
229 ASMAtomicIncU32(&pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps);
230 u64NanoTS = u64PrevNanoTS + 1;
231#ifndef IN_RING3
232 VM_FF_SET(pVM, VM_FF_TO_R3); /* S10 hack */
233#endif
234 }
235 else if (u64PrevNanoTS)
236 {
237 /* Something has gone bust; if the offset is negative it's really bad. */
238 ASMAtomicIncU32(&pVM->tm.s.CTX_SUFF(VirtualGetRawData).cBadPrev);
239 if ((int64_t)u64DeltaPrev < 0)
240 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
241 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
242 else
243 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64 (debugging?)\n",
244 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
245#ifdef DEBUG_bird
246 /** @todo there are some hiccups during boot and reset that can cause 2-5 second delays. Investigate... */
247 AssertMsg(u64PrevNanoTS > UINT64_C(100000000000) /* 100s */,
248 ("u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
249 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
250#endif
251 }
252 /* else: We're resuming (see TMVirtualResume). */
253 if (RT_LIKELY(ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS)))
254 return u64NanoTS;
255
256 /*
257 * Attempt updating the previous value, provided we're still ahead of it.
258 *
259 * There is no point in recalculating u64NanoTS because we got preempted or
260 * raced somebody while the GIP was updated, since these are events
261 * that might occur at any point in the return path as well.
262 */
263 for (int cTries = 50;;)
264 {
265 u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
266 if (u64PrevNanoTS >= u64NanoTS)
267 break;
268 if (ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS))
269 break;
270 AssertBreak(--cTries <= 0);
271 if (cTries < 25 && !VM_IS_EMT(pVM)) /* give up early */
272 break;
273 }
274
275 return u64NanoTS;
276}
277
278#endif
279
280
281/**
282 * Get the time when we're not running at 100%
283 *
284 * @returns The timestamp.
285 * @param pVM The VM handle.
286 */
287static uint64_t tmVirtualGetRawNonNormal(PVM pVM)
288{
289 /*
290 * Recalculate the RTTimeNanoTS() value for the period where
291 * warp drive has been enabled.
292 */
293 uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
294 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
295 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
296 u64 /= 100;
297 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
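    /* In other words: u64 = WarpDriveStart + (RawNanoTS - WarpDriveStart) * WarpDrivePercentage / 100.
       E.g. with a 50% warp drive, 10 host seconds since the warp drive was engaged
       advance the raw virtual time by only 5 seconds. */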
298
299 /*
300 * Now we apply the virtual time offset.
301 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
302 * machine started if it had been running continuously without any suspends.)
303 */
304 u64 -= pVM->tm.s.u64VirtualOffset;
305 return u64;
306}
307
308
309/**
310 * Get the raw virtual time.
311 *
312 * @returns The current time stamp.
313 * @param pVM The VM handle.
314 */
315DECLINLINE(uint64_t) tmVirtualGetRaw(PVM pVM)
316{
317 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
318 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
319 return tmVirtualGetRawNonNormal(pVM);
320}
321
322
323/**
324 * Inlined version of tmVirtualGetEx.
325 */
326DECLINLINE(uint64_t) tmVirtualGet(PVM pVM, bool fCheckTimers)
327{
328 uint64_t u64;
329 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
330 {
331 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
332 u64 = tmVirtualGetRaw(pVM);
333
334 /*
335 * Use the chance to check for expired timers.
336 */
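    /* Note: the virtual sync expire check below uses u64 - offVirtualSync, i.e. the
       current virtual sync time, since that queue runs on the virtual sync clock. */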
337 if ( fCheckTimers
338 && !VM_FF_ISSET(pVM, VM_FF_TIMER)
339 && !pVM->tm.s.fRunningQueues
340 && ( pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
341 || ( pVM->tm.s.fVirtualSyncTicking
342 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
343 )
344 )
345 )
346 {
347 VM_FF_SET(pVM, VM_FF_TIMER);
348 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
349#ifdef IN_RING3
350 REMR3NotifyTimerPending(pVM);
351 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
352#endif
353 }
354 }
355 else
356 u64 = pVM->tm.s.u64Virtual;
357 return u64;
358}
359
360
361/**
362 * Gets the current TMCLOCK_VIRTUAL time
363 *
364 * @returns The timestamp.
365 * @param pVM VM handle.
366 *
367 * @remark While the flow of time will never go backwards, the speed of the
368 * progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
369 * influenced by power saving (SpeedStep, PowerNow!), while the former
370 * makes use of TSC and kernel timers.
371 */
372VMMDECL(uint64_t) TMVirtualGet(PVM pVM)
373{
374 return tmVirtualGet(pVM, true /* check timers */);
375}
376
377
378/**
379 * Gets the current TMCLOCK_VIRTUAL time
380 *
381 * @returns The timestamp.
382 * @param pVM VM handle.
383 * @param fCheckTimers Check timers or not
384 *
385 * @remark While the flow of time will never go backwards, the speed of the
386 * progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
387 * influenced by power saving (SpeedStep, PowerNow!), while the former
388 * makes use of TSC and kernel timers.
389 */
390VMMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers)
391{
392 return tmVirtualGet(pVM, fCheckTimers);
393}
394
395
396/**
397 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
398 *
399 * @returns The timestamp.
400 * @param pVM VM handle.
401 * @param fCheckTimers Check timers or not
402 * @thread EMT.
403 */
404VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
405{
406 uint64_t u64;
407 if (pVM->tm.s.fVirtualSyncTicking)
408 {
409 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSync);
410
411 /*
412 * Query the virtual clock and do the usual expired timer check.
413 */
414 Assert(pVM->tm.s.cVirtualTicking);
415 u64 = tmVirtualGetRaw(pVM);
416 if ( fCheckTimers
417 && !VM_FF_ISSET(pVM, VM_FF_TIMER)
418 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
419 {
420 VM_FF_SET(pVM, VM_FF_TIMER);
421#ifdef IN_RING3
422 REMR3NotifyTimerPending(pVM);
423 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
424#endif
425 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
426 }
427
428 /*
429 * Read the offset and adjust if we're playing catch-up.
430 *
431 * The catch-up adjustment works by decrementing the offset by a percentage of
432 * the time elapsed since the previous TMVirtualGetSync call.
433 *
434 * It's possible to get a very long or even negative interval between two reads
435 * for the following reasons:
436 * - Someone might have suspended the process execution, frequently the case when
437 * debugging the process.
438 * - We might be on a different CPU whose TSC isn't quite in sync with the
439 * other CPUs in the system.
440 * - Another thread is racing us and we might have been preempted while inside
441 * this function.
442 *
443 * Assuming nanosecond virtual time, we can simply ignore any interval which has
444 * any of the upper 32 bits set.
445 */
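    /* Concretely, the catch-up step below is:
     *   off -= (u64 - u64VirtualSyncCatchUpPrev) * u32VirtualSyncCatchUpPercentage / 100,
     * with off never dropping below offVirtualSyncGivenUp; once that floor is hit
     * we're fully caught up and catch-up mode is cleared. */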
446 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
447 uint64_t off = pVM->tm.s.offVirtualSync;
448 if (pVM->tm.s.fVirtualSyncCatchUp)
449 {
450 int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. */
451
452 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
453 uint64_t u64Delta = u64 - u64Prev;
454 if (RT_LIKELY(!(u64Delta >> 32)))
455 {
456 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
457 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
458 {
459 off -= u64Sub;
460 ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
461 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
462 Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
463 }
464 else
465 {
466 /* we've completely caught up. */
467 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
468 off = pVM->tm.s.offVirtualSyncGivenUp;
469 ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
470 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
471 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
472 Log4(("TM: %RU64/0: caught up\n", u64));
473 }
474 }
475 else
476 {
477 /* More than 4 seconds since last time (or negative), ignore it. */
478 if (!(u64Delta & RT_BIT_64(63)))
479 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
480 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
481 }
482
483 if (RT_SUCCESS(rc))
484 tmUnlock(pVM);
485 }
486
487 /*
488 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
489 * approach is to never pass the head timer. So, when we do, we stop the clock and
490 * set the timer pending flag.
491 */
492 u64 -= off;
493 const uint64_t u64Expire = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
494 if (u64 >= u64Expire)
495 {
496 u64 = u64Expire;
497 int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. */
498 if (RT_SUCCESS(rc))
499 {
500 ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
501 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
502 tmUnlock(pVM);
503 }
504 if ( fCheckTimers
505 && !VM_FF_ISSET(pVM, VM_FF_TIMER))
506 {
507 VM_FF_SET(pVM, VM_FF_TIMER);
508#ifdef IN_RING3
509 REMR3NotifyTimerPending(pVM);
510 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
511#endif
512 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
513 Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
514 }
515 else
516 Log4(("TM: %RU64/%RU64: exp tmr\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
517 }
518 }
519 else
520 {
521 u64 = pVM->tm.s.u64VirtualSync;
522
523 /*
524 * If it looks like a halt caused by pending timers, make sure the FF is raised.
525 * This is a safeguard against timer queue runner leaving the virtual sync clock stopped.
526 */
527 if ( fCheckTimers
528 && pVM->tm.s.cVirtualTicking
529 && !VM_FF_ISSET(pVM, VM_FF_TIMER))
530 {
531 const uint64_t u64Expire = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
532 if (u64 >= u64Expire)
533 {
534 VM_FF_SET(pVM, VM_FF_TIMER);
535#ifdef IN_RING3
536 REMR3NotifyTimerPending(pVM);
537 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
538#endif
539 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
540 Log4(("TM: %RU64/%RU64: exp tmr=>ff (!)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
541 }
542 }
543 }
544 return u64;
545}
546
547
548/**
549 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
550 *
551 * @returns The timestamp.
552 * @param pVM VM handle.
553 * @thread EMT.
554 */
555VMMDECL(uint64_t) TMVirtualSyncGet(PVM pVM)
556{
557 return TMVirtualSyncGetEx(pVM, true /* check timers */);
558}
559
560
561/**
562 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
563 *
564 * @return The current lag.
565 * @param pVM VM handle.
566 */
567VMMDECL(uint64_t) TMVirtualSyncGetLag(PVM pVM)
568{
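    /* offVirtualSync is the total offset of the virtual sync clock; the part we have
       given up catching up on (offVirtualSyncGivenUp) is excluded from the reported lag. */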
569 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
570}
571
572
573/**
574 * Get the current catch-up percent.
575 *
576 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
577 * @param pVM VM handle.
578 */
579VMMDECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM)
580{
581 if (pVM->tm.s.fVirtualSyncCatchUp)
582 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
583 return 0;
584}
585
586
587/**
588 * Gets the current TMCLOCK_VIRTUAL frequency.
589 *
590 * @returns The frequency.
591 * @param pVM VM handle.
592 */
593VMMDECL(uint64_t) TMVirtualGetFreq(PVM pVM)
594{
595 return TMCLOCK_FREQ_VIRTUAL;
596}
597
598
599/**
600 * Resumes the virtual clock.
601 *
602 * @returns VINF_SUCCESS on success.
603 * @returns VERR_INTERNAL_ERROR and VBOX_STRICT assertion if called out of order.
604 * @param pVM VM handle.
605 */
606VMMDECL(int) TMVirtualResume(PVM pVM)
607{
608 /*
609 * Note! This is done only in specific cases (vcpu 0 init, termination, debug,
610 * out of memory conditions); there is at least a race for fVirtualSyncTicking.
611 */
612 if (ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking) == 1)
613 {
614 int rc = tmLock(pVM); /* paranoia */
615
616 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
617 pVM->tm.s.u64VirtualRawPrev = 0;
618 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
619 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
620 pVM->tm.s.fVirtualSyncTicking = true;
621
622 if (RT_SUCCESS(rc))
623 tmUnlock(pVM);
624 return VINF_SUCCESS;
625 }
626 AssertMsgReturn(pVM->tm.s.cVirtualTicking <= pVM->cCPUs, ("%d vs %d\n", pVM->tm.s.cVirtualTicking, pVM->cCPUs), VERR_INTERNAL_ERROR);
627 return VINF_SUCCESS;
628}
629
630
631/**
632 * Pauses the virtual clock.
633 *
634 * @returns VINF_SUCCESS on success.
635 * @returns VERR_INTERNAL_ERROR and VBOX_STRICT assertion if called out of order.
636 * @param pVM VM handle.
637 */
638VMMDECL(int) TMVirtualPause(PVM pVM)
639{
640 /*
641 * Note! This is done only in specific cases (vcpu 0 init, termination, debug,
642 * out of memory conditions); there is at least a race for fVirtualSyncTicking.
643 */
644 if (ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking) == 0)
645 {
646 int rc = tmLock(pVM); /* paranoia */
647
648 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
649 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
650 pVM->tm.s.fVirtualSyncTicking = false;
651
652 if (RT_SUCCESS(rc))
653 tmUnlock(pVM);
654 return VINF_SUCCESS;
655 }
656 AssertMsgReturn(pVM->tm.s.cVirtualTicking <= pVM->cCPUs, ("%d vs %d\n", pVM->tm.s.cVirtualTicking, pVM->cCPUs), VERR_INTERNAL_ERROR);
657 return VINF_SUCCESS;
658}
659
660
661/**
662 * Gets the current warp drive percent.
663 *
664 * @returns The warp drive percent.
665 * @param pVM The VM handle.
666 */
667VMMDECL(uint32_t) TMVirtualGetWarpDrive(PVM pVM)
668{
669 return pVM->tm.s.u32VirtualWarpDrivePercentage;
670}
671
672
673/**
674 * Sets the warp drive percent of the virtual time.
675 *
676 * @returns VBox status code.
677 * @param pVM The VM handle.
678 * @param u32Percent The new percentage. 100 means normal operation.
679 */
680VMMDECL(int) TMVirtualSetWarpDrive(PVM pVM, uint32_t u32Percent)
681{
682/** @todo This isn't a feature specific to virtual time, move to TM level. (It
683 * should affect the TMR3UCTNow as well!) */
684#ifdef IN_RING3
685 PVMREQ pReq;
686 int rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)tmVirtualSetWarpDrive, 2, pVM, u32Percent);
687 if (RT_SUCCESS(rc))
688 rc = pReq->iStatus;
689 VMR3ReqFree(pReq);
690 return rc;
691#else
692
693 return tmVirtualSetWarpDrive(pVM, u32Percent);
694#endif
695}
696
697
698/**
699 * EMT worker for tmVirtualSetWarpDrive.
700 *
701 * @returns VBox status code.
702 * @param pVM The VM handle.
703 * @param u32Percent See TMVirtualSetWarpDrive().
704 * @internal
705 */
706static DECLCALLBACK(int) tmVirtualSetWarpDrive(PVM pVM, uint32_t u32Percent)
707{
708 PVMCPU pVCpu = VMMGetCpu(pVM);
709
710 /*
711 * Validate it.
712 */
713 AssertMsgReturn(u32Percent >= 2 && u32Percent <= 20000,
714 ("%RX32 is not between 2 and 20000 (inclusive).\n", u32Percent),
715 VERR_INVALID_PARAMETER);
716 tmLock(pVM);
717
718 /*
719 * If the time is running we'll have to pause it before we can change
720 * the warp drive settings.
721 */
722 bool fPaused = !!pVM->tm.s.cVirtualTicking;
723 if (fPaused)
724 {
725 int rc = TMVirtualPause(pVM);
726 AssertRC(rc);
727 rc = TMCpuTickPause(pVCpu);
728 AssertRC(rc);
729 }
730
731 pVM->tm.s.u32VirtualWarpDrivePercentage = u32Percent;
732 pVM->tm.s.fVirtualWarpDrive = u32Percent != 100;
733 LogRel(("TM: u32VirtualWarpDrivePercentage=%RI32 fVirtualWarpDrive=%RTbool\n",
734 pVM->tm.s.u32VirtualWarpDrivePercentage, pVM->tm.s.fVirtualWarpDrive));
735
736 if (fPaused)
737 {
738 int rc = TMVirtualResume(pVM);
739 AssertRC(rc);
740 rc = TMCpuTickResume(pVCpu);
741 AssertRC(rc);
742 }
743
744 tmUnlock(pVM);
745 return VINF_SUCCESS;
746}
747
748
749/**
750 * Converts from virtual ticks to nanoseconds.
751 *
752 * @returns nanoseconds.
753 * @param pVM The VM handle.
754 * @param u64VirtualTicks The virtual ticks to convert.
755 * @remark There could be rounding errors here. We just do a simple integer divide
756 * without any adjustments.
757 */
758VMMDECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
759{
760 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
761 return u64VirtualTicks;
762}
763
764
765/**
766 * Converts from virtual ticks to microseconds.
767 *
768 * @returns microseconds.
769 * @param pVM The VM handle.
770 * @param u64VirtualTicks The virtual ticks to convert.
771 * @remark There could be rounding errors here. We just do a simple integer divide
772 * without any adjustments.
773 */
774VMMDECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
775{
776 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
777 return u64VirtualTicks / 1000;
778}
779
780
781/**
782 * Converts from virtual ticks to milliseconds.
783 *
784 * @returns milliseconds.
785 * @param pVM The VM handle.
786 * @param u64VirtualTicks The virtual ticks to convert.
787 * @remark There could be rounding errors here. We just do a simple integer divide
788 * without any adjustments.
789 */
790VMMDECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
791{
792 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
793 return u64VirtualTicks / 1000000;
794}
795
796
797/**
798 * Converts from nanoseconds to virtual ticks.
799 *
800 * @returns virtual ticks.
801 * @param pVM The VM handle.
802 * @param u64NanoTS The nanosecond value to convert.
803 * @remark There could be rounding and overflow errors here.
804 */
805VMMDECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
806{
807 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
808 return u64NanoTS;
809}
810
811
812/**
813 * Converts from microseconds to virtual ticks.
814 *
815 * @returns virtual ticks.
816 * @param pVM The VM handle.
817 * @param u64MicroTS The microsecond value to convert.
818 * @remark There could be rounding and overflow errors here.
819 */
820VMMDECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
821{
822 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
823 return u64MicroTS * 1000;
824}
825
826
827/**
828 * Converts from milliseconds to virtual ticks.
829 *
830 * @returns virtual ticks.
831 * @param pVM The VM handle.
832 * @param u64MilliTS The millisecond value to convert.
833 * @remark There could be rounding and overflow errors here.
834 */
835VMMDECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
836{
837 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
838 return u64MilliTS * 1000000;
839}
840