VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@4295

Last change on this file since 4295 was 4071, checked in by vboxsync, 17 years ago

Biggest check-in ever. New source code headers for all (C) innotek files.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 22.4 KB
/* $Id: TMAllVirtual.cpp 4071 2007-08-07 17:07:59Z vboxsync $ */
/** @file
 * TM - Timeout Manager, Virtual Time, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#include <VBox/tm.h>
#ifdef IN_RING3
# include <VBox/rem.h>
# include <iprt/thread.h>
#endif
#include "TMInternal.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>

#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) tmVirtualSetWarpDrive(PVM pVM, uint32_t u32Percent);


/**
 * This is (mostly) the same as rtTimeNanoTSInternal() except
 * for the two globals which live in TM.
 *
 * @returns Nanosecond timestamp.
 * @param   pVM     The VM handle.
 */
static uint64_t tmVirtualGetRawNanoTS(PVM pVM)
{
    uint64_t u64Delta;
    uint32_t u32NanoTSFactor0;
    uint64_t u64TSC;
    uint64_t u64NanoTS;
    uint32_t u32UpdateIntervalTSC;
    uint64_t u64PrevNanoTS;

    /*
     * Read the GIP data and the previous value.
     */
    for (;;)
    {
        uint32_t u32TransactionId;
        PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
#ifdef IN_RING3
        if (RT_UNLIKELY(!pGip || pGip->u32Magic != SUPGLOBALINFOPAGE_MAGIC))
            return RTTimeSystemNanoTS();
#endif

        if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
        {
            u32TransactionId = pGip->aCPUs[0].u32TransactionId;
#ifdef RT_OS_L4
            Assert((u32TransactionId & 1) == 0);
#endif
            u32UpdateIntervalTSC = pGip->aCPUs[0].u32UpdateIntervalTSC;
            u64NanoTS = pGip->aCPUs[0].u64NanoTS;
            u64TSC = pGip->aCPUs[0].u64TSC;
            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
            u64Delta = ASMReadTSC();
            u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
            if (RT_UNLIKELY(    pGip->aCPUs[0].u32TransactionId != u32TransactionId
                            ||  (u32TransactionId & 1)))
                continue;
        }
        else
        {
            /* SUPGIPMODE_ASYNC_TSC */
            PSUPGIPCPU pGipCpu;

            uint8_t u8ApicId = ASMGetApicId();
            if (RT_LIKELY(u8ApicId < RT_ELEMENTS(pGip->aCPUs)))
                pGipCpu = &pGip->aCPUs[u8ApicId];
            else
            {
                AssertMsgFailed(("%x\n", u8ApicId));
                pGipCpu = &pGip->aCPUs[0];
            }

            u32TransactionId = pGipCpu->u32TransactionId;
#ifdef RT_OS_L4
            Assert((u32TransactionId & 1) == 0);
#endif
            u32UpdateIntervalTSC = pGipCpu->u32UpdateIntervalTSC;
            u64NanoTS = pGipCpu->u64NanoTS;
            u64TSC = pGipCpu->u64TSC;
            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
            u64Delta = ASMReadTSC();
            u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
            if (RT_UNLIKELY(u8ApicId != ASMGetApicId()))
                continue;
            if (RT_UNLIKELY(    pGipCpu->u32TransactionId != u32TransactionId
                            ||  (u32TransactionId & 1)))
                continue;
        }
        break;
    }
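    /* Note (editorial): the retry loop above is a seqlock-style read. An odd
       u32TransactionId, or one that changed between the first and second read,
       means the GIP was being updated concurrently, so the sampled values are
       discarded and the read is retried. */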

    /*
     * Calc NanoTS delta.
     */
    u64Delta -= u64TSC;
    if (u64Delta > u32UpdateIntervalTSC)
    {
        /*
         * We've expired the interval, cap it. If we're here for the 2nd
         * time without any GIP update in between, the checks against
         * pVM->tm.s.u64VirtualRawPrev below will force 1ns stepping.
         */
        u64Delta = u32UpdateIntervalTSC;
    }
#if !defined(_MSC_VER) || defined(RT_ARCH_AMD64) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
    u64Delta = ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0);
    u64Delta = ASMDivU64ByU32RetU32(u64Delta, u32UpdateIntervalTSC);
#else
    __asm
    {
        mov     eax, dword ptr [u64Delta]
        mul     dword ptr [u32NanoTSFactor0]
        div     dword ptr [u32UpdateIntervalTSC]
        mov     dword ptr [u64Delta], eax
        xor     edx, edx
        mov     dword ptr [u64Delta + 4], edx
    }
#endif
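    /* Worked example (hypothetical numbers): if a GIP update interval is 1ms
       (u32NanoTSFactor0 = 1000000 ns) and spans u32UpdateIntervalTSC = 2000000
       TSC ticks, then a raw delta of 500000 ticks scales to
       500000 * 1000000 / 2000000 = 250000 ns since the last GIP update. */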

    /*
     * Calculate the time and compare it with the previously returned value.
     *
     * Since this function is called *very* frequently when the VM is running
     * and then mostly on EMT, we can restrict the valid range of the delta
     * (-1s to 2*GipUpdates) and simplify/optimize the default path.
     */
    u64NanoTS += u64Delta;
    uint64_t u64DeltaPrev = u64NanoTS - u64PrevNanoTS;
    if (RT_LIKELY(u64DeltaPrev < 1000000000 /* 1s */))
        /* frequent - less than 1s since last call. */;
    else if (   (int64_t)u64DeltaPrev < 0
             && (int64_t)u64DeltaPrev + u32NanoTSFactor0 * 2 > 0)
    {
        /* occasional - u64NanoTS is in the 'past' relative to previous returns. */
        ASMAtomicIncU32(&pVM->tm.s.c1nsVirtualRawSteps);
        u64NanoTS = u64PrevNanoTS + 1;
    }
    else if (u64PrevNanoTS)
    {
        /* Something has gone bust; a negative offset is really bad. */
        ASMAtomicIncU32(&pVM->tm.s.cVirtualRawBadRawPrev);
        if ((int64_t)u64DeltaPrev < 0)
            LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
                    u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
        else
            Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64 (debugging?)\n",
                 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
#ifdef DEBUG_bird
        /** @todo there are some hiccups during boot and reset that can cause 2-5 second delays. Investigate... */
        AssertMsg(u64PrevNanoTS > UINT64_C(100000000000) /* 100s */,
                  ("u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
                   u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
#endif
    }
    /* else: We're resuming (see TMVirtualResume). */
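    /* Example (hypothetical numbers): with a 1ms GIP update interval
       (u32NanoTSFactor0 = 1000000 ns), a u64NanoTS that lands up to 2ms behind
       u64PrevNanoTS falls into the 'occasional' branch above and is clamped to
       u64PrevNanoTS + 1, i.e. 1ns stepping instead of going backwards. */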
    if (RT_LIKELY(ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS)))
        return u64NanoTS;

    /*
     * Attempt updating the previous value, provided we're still ahead of it.
     *
     * There is no point in recalculating u64NanoTS because we got preempted or
     * raced somebody while the GIP was updated, since these are events that
     * might occur at any point in the return path as well.
     */
    for (int cTries = 100;;)
    {
        u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
        if (u64PrevNanoTS >= u64NanoTS)
            break;
        if (ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS))
            break;
        AssertBreak(--cTries > 0, ); /* give up (and assert) after 100 failed attempts */
    }

    return u64NanoTS;
}


/**
 * Get the time when we're not running at 100%.
 *
 * @returns The timestamp.
 * @param   pVM     The VM handle.
 */
static uint64_t tmVirtualGetRawNonNormal(PVM pVM)
{
    /*
     * Recalculate the RTTimeNanoTS() value for the period where
     * warp drive has been enabled.
     */
    uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
    u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
    u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
    u64 /= 100;
    u64 += pVM->tm.s.u64VirtualWarpDriveStart;
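    /* Worked example (hypothetical numbers): at a 200% warp drive with 5s of
       host time elapsed since u64VirtualWarpDriveStart, the scaled value is
       5s * 200 / 100 = 10s, so guest virtual time advances twice as fast. */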

    /*
     * Now we apply the virtual time offset.
     * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
     * machine started if it had been running continuously without any suspends.)
     */
    u64 -= pVM->tm.s.u64VirtualOffset;
    return u64;
}


/**
 * Get the raw virtual time.
 *
 * @returns The current time stamp.
 * @param   pVM     The VM handle.
 */
DECLINLINE(uint64_t) tmVirtualGetRaw(PVM pVM)
{
    if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
        return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
    return tmVirtualGetRawNonNormal(pVM);
}


/**
 * Inlined version of tmVirtualGetEx.
 */
DECLINLINE(uint64_t) tmVirtualGet(PVM pVM, bool fCheckTimers)
{
    uint64_t u64;
    if (RT_LIKELY(pVM->tm.s.fVirtualTicking))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
        u64 = tmVirtualGetRaw(pVM);

        /*
         * Use the chance to check for expired timers.
         */
        if (    fCheckTimers
            &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
            &&  (   pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
                 || (   pVM->tm.s.fVirtualSyncTicking
                     && pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
                    )
                )
           )
        {
            VM_FF_SET(pVM, VM_FF_TIMER);
            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM);
            VMR3NotifyFF(pVM, true);
#endif
        }
    }
    else
        u64 = pVM->tm.s.u64Virtual;
    return u64;
}


/**
 * Gets the current TMCLOCK_VIRTUAL time.
 *
 * @returns The timestamp.
 * @param   pVM     VM handle.
 *
 * @remark  While the flow of time will never go backwards, the speed of its
 *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
 *          influenced by power saving (SpeedStep, PowerNow!), while the former
 *          makes use of TSC and kernel timers.
 */
TMDECL(uint64_t) TMVirtualGet(PVM pVM)
{
    return TMVirtualGetEx(pVM, true /* check timers */);
}


/**
 * Gets the current TMCLOCK_VIRTUAL time.
 *
 * @returns The timestamp.
 * @param   pVM             VM handle.
 * @param   fCheckTimers    Check timers or not.
 *
 * @remark  While the flow of time will never go backwards, the speed of its
 *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
 *          influenced by power saving (SpeedStep, PowerNow!), while the former
 *          makes use of TSC and kernel timers.
 */
TMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers)
{
    return tmVirtualGet(pVM, fCheckTimers);
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
 *
 * @returns The timestamp.
 * @param   pVM             VM handle.
 * @param   fCheckTimers    Check timers or not.
 * @thread  EMT.
 */
TMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
{
    VM_ASSERT_EMT(pVM);

    uint64_t u64;
    if (pVM->tm.s.fVirtualSyncTicking)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSync);

        /*
         * Query the virtual clock and do the usual expired timer check.
         */
        Assert(pVM->tm.s.fVirtualTicking);
        u64 = tmVirtualGetRaw(pVM);
        if (    fCheckTimers
            &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
            &&  pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
        {
            VM_FF_SET(pVM, VM_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM);
            VMR3NotifyFF(pVM, true);
#endif
            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
        }

        /*
         * Read the offset and adjust if we're playing catch-up.
         *
         * The catch-up adjustment works by decrementing the offset by a percentage of
         * the time elapsed since the previous TMVirtualGetSync call.
         *
         * It's possible to get a very long or even negative interval between two reads
         * for the following reasons:
         *  - Someone might have suspended the process execution, frequently the case when
         *    debugging the process.
         *  - We might be on a different CPU whose TSC isn't quite in sync with the
         *    other CPUs in the system.
         *  - Another thread is racing us and we might have been preempted while inside
         *    this function.
         *
         * Assuming nanosecond virtual time, we can simply ignore any intervals which have
         * any of the upper 32 bits set.
         */
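        /* Worked example (hypothetical numbers): with u32VirtualSyncCatchUpPercentage
           = 25 and 40ms elapsed since the previous call, the offset below shrinks by
           40ms * 25 / 100 = 10ms per query, until it reaches the given-up level. */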
        AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
        uint64_t off = pVM->tm.s.offVirtualSync;
        if (pVM->tm.s.fVirtualSyncCatchUp)
        {
            const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
            uint64_t u64Delta = u64 - u64Prev;
            if (RT_LIKELY(!(u64Delta >> 32)))
            {
                uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
                if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
                {
                    off -= u64Sub;
                    ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
                    Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
                }
                else
                {
                    /* we've completely caught up. */
                    STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
                    off = pVM->tm.s.offVirtualSyncGivenUp;
                    ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
                    ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
                    Log4(("TM: %RU64/0: caught up\n", u64));
                }
            }
            else
            {
                /* More than 4 seconds since last time (or negative), ignore it. */
                if (!(u64Delta & RT_BIT_64(63)))
                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
                Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
            }
        }

        /*
         * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The
         * current approach is to never pass the head timer. So, when we otherwise
         * would, we stop the clock and set the timer pending flag instead.
         */
        u64 -= off;
        const uint64_t u64Expire = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
        if (u64 >= u64Expire)
        {
            u64 = u64Expire;
            ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
            ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
            if (    fCheckTimers
                &&  !VM_FF_ISSET(pVM, VM_FF_TIMER))
            {
                VM_FF_SET(pVM, VM_FF_TIMER);
#ifdef IN_RING3
                REMR3NotifyTimerPending(pVM);
                VMR3NotifyFF(pVM, true);
#endif
                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
                Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
            }
            else
                Log4(("TM: %RU64/%RU64: exp tmr\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
        }
    }
    else
        u64 = pVM->tm.s.u64VirtualSync;
    return u64;
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
 *
 * @returns The timestamp.
 * @param   pVM     VM handle.
 * @thread  EMT.
 */
TMDECL(uint64_t) TMVirtualSyncGet(PVM pVM)
{
    return TMVirtualSyncGetEx(pVM, true /* check timers */);
}


/**
 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
 *
 * @return  The current lag.
 * @param   pVM     VM handle.
 */
TMDECL(uint64_t) TMVirtualSyncGetLag(PVM pVM)
{
    return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
}


/**
 * Get the current catch-up percent.
 *
 * @return  The current catch-up percent. 0 means running at the same speed as the virtual clock.
 * @param   pVM     VM handle.
 */
TMDECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM)
{
    if (pVM->tm.s.fVirtualSyncCatchUp)
        return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
    return 0;
}


/**
 * Gets the current TMCLOCK_VIRTUAL frequency.
 *
 * @returns The frequency.
 * @param   pVM     VM handle.
 */
TMDECL(uint64_t) TMVirtualGetFreq(PVM pVM)
{
    return TMCLOCK_FREQ_VIRTUAL;
}


/**
 * Resumes the virtual clock.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_INTERNAL_ERROR and VBOX_STRICT assertion if called out of order.
 * @param   pVM     VM handle.
 */
TMDECL(int) TMVirtualResume(PVM pVM)
{
    if (!pVM->tm.s.fVirtualTicking)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
        pVM->tm.s.u64VirtualRawPrev = 0;
        pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
        pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
        pVM->tm.s.fVirtualTicking = true;
        pVM->tm.s.fVirtualSyncTicking = true;
        return VINF_SUCCESS;
    }

    AssertFailed();
    return VERR_INTERNAL_ERROR;
}


/**
 * Pauses the virtual clock.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_INTERNAL_ERROR and VBOX_STRICT assertion if called out of order.
 * @param   pVM     VM handle.
 */
TMDECL(int) TMVirtualPause(PVM pVM)
{
    if (pVM->tm.s.fVirtualTicking)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
        pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
        pVM->tm.s.fVirtualSyncTicking = false;
        pVM->tm.s.fVirtualTicking = false;
        return VINF_SUCCESS;
    }

    AssertFailed();
    return VERR_INTERNAL_ERROR;
}
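
/* Usage sketch (hypothetical caller, editorial): pause and resume must be
 * strictly paired; an out-of-order call asserts and returns VERR_INTERNAL_ERROR.
 *
 *     int rc = TMVirtualPause(pVM);   AssertRC(rc);
 *     // ... work with the clock stopped ...
 *     rc = TMVirtualResume(pVM);      AssertRC(rc);
 */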


/**
 * Gets the current warp drive percent.
 *
 * @returns The warp drive percent.
 * @param   pVM     The VM handle.
 */
TMDECL(uint32_t) TMVirtualGetWarpDrive(PVM pVM)
{
    return pVM->tm.s.u32VirtualWarpDrivePercentage;
}


/**
 * Sets the warp drive percent of the virtual time.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   u32Percent  The new percentage. 100 means normal operation.
 */
TMDECL(int) TMVirtualSetWarpDrive(PVM pVM, uint32_t u32Percent)
{
/** @todo This isn't a feature specific to virtual time, move to TM level. (It
 * should affect the TMR3UCTNow as well!) */
#ifdef IN_RING3
    PVMREQ pReq;
    int rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)tmVirtualSetWarpDrive, 2, pVM, u32Percent);
    if (VBOX_SUCCESS(rc))
        rc = pReq->iStatus;
    VMR3ReqFree(pReq);
    return rc;
#else
    return tmVirtualSetWarpDrive(pVM, u32Percent);
#endif
}
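
/* Usage sketch (hypothetical caller, ring-3): run the guest clock at half the
 * host speed. The worker below rejects percentages outside 2..20000.
 *
 *     int rc = TMVirtualSetWarpDrive(pVM, 50);
 *     AssertRC(rc);
 */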


/**
 * EMT worker for TMVirtualSetWarpDrive.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   u32Percent  See TMVirtualSetWarpDrive().
 * @internal
 */
static DECLCALLBACK(int) tmVirtualSetWarpDrive(PVM pVM, uint32_t u32Percent)
{
    /*
     * Validate it.
     */
    AssertMsgReturn(u32Percent >= 2 && u32Percent <= 20000,
                    ("%RX32 is not between 2 and 20000 (inclusive).\n", u32Percent),
                    VERR_INVALID_PARAMETER);

    /*
     * If the time is running we'll have to pause it before we can change
     * the warp drive settings.
     */
    bool fPaused = pVM->tm.s.fVirtualTicking;
    if (fPaused)
    {
        int rc = TMVirtualPause(pVM);
        AssertRCReturn(rc, rc);
        rc = TMCpuTickPause(pVM);
        AssertRCReturn(rc, rc);
    }

    pVM->tm.s.u32VirtualWarpDrivePercentage = u32Percent;
    pVM->tm.s.fVirtualWarpDrive = u32Percent != 100;
    LogRel(("TM: u32VirtualWarpDrivePercentage=%RI32 fVirtualWarpDrive=%RTbool\n",
            pVM->tm.s.u32VirtualWarpDrivePercentage, pVM->tm.s.fVirtualWarpDrive));

    if (fPaused)
    {
        int rc = TMVirtualResume(pVM);
        AssertRCReturn(rc, rc);
        rc = TMCpuTickResume(pVM);
        AssertRCReturn(rc, rc);
    }

    return VINF_SUCCESS;
}


/**
 * Converts from virtual ticks to nanoseconds.
 *
 * @returns nanoseconds.
 * @param   pVM             The VM handle.
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  There could be rounding errors here. We just do a simple integer
 *          divide without any adjustments.
 */
TMDECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64VirtualTicks;
}


/**
 * Converts from virtual ticks to microseconds.
 *
 * @returns microseconds.
 * @param   pVM             The VM handle.
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  There could be rounding errors here. We just do a simple integer
 *          divide without any adjustments.
 */
TMDECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64VirtualTicks / 1000;
}


/**
 * Converts from virtual ticks to milliseconds.
 *
 * @returns milliseconds.
 * @param   pVM             The VM handle.
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  There could be rounding errors here. We just do a simple integer
 *          divide without any adjustments.
 */
TMDECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64VirtualTicks / 1000000;
}
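
/* Example: the divides above truncate toward zero; TMVirtualToMicro(pVM, 1999)
 * returns 1 (1999 ns -> 1 us), silently dropping the 999 ns remainder.
 */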


/**
 * Converts from nanoseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param   pVM         The VM handle.
 * @param   u64NanoTS   The nanosecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
TMDECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64NanoTS;
}


/**
 * Converts from microseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param   pVM         The VM handle.
 * @param   u64MicroTS  The microsecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
TMDECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64MicroTS * 1000;
}


/**
 * Converts from milliseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param   pVM         The VM handle.
 * @param   u64MilliTS  The millisecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
TMDECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64MilliTS * 1000000;
}