VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@ 23794

Last change on this file since 23794 was 22890, checked in by vboxsync, 15 years ago

VM::cCPUs -> VM::cCpus so it matches all the other cCpus and aCpus members.

1/* $Id: TMAllVirtual.cpp 22890 2009-09-09 23:11:31Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_TM
27#include <VBox/tm.h>
28#ifdef IN_RING3
29# include <VBox/rem.h>
30# include <iprt/thread.h>
31#endif
32#include "TMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/vmm.h>
35#include <VBox/err.h>
36#include <VBox/log.h>
37#include <VBox/sup.h>
38
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42
43
44
45/**
46 * Helper function that's used by the assembly routines when something goes bust.
47 *
48 * @param pData Pointer to the data structure.
49 * @param u64NanoTS The calculated nano ts.
50 * @param u64DeltaPrev The delta relative to the previously returned timestamp.
51 * @param u64PrevNanoTS The previously returned timestamp (as it was read).
52 */
53DECLEXPORT(void) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev, uint64_t u64PrevNanoTS)
54{
55 //PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
56 pData->cBadPrev++;
57 if ((int64_t)u64DeltaPrev < 0)
58 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64\n",
59 u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
60 else
61 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 (debugging?)\n",
62 u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
63}
64
65
66/**
67 * Called the first time somebody asks for the time or when the GIP
68 * is mapped/unmapped.
69 *
70 * This should never ever happen.
71 */
72DECLEXPORT(uint64_t) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData)
73{
74 //PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
75 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
76 AssertFatalMsgFailed(("pGip=%p u32Magic=%#x\n", pGip, VALID_PTR(pGip) ? pGip->u32Magic : 0));
77}
78
79
80#if 1
81
82/**
83 * Wrapper around the IPRT GIP time methods.
84 */
85DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVM pVM)
86{
87#ifdef IN_RING3
88 return CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData));
89# else /* !IN_RING3 */
90 uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
91 uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData));
92 if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
93 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
94 return u64;
95# endif /* !IN_RING3 */
96}
97
98#else
99
100/**
101 * This is (mostly) the same as rtTimeNanoTSInternal() except
102 * for the two globals which live in TM.
103 *
104 * @returns Nanosecond timestamp.
105 * @param pVM The VM handle.
106 */
107static uint64_t tmVirtualGetRawNanoTS(PVM pVM)
108{
109 uint64_t u64Delta;
110 uint32_t u32NanoTSFactor0;
111 uint64_t u64TSC;
112 uint64_t u64NanoTS;
113 uint32_t u32UpdateIntervalTSC;
114 uint64_t u64PrevNanoTS;
115
116 /*
117 * Read the GIP data and the previous value.
118 */
119 for (;;)
120 {
121 uint32_t u32TransactionId;
122 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
123#ifdef IN_RING3
124 if (RT_UNLIKELY(!pGip || pGip->u32Magic != SUPGLOBALINFOPAGE_MAGIC))
125 return RTTimeSystemNanoTS();
126#endif
127
128 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
129 {
130 u32TransactionId = pGip->aCPUs[0].u32TransactionId;
131#ifdef RT_OS_L4
132 Assert((u32TransactionId & 1) == 0);
133#endif
134 u32UpdateIntervalTSC = pGip->aCPUs[0].u32UpdateIntervalTSC;
135 u64NanoTS = pGip->aCPUs[0].u64NanoTS;
136 u64TSC = pGip->aCPUs[0].u64TSC;
137 u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
138 u64Delta = ASMReadTSC();
139 u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
140 if (RT_UNLIKELY( pGip->aCPUs[0].u32TransactionId != u32TransactionId
141 || (u32TransactionId & 1)))
142 continue;
143 }
144 else
145 {
146 /* SUPGIPMODE_ASYNC_TSC */
147 PSUPGIPCPU pGipCpu;
148
149 uint8_t u8ApicId = ASMGetApicId();
150 if (RT_LIKELY(u8ApicId < RT_ELEMENTS(pGip->aCPUs)))
151 pGipCpu = &pGip->aCPUs[u8ApicId];
152 else
153 {
154 AssertMsgFailed(("%x\n", u8ApicId));
155 pGipCpu = &pGip->aCPUs[0];
156 }
157
158 u32TransactionId = pGipCpu->u32TransactionId;
159#ifdef RT_OS_L4
160 Assert((u32TransactionId & 1) == 0);
161#endif
162 u32UpdateIntervalTSC = pGipCpu->u32UpdateIntervalTSC;
163 u64NanoTS = pGipCpu->u64NanoTS;
164 u64TSC = pGipCpu->u64TSC;
165 u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
166 u64Delta = ASMReadTSC();
167 u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
168#ifdef IN_RC
169 Assert(!(ASMGetFlags() & X86_EFL_IF));
170#else
171 if (RT_UNLIKELY(u8ApicId != ASMGetApicId()))
172 continue;
173 if (RT_UNLIKELY( pGipCpu->u32TransactionId != u32TransactionId
174 || (u32TransactionId & 1)))
175 continue;
176#endif
177 }
178 break;
179 }
180
181 /*
182 * Calc NanoTS delta.
183 */
184 u64Delta -= u64TSC;
185 if (u64Delta > u32UpdateIntervalTSC)
186 {
187 /*
188 * We've expired the interval, cap it. If we're here for the 2nd
189 * time without any GIP update in between, the checks against
190 * pVM->tm.s.u64VirtualRawPrev below will force 1ns stepping.
191 */
192 u64Delta = u32UpdateIntervalTSC;
193 }
194#if !defined(_MSC_VER) || defined(RT_ARCH_AMD64) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
195 u64Delta = ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0);
196 u64Delta = ASMDivU64ByU32RetU32(u64Delta, u32UpdateIntervalTSC);
197#else
198 __asm
199 {
200 mov eax, dword ptr [u64Delta]
201 mul dword ptr [u32NanoTSFactor0]
202 div dword ptr [u32UpdateIntervalTSC]
203 mov dword ptr [u64Delta], eax
204 xor edx, edx
205 mov dword ptr [u64Delta + 4], edx
206 }
207#endif
208
209 /*
210 * Calculate the time and compare it with the previously returned value.
211 *
212 * Since this function is called *very* frequently when the VM is running
213 * and then mostly on EMT, we can restrict the valid range of the delta
214 * (-1s to 2*GipUpdates) and simplify/optimize the default path.
215 */
216 u64NanoTS += u64Delta;
217 uint64_t u64DeltaPrev = u64NanoTS - u64PrevNanoTS;
218 if (RT_LIKELY(u64DeltaPrev < 1000000000 /* 1s */))
219 /* frequent - less than 1s since last call. */;
220 else if ( (int64_t)u64DeltaPrev < 0
221 && (int64_t)u64DeltaPrev + u32NanoTSFactor0 * 2 > 0)
222 {
223 /* occasional - u64NanoTS is in the 'past' relative to previous returns. */
224 ASMAtomicIncU32(&pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps);
225 u64NanoTS = u64PrevNanoTS + 1;
226#ifndef IN_RING3
227 VM_FF_SET(pVM, VM_FF_TO_R3); /* S10 hack */
228#endif
229 }
230 else if (u64PrevNanoTS)
231 {
232 /* Something has gone bust, if negative offset it's real bad. */
233 ASMAtomicIncU32(&pVM->tm.s.CTX_SUFF(VirtualGetRawData).cBadPrev);
234 if ((int64_t)u64DeltaPrev < 0)
235 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
236 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
237 else
238 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64 (debugging?)\n",
239 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
240#ifdef DEBUG_bird
241 /** @todo there are some hiccups during boot and reset that can cause 2-5 second delays. Investigate... */
242 AssertMsg(u64PrevNanoTS > UINT64_C(100000000000) /* 100s */,
243 ("u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
244 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
245#endif
246 }
247 /* else: We're resuming (see TMVirtualResume). */
248 if (RT_LIKELY(ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS)))
249 return u64NanoTS;
250
251 /*
252 * Attempt updating the previous value, provided we're still ahead of it.
253 *
254 * There is no point in recalculating u64NanoTS because we got preempted or
255 * raced somebody while the GIP was updated, since these are events
256 * that might occur at any point in the return path as well.
257 */
258 for (int cTries = 50;;)
259 {
260 u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
261 if (u64PrevNanoTS >= u64NanoTS)
262 break;
263 if (ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS))
264 break;
265 AssertBreak(--cTries <= 0);
266 if (cTries < 25 && !VM_IS_EMT(pVM)) /* give up early */
267 break;
268 }
269
270 return u64NanoTS;
271}
272
273#endif
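#if 0
/*
 * Illustrative sketch only: a hypothetical helper (not part of TM) showing the
 * TSC-delta to nanosecond scaling performed by the disabled code path above.
 * The numbers are assumptions chosen for easy arithmetic, not values read
 * from a real GIP:
 *
 *   u64Delta             =  3 000 000 TSC ticks since the GIP snapshot
 *   u32NanoTSFactor0     = 10 000 000 ns per GIP update interval (10 ms)
 *   u32UpdateIntervalTSC = 30 000 000 TSC ticks per interval (3 GHz TSC)
 *
 *   ns = 3 000 000 * 10 000 000 / 30 000 000 = 1 000 000 ns = 1 ms
 */
static uint64_t tmVirtualExampleTscDeltaToNano(uint64_t u64Delta, uint32_t u32NanoTSFactor0, uint32_t u32UpdateIntervalTSC)
{
    /* Same scaling as the ASMMult2xU32RetU64 / ASMDivU64ByU32RetU32 pair above. */
    return ASMDivU64ByU32RetU32(ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0), u32UpdateIntervalTSC);
}
#endif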
274
275
276/**
277 * Get the time when we're not running at 100%
278 *
279 * @returns The timestamp.
280 * @param pVM The VM handle.
281 */
282static uint64_t tmVirtualGetRawNonNormal(PVM pVM)
283{
284 /*
285 * Recalculate the RTTimeNanoTS() value for the period where
286 * warp drive has been enabled.
287 */
288 uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
289 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
290 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
291 u64 /= 100;
292 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
293
294 /*
295 * Now we apply the virtual time offset.
296 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
297 * machine started if it had been running continuously without any suspends.)
298 */
299 u64 -= pVM->tm.s.u64VirtualOffset;
300 return u64;
301}
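/*
 * A worked example of the warp drive scaling above, using assumed numbers:
 * with u32VirtualWarpDrivePercentage = 50 and 10 seconds of raw time elapsed
 * since u64VirtualWarpDriveStart, the clock only advances
 *     10s * 50 / 100 = 5s
 * past the warp drive start point before u64VirtualOffset is subtracted.
 */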
302
303
304/**
305 * Get the raw virtual time.
306 *
307 * @returns The current time stamp.
308 * @param pVM The VM handle.
309 */
310DECLINLINE(uint64_t) tmVirtualGetRaw(PVM pVM)
311{
312 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
313 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
314 return tmVirtualGetRawNonNormal(pVM);
315}
316
317
318/**
319 * Inlined version of tmVirtualGetEx.
320 */
321DECLINLINE(uint64_t) tmVirtualGet(PVM pVM, bool fCheckTimers)
322{
323 uint64_t u64;
324 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
325 {
326 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
327 u64 = tmVirtualGetRaw(pVM);
328
329 /*
330 * Use the chance to check for expired timers.
331 */
332 if (fCheckTimers)
333 {
334 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
335 if ( !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
336 && !pVM->tm.s.fRunningQueues
337 && ( pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
338 || ( pVM->tm.s.fVirtualSyncTicking
339 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
340 )
341 )
342 && !pVM->tm.s.fRunningQueues
343 )
344 {
345 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
346 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
347 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
348#ifdef IN_RING3
349 REMR3NotifyTimerPending(pVM, pVCpuDst);
350 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
351#endif
352 }
353 }
354 }
355 else
356 u64 = pVM->tm.s.u64Virtual;
357 return u64;
358}
359
360
361/**
362 * Gets the current TMCLOCK_VIRTUAL time
363 *
364 * @returns The timestamp.
365 * @param pVM VM handle.
366 *
367 * @remark While the flow of time will never go backwards, the speed of the
368 * progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
369 * influenced by power saving (SpeedStep, PowerNow!), while the former
370 * makes use of TSC and kernel timers.
371 */
372VMM_INT_DECL(uint64_t) TMVirtualGet(PVM pVM)
373{
374 return tmVirtualGet(pVM, true /* check timers */);
375}
376
377
378/**
379 * Gets the current TMCLOCK_VIRTUAL time without checking
380 * timers or anything.
381 *
382 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
383 *
384 * @returns The timestamp.
385 * @param pVM VM handle.
386 *
387 * @remarks See TMVirtualGet.
388 */
389VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVM pVM)
390{
391 return tmVirtualGet(pVM, false /*fCheckTimers*/);
392}
393
394
395/**
396 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
397 *
398 * @returns The timestamp.
399 * @param pVM VM handle.
400 * @param u64 raw virtual time.
401 * @param off offVirtualSync.
402 */
403DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVM pVM, uint64_t u64, uint64_t off)
404{
405 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
406
407 /*
408 * Don't make any updates until we know which ones to make (see the fUpdateOff, fUpdatePrev and fStop flags below).
409 */
410 bool fUpdatePrev = true;
411 bool fUpdateOff = true;
412 bool fStop = false;
413 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
414 uint64_t u64Delta = u64 - u64Prev;
415 if (RT_LIKELY(!(u64Delta >> 32)))
416 {
417 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
418 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
419 {
420 off -= u64Sub;
421 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
422 }
423 else
424 {
425 /* we've completely caught up. */
426 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
427 off = pVM->tm.s.offVirtualSyncGivenUp;
428 fStop = true;
429 Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
430 }
431 }
432 else
433 {
434 /* More than 4 seconds since last time (or negative), ignore it. */
435 fUpdateOff = false;
436 fUpdatePrev = !(u64Delta & RT_BIT_64(63));
437 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
438 }
439
440 /*
441 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
442 * approach is to never pass the head timer. So, when we would pass it, we stop
443 * the clock and set the timer pending flag instead.
444 */
445 u64 -= off;
446 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
447 if (u64 < u64Expire)
448 {
449 if (fUpdateOff)
450 ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
451 if (fStop)
452 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
453 if (fUpdatePrev)
454 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
455 tmVirtualSyncUnlock(pVM);
456 }
457 else
458 {
459 u64 = u64Expire;
460 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
461 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
462
463 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
464 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
465 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
466 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
467 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
468 tmVirtualSyncUnlock(pVM);
469
470#ifdef IN_RING3
471 REMR3NotifyTimerPending(pVM, pVCpuDst);
472 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
473#endif
474 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
475 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
476 }
477 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
478
479 Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
480 return u64;
481}
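/*
 * A worked example of the catch-up arithmetic above, using assumed numbers:
 * with u32VirtualSyncCatchUpPercentage = 25 and u64Delta = 100 000 000 ns
 * (100 ms) since the previous call,
 *     u64Sub = 100 000 000 * 25 / 100 = 25 000 000 ns
 * so offVirtualSync shrinks by 25 ms, i.e. the virtual sync clock runs 25%
 * faster than the virtual clock -- unless that would take the offset below
 * offVirtualSyncGivenUp, in which case the catch-up is declared complete.
 */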
482
483
484/**
485 * tmVirtualSyncGetEx worker for when we get the lock.
486 *
487 * @returns The timestamp.
488 * @param pVM The VM handle.
489 * @param u64 The virtual clock timestamp.
490 */
491DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVM pVM, uint64_t u64)
492{
493 /*
494 * Not ticking?
495 */
496 if (!pVM->tm.s.fVirtualSyncTicking)
497 {
498 u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
499 tmVirtualSyncUnlock(pVM);
500 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
501 Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
502 return u64;
503 }
504
505 /*
506 * Handle catch up in a separate function.
507 */
508 uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
509 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
510 return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off);
511
512 /*
513 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
514 * approach is to never pass the head timer. So, when we would pass it, we stop
515 * the clock and set the timer pending flag instead.
516 */
517 u64 -= off;
518 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
519 if (u64 < u64Expire)
520 tmVirtualSyncUnlock(pVM);
521 else
522 {
523 u64 = u64Expire;
524 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
525 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
526
527 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
528 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
529 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
530 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, !!VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
531 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
532 tmVirtualSyncUnlock(pVM);
533
534#ifdef IN_RING3
535 REMR3NotifyTimerPending(pVM, pVCpuDst);
536 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
537#endif
538 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
539 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
540 }
541 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
542 Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
543 return u64;
544}
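/*
 * A worked example of the clamping above, using assumed numbers: with a raw
 * virtual time u64 of 1 000 000 000 ns and offVirtualSync = 100 000 000 ns,
 * the candidate TMCLOCK_VIRTUAL_SYNC time is 900 000 000 ns.  If the head
 * timer expires at 850 000 000 ns, the clock is stopped at 850 000 000 ns
 * and VMCPU_FF_TIMER is raised; otherwise 900 000 000 ns is returned.
 */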
545
546
547/**
548 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
549 *
550 * @returns The timestamp.
551 * @param pVM VM handle.
552 * @param fCheckTimers Check timers or not
553 * @thread EMT.
554 */
555DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
556{
557 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
558
559 if (!pVM->tm.s.fVirtualSyncTicking)
560 return pVM->tm.s.u64VirtualSync;
561
562 /*
563 * Query the virtual clock and do the usual expired timer check.
564 */
565 Assert(pVM->tm.s.cVirtualTicking);
566 uint64_t u64 = tmVirtualGetRaw(pVM);
567 if (fCheckTimers)
568 {
569 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
570 if ( !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
571 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
572 {
573 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
574 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
575#ifdef IN_RING3
576 REMR3NotifyTimerPending(pVM, pVCpuDst);
577 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
578#endif
579 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
580 }
581 }
582
583 /*
584 * When the clock is ticking, not doing catch ups and not running into an
585 * expired time, we can get away without locking. Try this first.
586 */
587 uint64_t off;
588 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
589 {
590 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
591 {
592 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
593 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
594 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
595 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
596 {
597 off = u64 - off;
598 if (off < ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire))
599 {
600 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
601 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
602 return off;
603 }
604 }
605 }
606 }
607 else
608 {
609 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
610 if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
611 {
612 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
613 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
614 return off;
615 }
616 }
617
618 /*
619 * Read the offset and adjust if we're playing catch-up.
620 *
621 * The catch-up adjustment works by decrementing the offset by a percentage of
622 * the time elapsed since the previous TMVirtualGetSync call.
623 *
624 * It's possible to get a very long or even negative interval between two reads
625 * for the following reasons:
626 * - Someone might have suspended the process execution, frequently the case when
627 * debugging the process.
628 * - We might be on a different CPU whose TSC isn't quite in sync with the
629 * other CPUs in the system.
630 * - Another thread is racing us and we might have been preempted while inside
631 * this function.
632 *
633 * Assuming nanosecond virtual time, we can simply ignore any interval that has
634 * any of the upper 32 bits set.
635 */
636 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
637 int cOuterTries = 42;
638 for (;; cOuterTries--)
639 {
640 /* Try to grab the lock; things get simpler when owning it. */
641 int rcLock = tmVirtualSyncTryLock(pVM);
642 if (RT_SUCCESS_NP(rcLock))
643 return tmVirtualSyncGetLocked(pVM, u64);
644
645 /* Re-check the ticking flag. */
646 if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
647 {
648 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
649 if ( ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
650 && cOuterTries > 0)
651 continue;
652 Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
653 return off;
654 }
655
656 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
657 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
658 {
659 /* No changes allowed; try to get a consistent set of parameters. */
660 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
661 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
662 uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
663 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
664 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
665 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
666 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
667 || cOuterTries <= 0)
668 {
669 uint64_t u64Delta = u64 - u64Prev;
670 if (RT_LIKELY(!(u64Delta >> 32)))
671 {
672 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
673 if (off > u64Sub + offGivenUp)
674 {
675 off -= u64Sub;
676 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
677 }
678 else
679 {
680 /* we've completely caught up. */
681 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
682 off = offGivenUp;
683 Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
684 }
685 }
686 else
687 /* More than 4 seconds since last time (or negative), ignore it. */
688 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
689
690 /* Check that we're still running and in catch up. */
691 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
692 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
693 break;
694 if (cOuterTries <= 0)
695 break; /* enough */
696 }
697 }
698 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
699 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
700 break; /* Got a consistent offset */
701 else if (cOuterTries <= 0)
702 break; /* enough */
703 }
704 if (cOuterTries <= 0)
705 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
706
707 /*
708 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
709 * approach is to never pass the head timer. So, when we would pass it, we stop
710 * the clock and set the timer pending flag instead.
711 */
712 u64 -= off;
713 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
714 if (u64 >= u64Expire)
715 {
716 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
717 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
718 {
719 Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
720 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
721 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
722#ifdef IN_RING3
723 REMR3NotifyTimerPending(pVM, pVCpuDst);
724 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
725#endif
726 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
727 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
728 }
729 else
730 Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
731 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
732 }
733
734 Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
735 return u64;
736}
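#if 0
/*
 * Illustrative sketch only: a hypothetical helper (not part of TM) spelling
 * out the optimistic lockless read pattern used by the fast path of
 * tmVirtualSyncGetEx above -- read the state, then re-check that none of it
 * changed before trusting the result.  The real fast path additionally
 * checks the result against the head timer's expiry.
 */
static bool tmVirtualSyncExampleLocklessRead(PVM pVM, uint64_t u64VirtualNow, uint64_t *pu64Result)
{
    if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
        && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
    {
        uint64_t const off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        /* Re-validate; if anything moved under us the caller must take the lock. */
        if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
            && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
            &&  off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync))
        {
            *pu64Result = u64VirtualNow - off;
            return true;
        }
    }
    return false;
}
#endif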
737
738
739/**
740 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
741 *
742 * @returns The timestamp.
743 * @param pVM VM handle.
744 * @thread EMT.
745 * @remarks May set the timer and virtual sync FFs.
746 */
747VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVM pVM)
748{
749 return tmVirtualSyncGetEx(pVM, true /* check timers */);
750}
751
752
753/**
754 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
755 * TMCLOCK_VIRTUAL.
756 *
757 * @returns The timestamp.
758 * @param pVM VM handle.
759 * @thread EMT.
760 * @remarks May set the timer and virtual sync FFs.
761 */
762VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVM pVM)
763{
764 return tmVirtualSyncGetEx(pVM, false /* check timers */);
765}
766
767
768/**
769 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
770 *
771 * @returns The timestamp.
772 * @param pVM VM handle.
773 * @param fCheckTimers Check timers on the virtual clock or not.
774 * @thread EMT.
775 * @remarks May set the timer and virtual sync FFs.
776 */
777VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
778{
779 return tmVirtualSyncGetEx(pVM, fCheckTimers);
780}
781
782
783/**
784 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
785 *
786 * @return The current lag.
787 * @param pVM VM handle.
788 */
789VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVM pVM)
790{
791 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
792}
793
794
795/**
796 * Get the current catch-up percent.
797 *
798 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
799 * @param pVM VM handle.
800 */
801VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM)
802{
803 if (pVM->tm.s.fVirtualSyncCatchUp)
804 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
805 return 0;
806}
807
808
809/**
810 * Gets the current TMCLOCK_VIRTUAL frequency.
811 *
812 * @returns The frequency.
813 * @param pVM VM handle.
814 */
815VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
816{
817 return TMCLOCK_FREQ_VIRTUAL;
818}
819
820
821/**
822 * Worker for TMR3PauseClocks.
823 *
824 * @returns VINF_SUCCESS or VERR_INTERNAL_ERROR (asserted).
825 * @param pVM The VM handle.
826 */
827int tmVirtualPauseLocked(PVM pVM)
828{
829 uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
830 AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_INTERNAL_ERROR);
831 if (c == 0)
832 {
833 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
834 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
835 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
836 }
837 return VINF_SUCCESS;
838}
839
840
841/**
842 * Worker for TMR3ResumeClocks.
843 *
844 * @returns VINF_SUCCESS or VERR_INTERNAL_ERROR (asserted).
845 * @param pVM The VM handle.
846 */
847int tmVirtualResumeLocked(PVM pVM)
848{
849 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
850 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_INTERNAL_ERROR);
851 if (c == 1)
852 {
853 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
854 pVM->tm.s.u64VirtualRawPrev = 0;
855 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
856 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
857 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
858 }
859 return VINF_SUCCESS;
860}
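/*
 * A worked example of the pause/resume accounting above, using assumed
 * numbers: if the virtual clock was paused while u64Virtual = 7s and the raw
 * NanoTS reads 100s at resume time, then u64VirtualOffset becomes 93s, so a
 * raw reading of 101s one second later yields 101s - 93s = 8s -- the time
 * spent paused does not advance the virtual clock.
 */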
861
862
863/**
864 * Converts from virtual ticks to nanoseconds.
865 *
866 * @returns nanoseconds.
867 * @param pVM The VM handle.
868 * @param u64VirtualTicks The virtual ticks to convert.
869 * @remark There could be rounding errors here. We just do a simple integer divide
870 * without any adjustments.
871 */
872VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
873{
874 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
875 return u64VirtualTicks;
876}
877
878
879/**
880 * Converts from virtual ticks to microseconds.
881 *
882 * @returns microseconds.
883 * @param pVM The VM handle.
884 * @param u64VirtualTicks The virtual ticks to convert.
885 * @remark There could be rounding errors here. We just do a simple integer divide
886 * without any adjustments.
887 */
888VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
889{
890 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
891 return u64VirtualTicks / 1000;
892}
893
894
895/**
896 * Converts from virtual ticks to milliseconds.
897 *
898 * @returns milliseconds.
899 * @param pVM The VM handle.
900 * @param u64VirtualTicks The virtual ticks to convert.
901 * @remark There could be rounding errors here. We just do a simple integer divide
902 * without any adjustments.
903 */
904VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
905{
906 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
907 return u64VirtualTicks / 1000000;
908}
909
910
911/**
912 * Converts from nanoseconds to virtual ticks.
913 *
914 * @returns virtual ticks.
915 * @param pVM The VM handle.
916 * @param u64NanoTS The nanosecond value to convert.
917 * @remark There could be rounding and overflow errors here.
918 */
919VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
920{
921 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
922 return u64NanoTS;
923}
924
925
926/**
927 * Converts from microseconds to virtual ticks.
928 *
929 * @returns virtual ticks.
930 * @param pVM The VM handle.
931 * @param u64MicroTS The microsecond value to convert.
932 * @remark There could be rounding and overflow errors here.
933 */
934VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
935{
936 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
937 return u64MicroTS * 1000;
938}
939
940
941/**
942 * Converts from milliseconds to virtual ticks.
943 *
944 * @returns virtual ticks.
945 * @param pVM The VM handle.
946 * @param u64MilliTS The millisecond value to convert.
947 * @remark There could be rounding and overflow errors here.
948 */
949VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
950{
951 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
952 return u64MilliTS * 1000000;
953}
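#if 0
/*
 * Illustrative usage sketch only (hypothetical helper, assumed values; pVM is
 * taken to be a valid VM handle).  Because TMCLOCK_FREQ_VIRTUAL is
 * 1 000 000 000 Hz, virtual ticks and nanoseconds are the same unit and the
 * other conversions are plain scalings:
 */
static void tmVirtualExampleConversions(PVM pVM)
{
    uint64_t const cTicks = TMVirtualFromMilli(pVM, 250);  /* 250 ms -> 250 000 000 ticks */
    uint64_t const cNs    = TMVirtualToNano(pVM, cTicks);  /* -> 250 000 000 ns */
    uint64_t const cUs    = TMVirtualToMicro(pVM, cTicks); /* -> 250 000 us */
    uint64_t const cMs    = TMVirtualToMilli(pVM, cTicks); /* -> 250 ms */
    NOREF(cNs); NOREF(cUs); NOREF(cMs);
}
#endif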
954