VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp @ 81150

Last change on this file was revision 81150, checked in by vboxsync, 5 years ago:

VMM,/Makefile.kmk: Kicked out more recompiler related code. bugref:9576

1/* $Id: TMAllVirtual.cpp 81150 2019-10-08 12:53:47Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/vmm/tm.h>
24#include <VBox/vmm/dbgftrace.h>
25#ifdef IN_RING3
26# include <iprt/thread.h>
27#endif
28#include "TMInternal.h"
29#include <VBox/vmm/vmcc.h>
30#include <VBox/vmm/vmm.h>
31#include <VBox/err.h>
32#include <VBox/log.h>
33#include <VBox/sup.h>
34
35#include <iprt/time.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/asm-math.h>
39
40
41
42/**
43 * @interface_method_impl{RTTIMENANOTSDATA,pfnBad}
44 */
45DECLCALLBACK(DECLEXPORT(void)) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev,
46 uint64_t u64PrevNanoTS)
47{
48 PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
49 pData->cBadPrev++;
50 if ((int64_t)u64DeltaPrev < 0)
51 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p\n",
52 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
53 else
54 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p (debugging?)\n",
55 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
56}
57
58
59/**
60 * @interface_method_impl{RTTIMENANOTSDATA,pfnRediscover}
61 *
62 * This is the initial worker, so the first call in each context ends up here.
63 * It is also used should the delta rating of the host CPUs change or if the
64 * fGetGipCpu feature the current worker relies upon becomes unavailable. The
65 * last two events may occur as CPUs are taken online.
66 */
67DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData)
68{
69 PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
70
71 /*
72 * We require a valid GIP for the selection below. Invalid GIP is fatal.
73 */
74 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
75 AssertFatalMsg(RT_VALID_PTR(pGip), ("pVM=%p pGip=%p\n", pVM, pGip));
76 AssertFatalMsg(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC, ("pVM=%p pGip=%p u32Magic=%#x\n", pVM, pGip, pGip->u32Magic));
77 AssertFatalMsg(pGip->u32Mode > SUPGIPMODE_INVALID && pGip->u32Mode < SUPGIPMODE_END,
78 ("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
79
80 /*
81 * Determine the new worker.
82 */
83 PFNTIMENANOTSINTERNAL pfnWorker;
84 bool const fLFence = RT_BOOL(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2);
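    /* Editorial note: the SSE2 feature bit is used here as the availability
       test for LFENCE (introduced with SSE2), which the RTTimeNanoTSLFence*
       workers presumably use to serialize the TSC read; without it the
       RTTimeNanoTSLegacy* variants are selected. */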
85 switch (pGip->u32Mode)
86 {
87 case SUPGIPMODE_SYNC_TSC:
88 case SUPGIPMODE_INVARIANT_TSC:
89#ifdef IN_RING0
90 if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
91 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta;
92 else
93 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarWithDelta : RTTimeNanoTSLegacySyncInvarWithDelta;
94#else
95 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
96 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
97 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
98 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim;
99 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
100 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
101 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
102 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp;
103 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
104 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
105 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
106 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt0B : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt0B;
107 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
108 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
109 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
110 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt8000001E : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt8000001E;
111 else
112 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
113 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
114 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId;
115#endif
116 break;
117
118 case SUPGIPMODE_ASYNC_TSC:
119#ifdef IN_RING0
120 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
121#else
122 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
123 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseIdtrLim : RTTimeNanoTSLegacyAsyncUseIdtrLim;
124 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
125 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscp : RTTimeNanoTSLegacyAsyncUseRdtscp;
126 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL)
127 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl : RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl;
128 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
129 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt0B : RTTimeNanoTSLegacyAsyncUseApicIdExt0B;
130 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
131 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt8000001E : RTTimeNanoTSLegacyAsyncUseApicIdExt8000001E;
132 else
133 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicId : RTTimeNanoTSLegacyAsyncUseApicId;
134#endif
135 break;
136
137 default:
138 AssertFatalMsgFailed(("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
139 }
140
141 /*
142 * Update the pfnVirtualGetRaw pointer and call the worker we selected.
143 */
144 ASMAtomicWritePtr((void * volatile *)&CTX_SUFF(pVM->tm.s.pfnVirtualGetRaw), (void *)(uintptr_t)pfnWorker);
145 return pfnWorker(pData);
146}
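/*
 * Editorial note: the rediscover function above is an instance of a
 * self-replacing worker pattern -- the shared function pointer initially
 * points at a "rediscover" routine that picks the best implementation at
 * run time and atomically installs it, so later callers skip the selection.
 * Below is a minimal sketch of that pattern under made-up names; it is not
 * part of the original file and the capability check is a placeholder.
 */
#if 0 /* illustrative sketch only */
typedef uint64_t FNEXAMPLEGETTIME(void);

static uint64_t exampleGetTimeRediscover(void);

/* All callers go through this pointer; it starts out at the rediscover routine. */
static FNEXAMPLEGETTIME * volatile g_pfnExampleGetTime = exampleGetTimeRediscover;

/* Two stand-in workers: a cheap raw reading and a safe fallback. */
static uint64_t exampleGetTimeFast(void)     { return ASMReadTSC(); }    /* needs iprt/asm-amd64-x86.h */
static uint64_t exampleGetTimeFallback(void) { return RTTimeNanoTS(); }

static uint64_t exampleGetTimeRediscover(void)
{
    bool const fUseFastPath = false; /* placeholder for a real capability check */
    FNEXAMPLEGETTIME *pfnWorker = fUseFastPath ? exampleGetTimeFast : exampleGetTimeFallback;
    /* Install the worker atomically so subsequent calls bypass the selection ... */
    ASMAtomicWritePtr((void * volatile *)&g_pfnExampleGetTime, (void *)(uintptr_t)pfnWorker);
    /* ... and service the current call with it. */
    return pfnWorker();
}
#endif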
147
148
149/**
150 * @interface_method_impl{RTTIMENANOTSDATA,pfnBadCpuIndex}
151 */
152DECLEXPORT(uint64_t) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu)
153{
154 PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
155 AssertFatalMsgFailed(("pVM=%p idApic=%#x iCpuSet=%#x iGipCpu=%#x\n", pVM, idApic, iCpuSet, iGipCpu));
156#ifndef _MSC_VER
157 return UINT64_MAX;
158#endif
159}
160
161
162/**
163 * Wrapper around the IPRT GIP time methods.
164 */
165DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVMCC pVM)
166{
167# ifdef IN_RING3
168 uint64_t u64 = CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData));
169# else /* !IN_RING3 */
170 uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
171 uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData));
172 if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
173 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
174# endif /* !IN_RING3 */
175 /*DBGFTRACE_POS_U64(pVM, u64);*/
176 return u64;
177}
178
179
180/**
181 * Get the time when we're not running at 100%
182 *
183 * @returns The timestamp.
184 * @param pVM The cross context VM structure.
185 */
186static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM)
187{
188 /*
189 * Recalculate the RTTimeNanoTS() value for the period where
190 * warp drive has been enabled.
191 */
192 uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
193 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
194 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
195 u64 /= 100;
196 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
197
198 /*
199 * Now we apply the virtual time offset.
200 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
201 * machine started if it had been running continuously without any suspends.)
202 */
203 u64 -= pVM->tm.s.u64VirtualOffset;
204 return u64;
205}
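/*
 * Editorial note -- worked example of the warp drive scaling above (the
 * numbers are made up for illustration): with u32VirtualWarpDrivePercentage
 * set to 50 and u64VirtualWarpDriveStart at 1 000 000 000 ns, a raw reading
 * of 3 000 000 000 ns becomes
 *     (3 000 000 000 - 1 000 000 000) * 50 / 100 + 1 000 000 000 = 2 000 000 000 ns,
 * i.e. the 2 seconds of real time that passed since warp was enabled only
 * count as 1 second of virtual time, before u64VirtualOffset is subtracted.
 */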
206
207
208/**
209 * Get the raw virtual time.
210 *
211 * @returns The current time stamp.
212 * @param pVM The cross context VM structure.
213 */
214DECLINLINE(uint64_t) tmVirtualGetRaw(PVMCC pVM)
215{
216 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
217 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
218 return tmVirtualGetRawNonNormal(pVM);
219}
220
221
222/**
223 * Inlined version of tmVirtualGetEx.
224 */
225DECLINLINE(uint64_t) tmVirtualGet(PVMCC pVM, bool fCheckTimers)
226{
227 uint64_t u64;
228 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
229 {
230 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
231 u64 = tmVirtualGetRaw(pVM);
232
233 /*
234 * Use the chance to check for expired timers.
235 */
236 if (fCheckTimers)
237 {
238 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
239 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
240 && !pVM->tm.s.fRunningQueues
241 && ( pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
242 || ( pVM->tm.s.fVirtualSyncTicking
243 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
244 )
245 )
246 && !pVM->tm.s.fRunningQueues
247 )
248 {
249 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
250 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
251 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
252#ifdef IN_RING3
253# ifdef VBOX_WITH_REM
254 REMR3NotifyTimerPending(pVM, pVCpuDst);
255# endif
256 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
257#endif
258 }
259 }
260 }
261 else
262 u64 = pVM->tm.s.u64Virtual;
263 return u64;
264}
265
266
267/**
268 * Gets the current TMCLOCK_VIRTUAL time
269 *
270 * @returns The timestamp.
271 * @param pVM The cross context VM structure.
272 *
273 * @remark While the flow of time will never go backwards, the speed of its
274 * progress varies due to inaccuracies in RTTimeNanoTS and the TSC. The latter
275 * can be influenced by power saving (SpeedStep, PowerNow!), while the former
276 * makes use of the TSC and kernel timers.
277 */
278VMM_INT_DECL(uint64_t) TMVirtualGet(PVMCC pVM)
279{
280 return tmVirtualGet(pVM, true /*fCheckTimers*/);
281}
282
283
284/**
285 * Gets the current TMCLOCK_VIRTUAL time without checking
286 * timers or anything.
287 *
288 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
289 *
290 * @returns The timestamp.
291 * @param pVM The cross context VM structure.
292 *
293 * @remarks See TMVirtualGet.
294 */
295VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVMCC pVM)
296{
297 return tmVirtualGet(pVM, false /*fCheckTimers*/);
298}
299
300
301/**
302 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nano seconds.
303 *
304 * @returns Host nano second count.
305 * @param pVM The cross context VM structure.
306 * @param cVirtTicksToDeadline The TMCLOCK_VIRTUAL interval.
307 */
308DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
309{
310 if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
311 return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
312 return cVirtTicksToDeadline;
313}
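/*
 * Editorial note -- worked example (made-up numbers): at a warp drive
 * percentage of 200, virtual time runs twice as fast as host time, so a
 * deadline 1 000 000 virtual ticks away converts to
 *     1 000 000 * 100 / 200 = 500 000 host nanoseconds,
 * while at 50 percent the same deadline is 2 000 000 host nanoseconds away.
 */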
314
315
316/**
317 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
318 *
319 * @returns The timestamp.
320 * @param pVM The cross context VM structure.
321 * @param u64 raw virtual time.
322 * @param off offVirtualSync.
323 * @param pcNsToDeadline Where to return the number of nano seconds to
324 * the next virtual sync timer deadline. Can be
325 * NULL.
326 */
327DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off, uint64_t *pcNsToDeadline)
328{
329 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
330
331 /*
332 * Don't make updates until we've checked the timer queue.
333 */
334 bool fUpdatePrev = true;
335 bool fUpdateOff = true;
336 bool fStop = false;
337 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
338 uint64_t u64Delta = u64 - u64Prev;
339 if (RT_LIKELY(!(u64Delta >> 32)))
340 {
341 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
342 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
343 {
344 off -= u64Sub;
345 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
346 }
347 else
348 {
349 /* we've completely caught up. */
350 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
351 off = pVM->tm.s.offVirtualSyncGivenUp;
352 fStop = true;
353 Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
354 }
355 }
356 else
357 {
358 /* More than 4 seconds since last time (or negative), ignore it. */
359 fUpdateOff = false;
360 fUpdatePrev = !(u64Delta & RT_BIT_64(63));
361 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
362 }
363
364 /*
365 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
366 * approach is to never pass the head timer. So, when we do reach it, we stop
367 * the clock and set the timer pending flag.
368 */
369 u64 -= off;
370
371 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
372 if (u64Last > u64)
373 {
374 u64 = u64Last + 1;
375 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
376 }
377
378 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
379 if (u64 < u64Expire)
380 {
381 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
382 if (fUpdateOff)
383 ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
384 if (fStop)
385 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
386 if (fUpdatePrev)
387 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
388 if (pcNsToDeadline)
389 {
390 uint64_t cNsToDeadline = u64Expire - u64;
391 if (pVM->tm.s.fVirtualSyncCatchUp)
392 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
393 pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
394 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
395 }
396 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
397 }
398 else
399 {
400 u64 = u64Expire;
401 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
402 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
403
404 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
405 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
406 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
407 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
408 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
409 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
410
411 if (pcNsToDeadline)
412 *pcNsToDeadline = 0;
413#ifdef IN_RING3
414# ifdef VBOX_WITH_REM
415 REMR3NotifyTimerPending(pVM, pVCpuDst);
416# endif
417 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
418#endif
419 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
420 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
421 }
422 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
423
424 Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
425 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetHandleCatchUpLocked");
426 return u64;
427}
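/*
 * Editorial note -- worked example of the catch-up math above (made-up
 * numbers): with u32VirtualSyncCatchUpPercentage = 100, an elapsed virtual
 * interval of 10 000 000 ns gives u64Sub = 10 000 000 * 100 / 100 = 10 000 000 ns,
 * so the lag (off) shrinks by the full elapsed amount and the virtual sync
 * clock effectively advances at 200% of the virtual clock.  The deadline is
 * compressed accordingly: a timer 8 000 000 virtual-sync ns away is reached
 * after roughly 8 000 000 * 100 / (100 + 100) = 4 000 000 ns of virtual time.
 */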
428
429
430/**
431 * tmVirtualSyncGetEx worker for when we get the lock.
432 *
433 * @returns The timestamp.
434 * @param pVM The cross context VM structure.
435 * @param u64 The virtual clock timestamp.
436 * @param pcNsToDeadline Where to return the number of nano seconds to
437 * the next virtual sync timer deadline. Can be
438 * NULL.
439 */
440DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline)
441{
442 /*
443 * Not ticking?
444 */
445 if (!pVM->tm.s.fVirtualSyncTicking)
446 {
447 u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
448 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
449 if (pcNsToDeadline)
450 *pcNsToDeadline = 0;
451 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
452 Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
453 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked-stopped");
454 return u64;
455 }
456
457 /*
458 * Handle catch up in a separate function.
459 */
460 uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
461 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
462 return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline);
463
464 /*
465 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
466 * approach is to never pass the head timer. So, when we do reach it, we stop
467 * the clock and set the timer pending flag.
468 */
469 u64 -= off;
470
471 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
472 if (u64Last > u64)
473 {
474 u64 = u64Last + 1;
475 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
476 }
477
478 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
479 if (u64 < u64Expire)
480 {
481 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
482 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
483 if (pcNsToDeadline)
484 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
485 }
486 else
487 {
488 u64 = u64Expire;
489 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
490 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
491
492 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
493 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
494 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
495 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
496 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
497 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
498
499#ifdef IN_RING3
500# ifdef VBOX_WITH_REM
501 REMR3NotifyTimerPending(pVM, pVCpuDst);
502# endif
503 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
504#endif
505 if (pcNsToDeadline)
506 *pcNsToDeadline = 0;
507 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
508 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
509 }
510 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
511 Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
512 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked");
513 return u64;
514}
515
516
517/**
518 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
519 *
520 * @returns The timestamp.
521 * @param pVM The cross context VM structure.
522 * @param fCheckTimers Check timers or not
523 * @param pcNsToDeadline Where to return the number of nano seconds to
524 * the next virtual sync timer deadline. Can be
525 * NULL.
526 * @thread EMT.
527 */
528DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline)
529{
530 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
531
532 uint64_t u64;
533 if (!pVM->tm.s.fVirtualSyncTicking)
534 {
535 if (pcNsToDeadline)
536 *pcNsToDeadline = 0;
537 u64 = pVM->tm.s.u64VirtualSync;
538 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-stopped1");
539 return u64;
540 }
541
542 /*
543 * Query the virtual clock and do the usual expired timer check.
544 */
545 Assert(pVM->tm.s.cVirtualTicking);
546 u64 = tmVirtualGetRaw(pVM);
547 if (fCheckTimers)
548 {
549 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
550 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
551 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
552 {
553 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
554 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
555#ifdef IN_RING3
556# ifdef VBOX_WITH_REM
557 REMR3NotifyTimerPending(pVM, pVCpuDst);
558# endif
559 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
560#endif
561 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
562 }
563 }
564
565 /*
566 * If we can get the lock, get it. The result is much more reliable.
567 *
568 * Note! This is where all clock source devices branch off because they
569 * will be owning the lock already. The 'else' is taken by code
570 * which is less picky or hasn't been adjusted yet
571 */
572 if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
573 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);
574
575 /*
576 * When the clock is ticking, not doing catch ups and not running into an
577 * expired time, we can get away without locking. Try this first.
578 */
579 uint64_t off;
580 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
581 {
582 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
583 {
584 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
585 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
586 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
587 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
588 {
589 off = u64 - off;
590 uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
591 if (off < u64Expire)
592 {
593 if (pcNsToDeadline)
594 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
595 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
596 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
597 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-lockless");
598 return off;
599 }
600 }
601 }
602 }
603 else
604 {
605 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
606 if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
607 {
608 if (pcNsToDeadline)
609 *pcNsToDeadline = 0;
610 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
611 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
612 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped2");
613 return off;
614 }
615 }
616
617 /*
618 * Read the offset and adjust if we're playing catch-up.
619 *
620 * The catch-up adjustment works by decrementing the offset by a percentage of
621 * the time elapsed since the previous TMVirtualGetSync call.
622 *
623 * It's possible to get a very long or even negative interval between two reads
624 * for the following reasons:
625 * - Someone might have suspended the process execution, frequently the case when
626 * debugging the process.
627 * - We might be on a different CPU whose TSC isn't quite in sync with the
628 * other CPUs in the system.
629 * - Another thread is racing us and we might have been preempted while inside
630 * this function.
631 *
632 * Assuming nano second virtual time, we can simply ignore any intervals which have
633 * any of the upper 32 bits set (i.e. intervals longer than 2^32 ns, roughly 4.3 seconds).
634 */
635 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
636 int cOuterTries = 42;
637 for (;; cOuterTries--)
638 {
639 /* Try grab the lock, things get simpler when owning the lock. */
640 int rcLock = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
641 if (RT_SUCCESS_NP(rcLock))
642 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);
643
644 /* Re-check the ticking flag. */
645 if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
646 {
647 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
648 if ( ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
649 && cOuterTries > 0)
650 continue;
651 if (pcNsToDeadline)
652 *pcNsToDeadline = 0;
653 Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
654 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
655 return off;
656 }
657
658 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
659 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
660 {
661 /* No changes allowed, try get a consistent set of parameters. */
662 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
663 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
664 uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
665 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
666 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
667 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
668 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
669 || cOuterTries <= 0)
670 {
671 uint64_t u64Delta = u64 - u64Prev;
672 if (RT_LIKELY(!(u64Delta >> 32)))
673 {
674 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
675 if (off > u64Sub + offGivenUp)
676 {
677 off -= u64Sub;
678 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
679 }
680 else
681 {
682 /* we've completely caught up. */
683 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
684 off = offGivenUp;
685 Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
686 }
687 }
688 else
689 /* More than 4 seconds since last time (or negative), ignore it. */
690 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
691
692 /* Check that we're still running and in catch up. */
693 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
694 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
695 break;
696 if (cOuterTries <= 0)
697 break; /* enough */
698 }
699 }
700 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
701 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
702 break; /* Got a consistent offset */
703 else if (cOuterTries <= 0)
704 break; /* enough */
705 }
706 if (cOuterTries <= 0)
707 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
708
709 /*
710 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
711 * approach is to never pass the head timer. So, when we do reach it, we stop
712 * the clock and set the timer pending flag.
713 */
714 u64 -= off;
715/** @todo u64VirtualSyncLast */
716 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
717 if (u64 >= u64Expire)
718 {
719 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
720 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
721 {
722 Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
723 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
724 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
725#ifdef IN_RING3
726# ifdef VBOX_WITH_REM
727 REMR3NotifyTimerPending(pVM, pVCpuDst);
728# endif
729 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
730#endif
731 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
732 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
733 }
734 else
735 Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
736 if (pcNsToDeadline)
737 *pcNsToDeadline = 0;
738 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
739 }
740 else if (pcNsToDeadline)
741 {
742 uint64_t cNsToDeadline = u64Expire - u64;
743 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
744 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
745 ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
746 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
747 }
748
749 Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
750 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-nolock");
751 return u64;
752}
753
754
755/**
756 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
757 *
758 * @returns The timestamp.
759 * @param pVM The cross context VM structure.
760 * @thread EMT.
761 * @remarks May set the timer and virtual sync FFs.
762 */
763VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVMCC pVM)
764{
765 return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
766}
767
768
769/**
770 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
771 * TMCLOCK_VIRTUAL.
772 *
773 * @returns The timestamp.
774 * @param pVM The cross context VM structure.
775 * @thread EMT.
776 * @remarks May set the timer and virtual sync FFs.
777 */
778VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVMCC pVM)
779{
780 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
781}
782
783
784/**
785 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
786 *
787 * @returns The timestamp.
788 * @param pVM The cross context VM structure.
789 * @param fCheckTimers Check timers on the virtual clock or not.
790 * @thread EMT.
791 * @remarks May set the timer and virtual sync FFs.
792 */
793VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers)
794{
795 return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/);
796}
797
798
799/**
800 * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
801 * without checking timers running on TMCLOCK_VIRTUAL.
802 *
803 * @returns The timestamp.
804 * @param pVM The cross context VM structure.
805 * @param pcNsToDeadline Where to return the number of nano seconds to
806 * the next virtual sync timer deadline.
807 * @thread EMT.
808 * @remarks May set the timer and virtual sync FFs.
809 */
810VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline)
811{
812 uint64_t cNsToDeadlineTmp; /* try to convince the compiler to skip the if tests. */
813 uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp);
814 *pcNsToDeadline = cNsToDeadlineTmp;
815 return u64Now;
816}
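/*
 * Editorial note: a hedged sketch of how a ring-3 caller might use the
 * deadline returned by the function above to bound a wait until the next
 * virtual sync timer is due.  The helper below is hypothetical and only
 * illustrates the call pattern; RTThreadSleep is a crude stand-in for a
 * proper EMT halt.
 */
#if 0 /* illustrative sketch only (ring-3) */
static void exampleWaitForNextVirtualSyncTimer(PVMCC pVM)
{
    uint64_t cNsToDeadline;
    TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
    if (cNsToDeadline > 0)
        RTThreadSleep((RTMSINTERVAL)(cNsToDeadline / RT_NS_1MS)); /* sleep at most until the deadline */
    /* A deadline of 0 means a virtual sync timer has already expired. */
}
#endif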
817
818
819/**
820 * Gets the number of nano seconds to the next virtual sync deadline.
821 *
822 * @returns The number of TMCLOCK_VIRTUAL ticks.
823 * @param pVM The cross context VM structure.
824 * @thread EMT.
825 * @remarks May set the timer and virtual sync FFs.
826 */
827VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM)
828{
829 uint64_t cNsToDeadline;
830 tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline);
831 return cNsToDeadline;
832}
833
834
835/**
836 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
837 *
838 * @return The current lag.
839 * @param pVM The cross context VM structure.
840 */
841VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVMCC pVM)
842{
843 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
844}
845
846
847/**
848 * Get the current catch-up percent.
849 *
850 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
851 * @param pVM The cross context VM structure.
852 */
853VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVMCC pVM)
854{
855 if (pVM->tm.s.fVirtualSyncCatchUp)
856 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
857 return 0;
858}
859
860
861/**
862 * Gets the current TMCLOCK_VIRTUAL frequency.
863 *
864 * @returns The frequency.
865 * @param pVM The cross context VM structure.
866 */
867VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
868{
869 NOREF(pVM);
870 return TMCLOCK_FREQ_VIRTUAL;
871}
872
873
874/**
875 * Worker for TMR3PauseClocks.
876 *
877 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
878 * @param pVM The cross context VM structure.
879 */
880int tmVirtualPauseLocked(PVMCC pVM)
881{
882 uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
883 AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
884 if (c == 0)
885 {
886 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
887 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
888 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
889 }
890 return VINF_SUCCESS;
891}
892
893
894/**
895 * Worker for TMR3ResumeClocks.
896 *
897 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
898 * @param pVM The cross context VM structure.
899 */
900int tmVirtualResumeLocked(PVMCC pVM)
901{
902 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
903 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
904 if (c == 1)
905 {
906 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
907 pVM->tm.s.u64VirtualRawPrev = 0;
908 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
909 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
910 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
911 }
912 return VINF_SUCCESS;
913}
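/*
 * Editorial note -- worked example of the pause/resume offset math (made-up
 * numbers): suppose the clock is paused with u64Virtual = 5 000 000 000 ns.
 * If the raw nanotimestamp reads 9 000 000 000 ns when the last VCpu resumes,
 * then u64VirtualOffset = 9 000 000 000 - 5 000 000 000 = 4 000 000 000 ns,
 * so tmVirtualGetRaw() (raw reading minus offset) continues seamlessly from
 * the 5 000 000 000 ns mark; the time spent paused is absorbed into the offset.
 */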
914
915
916/**
917 * Converts from virtual ticks to nanoseconds.
918 *
919 * @returns nanoseconds.
920 * @param pVM The cross context VM structure.
921 * @param u64VirtualTicks The virtual ticks to convert.
922 * @remark There could be rounding errors here. We just do a simple integer divide
923 * without any adjustments.
924 */
925VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
926{
927 NOREF(pVM);
928 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
929 return u64VirtualTicks;
930}
931
932
933/**
934 * Converts from virtual ticks to microseconds.
935 *
936 * @returns microseconds.
937 * @param pVM The cross context VM structure.
938 * @param u64VirtualTicks The virtual ticks to convert.
939 * @remark There could be rounding errors here. We just do a simple integer divide
940 * without any adjustments.
941 */
942VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
943{
944 NOREF(pVM);
945 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
946 return u64VirtualTicks / 1000;
947}
948
949
950/**
951 * Converts from virtual ticks to milliseconds.
952 *
953 * @returns milliseconds.
954 * @param pVM The cross context VM structure.
955 * @param u64VirtualTicks The virtual ticks to convert.
956 * @remark There could be rounding errors here. We just do a simple integer divide
957 * without any adjustments.
958 */
959VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
960{
961 NOREF(pVM);
962 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
963 return u64VirtualTicks / 1000000;
964}
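/*
 * Editorial note -- example of the truncation mentioned in the remarks above:
 * 1 999 999 ns of virtual time converts to 1 999 999 / 1 000 000 = 1 ms; the
 * remaining 999 999 ns are simply dropped by the integer division.
 */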
965
966
967/**
968 * Converts from nanoseconds to virtual ticks.
969 *
970 * @returns virtual ticks.
971 * @param pVM The cross context VM structure.
972 * @param u64NanoTS The nanosecond value to convert.
973 * @remark There could be rounding and overflow errors here.
974 */
975VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
976{
977 NOREF(pVM);
978 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
979 return u64NanoTS;
980}
981
982
983/**
984 * Converts from microseconds to virtual ticks.
985 *
986 * @returns virtual ticks.
987 * @param pVM The cross context VM structure.
988 * @param u64MicroTS The microsecond value to convert.
989 * @remark There could be rounding and overflow errors here.
990 */
991VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
992{
993 NOREF(pVM);
994 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
995 return u64MicroTS * 1000;
996}
997
998
999/**
1000 * Converts from milliseconds to virtual ticks.
1001 *
1002 * @returns virtual ticks.
1003 * @param pVM The cross context VM structure.
1004 * @param u64MilliTS The millisecond value to convert.
1005 * @remark There could be rounding and overflow errors here.
1006 */
1007VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
1008{
1009 NOREF(pVM);
1010 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1011 return u64MilliTS * 1000000;
1012}
1013