VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp@ 58912

Last change on this file since 58912 was 58126, checked in by vboxsync, 9 years ago

VMM: Fixed almost all the Doxygen warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 18.9 KB
Line 
1/* $Id: TMAllCpu.cpp 58126 2015-10-08 20:59:48Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, CPU Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/vmm/tm.h>
24#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP */
25#include "TMInternal.h"
26#include <VBox/vmm/vm.h>
27#include <VBox/vmm/gim.h>
28#include <VBox/sup.h>
29
30#include <VBox/param.h>
31#include <VBox/err.h>
32#include <iprt/asm-math.h>
33#include <iprt/assert.h>
34#include <VBox/log.h>
35
36
/**
 * Gets the raw cpu tick from current virtual time.
 *
 * Reads the virtual sync clock and rescales it from the virtual clock
 * frequency (TMCLOCK_FREQ_VIRTUAL) to the guest TSC frequency
 * (pVM->tm.s.cTSCTicksPerSecond).
 *
 * @returns The virtual sync time expressed in guest TSC ticks.
 * @param   pVM             The cross context VM structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetRawVirtual(PVM pVM, bool fCheckTimers)
{
    uint64_t u64;
    if (fCheckTimers)
        u64 = TMVirtualSyncGet(pVM);
    else
        u64 = TMVirtualSyncGetNoCheck(pVM);
    /* NOTE(review): the rescaling below is skipped when u64 happens to equal
       TMCLOCK_FREQ_VIRTUAL exactly.  The purpose of this test is unclear (the
       original author flags it too, below) -- verify before changing. */
    if (u64 != TMCLOCK_FREQ_VIRTUAL) /* what's the use of this test, document! */
        u64 = ASMMultU64ByU32DivByU32(u64, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
    return u64;
}
54
55
#ifdef IN_RING3
/**
 * Used by tmR3CpuTickParavirtEnable and tmR3CpuTickParavirtDisable.
 *
 * Ring-3 only wrapper exposing tmCpuTickGetRawVirtual without timer checking.
 *
 * @returns The virtual sync time expressed in guest TSC ticks.
 * @param   pVM     The cross context VM structure.
 */
uint64_t tmR3CpuTickGetRawVirtualNoCheck(PVM pVM)
{
    return tmCpuTickGetRawVirtual(pVM, false /*fCheckTimers*/);
}
#endif
67
68
/**
 * Resumes the CPU timestamp counter ticking.
 *
 * Re-bases the offset between the raw TSC source and the guest TSC value that
 * was captured at pause time (pVCpu->tm.s.u64TSC), so the guest TSC continues
 * from exactly where it stopped.
 *
 * @returns VBox status code (VINF_SUCCESS, or VERR_TM_TSC_ALREADY_TICKING if
 *          the TSC was already ticking -- asserted).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickResume(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.fTSCTicking = true;

        /** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're
         *        unpaused before the virtual time and stopped after it. */
        if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
            /* Real TSC mode: the raw source is the (delta-adjusted) host TSC. */
            pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVCpu->tm.s.u64TSC;
        else
            /* Otherwise the guest TSC is derived from the virtual sync clock. */
            pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                     - pVCpu->tm.s.u64TSC;
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_TICKING;
}
95
96
/**
 * Resumes the CPU timestamp counter ticking (caller holds the TM lock).
 *
 * The first VCPU to resume re-bases its offset on the TSC value saved by the
 * last VCPU to pause (u64LastPausedTSC) so the guest TSC cannot go backwards;
 * the delta that this re-basing introduced is published in offTSCPause so the
 * remaining VCPUs can apply the very same shift and stay in sync.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
int tmCpuTickResumeLocked(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        /* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */
        pVCpu->tm.s.fTSCTicking = true;
        uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 1)
        {
            /* The first VCPU to resume. */
            uint64_t offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc;

            STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume);

            /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
            if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
                pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
            else
                pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                         - pVM->tm.s.u64LastPausedTSC;

            /* Calculate the offset for other VCPUs to use. */
            pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
        }
        else
        {
            /* All other VCPUs (if any). */
            pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause;
        }
    }
    return VINF_SUCCESS;
}
137
138
/**
 * Pauses the CPU timestamp counter ticking.
 *
 * Snapshots the current guest TSC into pVCpu->tm.s.u64TSC so a later resume
 * can continue from the same value.
 *
 * @returns VBox status code (VINF_SUCCESS, or VERR_TM_TSC_ALREADY_PAUSED if
 *          the TSC was not ticking -- asserted).
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickPause(PVMCPU pVCpu)
{
    if (pVCpu->tm.s.fTSCTicking)
    {
        /* Capture the value first; TMCpuTickGetNoCheck requires fTSCTicking. */
        pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
        pVCpu->tm.s.fTSCTicking = false;
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_PAUSED;
}
157
158
/**
 * Pauses the CPU timestamp counter ticking (caller holds the TM lock).
 *
 * In addition to tmCpuTickPause, maintains the running count of ticking TSCs;
 * the last VCPU to pause records its TSC value in u64LastPausedTSC, which
 * tmCpuTickResumeLocked uses to keep the guest TSC monotonic across
 * pause/resume cycles.
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_TM_TSC_ALREADY_PAUSED, or
 *          VERR_TM_VIRTUAL_TICKING_IPE on bookkeeping corruption).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickPauseLocked(PVM pVM, PVMCPU pVCpu)
{
    if (pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
        pVCpu->tm.s.fTSCTicking = false;

        uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 0)
        {
            /* When the last TSC stops, remember the value. */
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCPause);
            pVM->tm.s.u64LastPausedTSC = pVCpu->tm.s.u64TSC;
        }
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_PAUSED;
}
187
188
189/**
190 * Record why we refused to use offsetted TSC.
191 *
192 * Used by TMCpuTickCanUseRealTSC() and TMCpuTickGetDeadlineAndTscOffset().
193 *
194 * @param pVM The cross context VM structure.
195 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
196 */
197DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
198{
199 /* Sample the reason for refusing. */
200 if (pVM->tm.s.enmTSCMode != TMTSCMODE_DYNAMIC)
201 STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
202 else if (!pVCpu->tm.s.fTSCTicking)
203 STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
204 else if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
205 {
206 if (pVM->tm.s.fVirtualSyncCatchUp)
207 {
208 if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
209 STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
210 else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
211 STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
212 else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
213 STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
214 else
215 STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
216 }
217 else if (!pVM->tm.s.fVirtualSyncTicking)
218 STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
219 else if (pVM->tm.s.fVirtualWarpDrive)
220 STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
221 }
222}
223
224
/**
 * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
 *
 * @returns true/false accordingly.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   poffRealTsc     The offset against the TSC of the current host CPU,
 *                          if pfOffsettedTsc is set to true.
 * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
 *
 * @thread  EMT(pVCpu).
 * @see     TMCpuTickGetDeadlineAndTscOffset().
 */
VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc, bool *pfParavirtTsc)
{
    Assert(pVCpu->tm.s.fTSCTicking);

    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

    /*
     * In real TSC mode it's easy, we just need the delta & offTscRawSrc and
     * the CPU will add them to RDTSC and RDTSCP at runtime.
     *
     * In tmCpuTickGetInternal we do:
     *          SUPReadTsc() - pVCpu->tm.s.offTSCRawSrc;
     * Where SUPReadTsc() does:
     *          ASMReadTSC() - pGipCpu->i64TscDelta;
     * Which means tmCpuTickGetInternal actually does:
     *          ASMReadTSC() - pGipCpu->i64TscDelta - pVCpu->tm.s.offTSCRawSrc;
     * So, the offset to be ADDED to RDTSC[P] is:
     *          offRealTsc = -(pGipCpu->i64TscDelta + pVCpu->tm.s.offTSCRawSrc)
     */
    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
    {
        /** @todo We should negate both deltas!  It's soo weird that we do the
         *        exact opposite of what the hardware implements. */
#ifdef IN_RING3
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
#else
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
#endif
        return true;
    }

    /*
     * We require:
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *        a) we don't have any lag to catch up, and
     *        b) the virtual sync clock hasn't been halted by an expired timer, and
     *        c) we're not using warp drive (accelerated virtual guest time).
     */
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
        && !pVM->tm.s.fVirtualSyncCatchUp
        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
        && !pVM->tm.s.fVirtualWarpDrive)
    {
        /* The source is the timer synchronous virtual clock. */
        uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                        - pVCpu->tm.s.offTSCRawSrc;
        /** @todo When we start collecting statistics on how much time we spend executing
         * guest code before exiting, we should check this against the next virtual sync
         * timer timeout.  If it's lower than the avg. length, we should trap rdtsc to increase
         * the chance that we'll get interrupted right after the timer expired. */
        /* Offsetting is only safe while the derived TSC is at or ahead of what
           the guest has already observed; otherwise the guest TSC would appear
           to go backwards. */
        if (u64Now >= pVCpu->tm.s.u64TSCLastSeen)
        {
            *poffRealTsc = u64Now - ASMReadTSC();
            return true;    /** @todo count this? */
        }
    }

#ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    return false;
}
302
303
304/**
305 * Calculates the number of host CPU ticks till the next virtual sync deadline.
306 *
307 * @note To save work, this function will not bother calculating the accurate
308 * tick count for deadlines that are more than a second ahead.
309 *
310 * @returns The number of host cpu ticks to the next deadline. Max one second.
311 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
312 * @param cNsToDeadline The number of nano seconds to the next virtual
313 * sync deadline.
314 */
315DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(PVMCPU pVCpu, uint64_t cNsToDeadline)
316{
317 AssertCompile(TMCLOCK_FREQ_VIRTUAL <= _4G);
318#ifdef IN_RING3
319 uint64_t uCpuHz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
320#else
321 uint64_t uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
322#endif
323 if (RT_UNLIKELY(cNsToDeadline >= TMCLOCK_FREQ_VIRTUAL))
324 return uCpuHz;
325 uint64_t cTicks = ASMMultU64ByU32DivByU32(uCpuHz, cNsToDeadline, TMCLOCK_FREQ_VIRTUAL);
326 if (cTicks > 4000)
327 cTicks -= 4000; /* fudge to account for overhead */
328 else
329 cTicks >>= 1;
330 return cTicks;
331}
332
333
/**
 * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
 * use the raw TSC.
 *
 * @returns The number of host CPU clock ticks to the next timer deadline.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   poffRealTsc     The offset against the TSC of the current host CPU,
 *                          if pfOffsettedTsc is set to true.
 * @param   pfOffsettedTsc  Where to return whether TSC offsetting can be used.
 * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
 *
 * @thread  EMT(pVCpu).
 * @see     TMCpuTickCanUseRealTSC().
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc,
                                                        bool *pfOffsettedTsc, bool *pfParavirtTsc)
{
    Assert(pVCpu->tm.s.fTSCTicking);

    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

    /*
     * Same logic as in TMCpuTickCanUseRealTSC: in real TSC mode the offset is
     * computed from the host TSC delta and offTSCRawSrc.
     */
    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
    {
        /** @todo We should negate both deltas!  It's soo weird that we do the
         *        exact opposite of what the hardware implements. */
#ifdef IN_RING3
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
#else
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
#endif
        *pfOffsettedTsc = true;
        return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
    }

    /*
     * Same logic as in TMCpuTickCanUseRealTSC: dynamic mode with no catch-up,
     * a ticking virtual sync clock and no warp drive.
     */
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
        && !pVM->tm.s.fVirtualSyncCatchUp
        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
        && !pVM->tm.s.fVirtualWarpDrive)
    {
        /* The source is the timer synchronous virtual clock.  The conversion
           below inlines tmCpuTickGetRawVirtual so the deadline can be fetched
           in the same call. */
        uint64_t cNsToDeadline;
        uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
        /* NOTE(review): like tmCpuTickGetRawVirtual, the rescaling is skipped
           when the value equals TMCLOCK_FREQ_VIRTUAL exactly; purpose unclear
           (flagged by the original author) -- verify before changing. */
        uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
                        ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
                        : u64NowVirtSync;
        u64Now -= pVCpu->tm.s.offTSCRawSrc;
        *poffRealTsc = u64Now - ASMReadTSC();
        /* Offsetting is only usable while we stay at or ahead of the TSC the
           guest has already seen (monotonicity). */
        *pfOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
        return tmCpuCalcTicksToDeadline(pVCpu, cNsToDeadline);
    }

#ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    *pfOffsettedTsc = false;
    *poffRealTsc = 0;
    return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
}
399
400
/**
 * Read the current CPU timestamp counter.
 *
 * Derives the guest TSC from the configured raw source (host TSC or virtual
 * sync clock) minus offTSCRawSrc, and enforces monotonicity against the value
 * the guest has last seen.  When the TSC is paused, returns the frozen value.
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
            u64 = SUPReadTsc();
        else
            u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
        u64 -= pVCpu->tm.s.offTSCRawSrc;

        /* Always return a value higher than what the guest has already seen. */
        if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
            pVCpu->tm.s.u64TSCLastSeen = u64;
        else
        {
            /* Underflow: nudge the last-seen value forward a little and
               report that instead, so the guest TSC never goes backwards. */
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        /* TSC paused: return the value captured by tmCpuTickPause. */
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}
435
436
/**
 * Read the current CPU timestamp counter.
 *
 * Also polls the virtual sync clock for expired timers.
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(uint64_t) TMCpuTickGet(PVMCPU pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, true /* fCheckTimers */);
}
447
448
/**
 * Read the current CPU timestamp counter, don't check for expired timers.
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetNoCheck(PVMCPU pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, false /* fCheckTimers */);
}
459
460
/**
 * Sets the current CPU timestamp counter.
 *
 * Temporarily stops the TSC, overwrites both the saved TSC value and the
 * last-seen value, then lets tmCpuTickResume recompute the raw-source offset.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   u64Tick     The new timestamp value.
 *
 * @thread  EMT which TSC is to be set.
 */
VMM_INT_DECL(int) TMCpuTickSet(PVM pVM, PVMCPU pVCpu, uint64_t u64Tick)
{
    VMCPU_ASSERT_EMT(pVCpu);
    STAM_COUNTER_INC(&pVM->tm.s.StatTSCSet);

    /*
     * This is easier to do when the TSC is paused since resume will
     * do all the calculations for us.  Actually, we don't need to
     * call tmCpuTickPause here since we overwrite u64TSC anyway.
     */
    bool fTSCTicking = pVCpu->tm.s.fTSCTicking;
    pVCpu->tm.s.fTSCTicking = false;
    pVCpu->tm.s.u64TSC = u64Tick;
    pVCpu->tm.s.u64TSCLastSeen = u64Tick;
    if (fTSCTicking)
        tmCpuTickResume(pVM, pVCpu);
    /** @todo Try help synchronizing it better among the virtual CPUs? */

    return VINF_SUCCESS;
}
491
492/**
493 * Sets the last seen CPU timestamp counter.
494 *
495 * @returns VBox status code.
496 * @param pVCpu The cross context virtual CPU structure.
497 * @param u64LastSeenTick The last seen timestamp value.
498 *
499 * @thread EMT which TSC is to be set.
500 */
501VMM_INT_DECL(int) TMCpuTickSetLastSeen(PVMCPU pVCpu, uint64_t u64LastSeenTick)
502{
503 VMCPU_ASSERT_EMT(pVCpu);
504
505 LogFlow(("TMCpuTickSetLastSeen %RX64\n", u64LastSeenTick));
506 if (pVCpu->tm.s.u64TSCLastSeen < u64LastSeenTick)
507 pVCpu->tm.s.u64TSCLastSeen = u64LastSeenTick;
508 return VINF_SUCCESS;
509}
510
/**
 * Gets the last seen CPU timestamp counter of the guest.
 *
 * @returns the last seen TSC.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetLastSeen(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    return pVCpu->tm.s.u64TSCLastSeen;
}
525
526
/**
 * Get the timestamp frequency.
 *
 * In real-TSC-offset mode on hosts without an invariant TSC, the frequency is
 * taken live from the GIP (it may vary per CPU); otherwise the fixed
 * per-VM value is returned.
 *
 * @returns Number of ticks per second.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM)
{
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
        && g_pSUPGlobalInfoPage->u32Mode != SUPGIPMODE_INVARIANT_TSC)
    {
        /* The GIP lookup differs per context (ring-3 / ring-0 / raw-mode). */
#ifdef IN_RING3
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
#elif defined(IN_RING0)
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, RTMpCpuIdToSetIndex(RTMpCpuId()));
#else
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, VMMGetCpu(pVM)->iHostCpuSet);
#endif
        /* All Fs signals that the GIP could not supply a frequency. */
        if (RT_LIKELY(cTSCTicksPerSecond != ~(uint64_t)0))
            return cTSCTicksPerSecond;
    }
    return pVM->tm.s.cTSCTicksPerSecond;
}
550
551
/**
 * Whether the TSC is ticking for the VCPU.
 *
 * @returns true if ticking, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) TMCpuTickIsTicking(PVMCPU pVCpu)
{
    return pVCpu->tm.s.fTSCTicking;
}
562
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette