VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp @ 60185

Last change on this file since 60185 was 60185, checked in by vboxsync, 9 years ago

IEM: Fixed a couple of edge cases and broken verification mode.

  • Update enmCpuMode after loading hidden CS flags (prep for recompiling).
  • Fixed retf in 64-bit mode where we would load CS.BASE with zero when returning to 16-bit or 32-bit code.
  • Fixed ESP/SP handling for protected mode exception injection.
  • Implemented the two string I/O notification functions that would assert in verification mode.
  • The IEMExec* methods must call iemUninitExec to undo the poisoning of decoding data members, since the poisoning will otherwise interfere with verification mode opcode fetching optimizations and other stuff.

The above makes the current bs3-cpu-basic-2 code work in --execute-all-in-iem mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 19.0 KB
/* $Id: TMAllCpu.cpp 60185 2016-03-24 17:39:40Z vboxsync $ */
/** @file
 * TM - Timeout Manager, CPU Time, All Contexts.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#include <VBox/vmm/tm.h>
#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP */
#include "TMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/sup.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <VBox/log.h>


/**
 * Gets the raw cpu tick from current virtual time.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetRawVirtual(PVM pVM, bool fCheckTimers)
{
    uint64_t u64;
    if (fCheckTimers)
        u64 = TMVirtualSyncGet(pVM);
    else
        u64 = TMVirtualSyncGetNoCheck(pVM);
    if (u64 != TMCLOCK_FREQ_VIRTUAL) /* what's the use of this test, document! */
        u64 = ASMMultU64ByU32DivByU32(u64, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
    return u64;
}
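
/*
 * Worked example for the scaling above (hypothetical numbers, illustrative
 * only): TMCLOCK_FREQ_VIRTUAL is 1'000'000'000, i.e. the virtual clock
 * counts nanoseconds.  With a guest TSC frequency of 2'400'000'000
 * ticks/sec and a virtual-sync time of 500'000 ns:
 *
 *     ASMMultU64ByU32DivByU32(500000, 2400000000, 1000000000)
 *         = 500000 * 2400000000 / 1000000000
 *         = 1200000 guest TSC ticks
 *
 * The 64x32/32 helper keeps the full intermediate product before dividing,
 * so the scaling does not overflow 64 bits.
 */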


#ifdef IN_RING3
/**
 * Used by tmR3CpuTickParavirtEnable and tmR3CpuTickParavirtDisable.
 *
 * @param   pVM     The cross context VM structure.
 */
uint64_t tmR3CpuTickGetRawVirtualNoCheck(PVM pVM)
{
    return tmCpuTickGetRawVirtual(pVM, false /*fCheckTimers*/);
}
#endif


/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickResume(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.fTSCTicking = true;

        /** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're
         *        unpaused before the virtual time and stopped after it.) */
        if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
            pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVCpu->tm.s.u64TSC;
        else
            pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                     - pVCpu->tm.s.u64TSC;
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_TICKING;
}


/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
int tmCpuTickResumeLocked(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        /* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */
        pVCpu->tm.s.fTSCTicking = true;
        uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 1)
        {
            /* The first VCPU to resume. */
            uint64_t offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc;

            STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume);

            /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
            if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
                pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
            else
                pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                         - pVM->tm.s.u64LastPausedTSC;

            /* Calculate the offset for other VCPUs to use. */
            pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
        }
        else
        {
            /* All other VCPUs (if any). */
            pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause;
        }
    }
    return VINF_SUCCESS;
}
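
/*
 * A note on the bookkeeping above (illustrative, hypothetical numbers):
 * the guest TSC is always computed as rawSource - offTSCRawSrc, where the
 * raw source is either the host TSC or the scaled virtual-sync clock.
 * Pausing records the guest value in u64TSC/u64LastPausedTSC; resuming
 * recomputes offTSCRawSrc so the guest TSC continues from that value.
 * E.g. if the last paused guest TSC was 1000 and the raw source now reads
 * 5000, the new offset is 5000 - 1000 = 4000, and the next read returns
 * 5000 - 4000 = 1000, so no backwards jump is visible to the guest.
 */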


/**
 * Pauses the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickPause(PVMCPU pVCpu)
{
    if (pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
        pVCpu->tm.s.fTSCTicking = false;
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_PAUSED;
}


/**
 * Pauses the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickPauseLocked(PVM pVM, PVMCPU pVCpu)
{
    if (pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
        pVCpu->tm.s.fTSCTicking = false;

        uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 0)
        {
            /* When the last TSC stops, remember the value. */
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCPause);
            pVM->tm.s.u64LastPausedTSC = pVCpu->tm.s.u64TSC;
        }
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_PAUSED;
}


/**
 * Record why we refused to use offsetted TSC.
 *
 * Used by TMCpuTickCanUseRealTSC() and TMCpuTickGetDeadlineAndTscOffset().
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
{
    /* Sample the reason for refusing. */
    if (pVM->tm.s.enmTSCMode != TMTSCMODE_DYNAMIC)
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
    else if (!pVCpu->tm.s.fTSCTicking)
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
    else if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
    {
        if (pVM->tm.s.fVirtualSyncCatchUp)
        {
            if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
        }
        else if (!pVM->tm.s.fVirtualSyncTicking)
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
        else if (pVM->tm.s.fVirtualWarpDrive)
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
    }
}


/**
 * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
 *
 * @returns true/false accordingly.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   poffRealTsc     The offset against the TSC of the current host CPU,
 *                          if the function returns true.
 * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
 *
 * @thread  EMT(pVCpu).
 * @see     TMCpuTickGetDeadlineAndTscOffset().
 */
VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc, bool *pfParavirtTsc)
{
    Assert(pVCpu->tm.s.fTSCTicking || DBGFIsStepping(pVCpu));

    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

    /*
     * In real TSC mode it's easy, we just need the delta & offTscRawSrc and
     * the CPU will add them to RDTSC and RDTSCP at runtime.
     *
     * In tmCpuTickGetInternal we do:
     *          SUPReadTsc() - pVCpu->tm.s.offTSCRawSrc;
     * Where SUPReadTsc() does:
     *          ASMReadTSC() - pGipCpu->i64TscDelta;
     * Which means tmCpuTickGetInternal actually does:
     *          ASMReadTSC() - pGipCpu->i64TscDelta - pVCpu->tm.s.offTSCRawSrc;
     * So, the offset to be ADDED to RDTSC[P] is:
     *          offRealTsc = -(pGipCpu->i64TscDelta + pVCpu->tm.s.offTSCRawSrc)
     */
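    /*
     * Worked example of the derivation above (hypothetical numbers): if the
     * GIP TSC delta for this host CPU is +200 and offTSCRawSrc is 1000000,
     * then
     *          offRealTsc = -(200 + 1000000) = -1000200
     * and the guest sees RDTSC + offRealTsc = ASMReadTSC() - 1000200, which
     * matches what tmCpuTickGetInternal computes in software.
     */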
    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
    {
        /** @todo We should negate both deltas!  It's soo weird that we do the
         *        exact opposite of what the hardware implements. */
#ifdef IN_RING3
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
#else
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
#endif
        return true;
    }

    /*
     * We require:
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *        a) we don't have any lag to catch up, and
     *        b) the virtual sync clock hasn't been halted by an expired timer, and
     *        c) we're not using warp drive (accelerated virtual guest time).
     */
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
        && !pVM->tm.s.fVirtualSyncCatchUp
        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
        && !pVM->tm.s.fVirtualWarpDrive)
    {
        /* The source is the timer synchronous virtual clock. */
        uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                        - pVCpu->tm.s.offTSCRawSrc;
        /** @todo When we start collecting statistics on how much time we spend executing
         * guest code before exiting, we should check this against the next virtual sync
         * timer timeout.  If it's lower than the avg. length, we should trap rdtsc to increase
         * the chance that we'll get interrupted right after the timer expired. */
        if (u64Now >= pVCpu->tm.s.u64TSCLastSeen)
        {
            *poffRealTsc = u64Now - ASMReadTSC();
            return true;    /** @todo count this? */
        }
    }

#ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    return false;
}
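
/*
 * Hypothetical caller sketch (illustrative only, not code from this file):
 * an HM run loop is expected to consume the outputs roughly like this
 * before VM entry:
 *
 *     uint64_t offTsc;
 *     bool     fParavirt;
 *     if (TMCpuTickCanUseRealTSC(pVM, pVCpu, &offTsc, &fParavirt))
 *     {
 *         // Program the VT-x/AMD-V TSC offset field with offTsc so the
 *         // guest can execute RDTSC/RDTSCP natively.
 *     }
 *     else
 *     {
 *         // Intercept RDTSC/RDTSCP and emulate via TMCpuTickGet().
 *     }
 */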


/**
 * Calculates the number of host CPU ticks till the next virtual sync deadline.
 *
 * @note    To save work, this function will not bother calculating the accurate
 *          tick count for deadlines that are more than a second ahead.
 *
 * @returns The number of host cpu ticks to the next deadline.  Max one second.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   cNsToDeadline   The number of nanoseconds to the next virtual
 *                          sync deadline.
 */
DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(PVMCPU pVCpu, uint64_t cNsToDeadline)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL <= _4G);
#ifdef IN_RING3
    uint64_t uCpuHz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
#else
    uint64_t uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
#endif
    if (RT_UNLIKELY(cNsToDeadline >= TMCLOCK_FREQ_VIRTUAL))
        return uCpuHz;
    uint64_t cTicks = ASMMultU64ByU32DivByU32(uCpuHz, cNsToDeadline, TMCLOCK_FREQ_VIRTUAL);
    if (cTicks > 4000)
        cTicks -= 4000; /* fudge to account for overhead */
    else
        cTicks >>= 1;
    return cTicks;
}
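
/*
 * Worked example for the helper above (hypothetical numbers): on a 3 GHz
 * host with a deadline 250000 ns away,
 *     cTicks = 3000000000 * 250000 / 1000000000 = 750000,
 * and after the 4000-tick overhead fudge the result is 746000 host ticks.
 * Deadlines of a second or more simply return uCpuHz, i.e. one second's
 * worth of host ticks.
 */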


/**
 * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
 * use the raw TSC.
 *
 * @returns The number of host CPU clock ticks to the next timer deadline.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   poffRealTsc     The offset against the TSC of the current host CPU,
 *                          if pfOffsettedTsc is set to true.
 * @param   pfOffsettedTsc  Where to return whether TSC offsetting can be used.
 * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
 *
 * @thread  EMT(pVCpu).
 * @see     TMCpuTickCanUseRealTSC().
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc,
                                                        bool *pfOffsettedTsc, bool *pfParavirtTsc)
{
    Assert(pVCpu->tm.s.fTSCTicking || DBGFIsStepping(pVCpu));

    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

    /*
     * Same logic as in TMCpuTickCanUseRealTSC.
     */
    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
    {
        /** @todo We should negate both deltas!  It's soo weird that we do the
         *        exact opposite of what the hardware implements. */
#ifdef IN_RING3
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
#else
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
#endif
        *pfOffsettedTsc = true;
        return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
    }

    /*
     * Same logic as in TMCpuTickCanUseRealTSC.
     */
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
        && !pVM->tm.s.fVirtualSyncCatchUp
        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
        && !pVM->tm.s.fVirtualWarpDrive)
    {
        /* The source is the timer synchronous virtual clock. */
        uint64_t cNsToDeadline;
        uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
        uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
                        ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
                        : u64NowVirtSync;
        u64Now -= pVCpu->tm.s.offTSCRawSrc;
        *poffRealTsc = u64Now - ASMReadTSC();
        *pfOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
        return tmCpuCalcTicksToDeadline(pVCpu, cNsToDeadline);
    }

#ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    *pfOffsettedTsc = false;
    *poffRealTsc    = 0;
    return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
}
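
/*
 * Hypothetical caller sketch (illustrative only, not code from this file):
 * a run loop with a hardware preemption timer would use both return values,
 * roughly:
 *
 *     bool     fOffsetted, fParavirt;
 *     uint64_t offTsc;
 *     uint64_t cHostTicks = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu,
 *                                                            &offTsc,
 *                                                            &fOffsetted,
 *                                                            &fParavirt);
 *     // Arm the preemption timer (or a host timer) for cHostTicks, and
 *     // program the hardware TSC offset with offTsc when fOffsetted is
 *     // true; otherwise intercept RDTSC/RDTSCP.
 */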


/**
 * Read the current CPU timestamp counter.
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
            u64 = SUPReadTsc();
        else
            u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
        u64 -= pVCpu->tm.s.offTSCRawSrc;

        /* Always return a value higher than what the guest has already seen. */
        if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
            pVCpu->tm.s.u64TSCLastSeen = u64;
        else
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}
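
/*
 * Monotonicity example for the underflow path above (hypothetical numbers):
 * if the guest last saw TSC 10000 but the freshly computed value is 9990
 * (e.g. after moving to a host CPU whose TSC lags slightly), the code bumps
 * u64TSCLastSeen to 10064 and returns that instead, so the guest never
 * observes the counter going backwards.
 */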


/**
 * Read the current CPU timestamp counter.
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(uint64_t) TMCpuTickGet(PVMCPU pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, true /* fCheckTimers */);
}


/**
 * Read the current CPU timestamp counter, don't check for expired timers.
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetNoCheck(PVMCPU pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, false /* fCheckTimers */);
}


/**
 * Sets the current CPU timestamp counter.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   u64Tick     The new timestamp value.
 *
 * @thread  EMT which TSC is to be set.
 */
VMM_INT_DECL(int) TMCpuTickSet(PVM pVM, PVMCPU pVCpu, uint64_t u64Tick)
{
    VMCPU_ASSERT_EMT(pVCpu);
    STAM_COUNTER_INC(&pVM->tm.s.StatTSCSet);

    /*
     * This is easier to do when the TSC is paused since resume will
     * do all the calculations for us.  Actually, we don't need to
     * call tmCpuTickPause here since we overwrite u64TSC anyway.
     */
    bool fTSCTicking = pVCpu->tm.s.fTSCTicking;
    pVCpu->tm.s.fTSCTicking = false;
    pVCpu->tm.s.u64TSC = u64Tick;
    pVCpu->tm.s.u64TSCLastSeen = u64Tick;
    if (fTSCTicking)
        tmCpuTickResume(pVM, pVCpu);
    /** @todo Try help synchronizing it better among the virtual CPUs? */

    return VINF_SUCCESS;
}

/**
 * Sets the last seen CPU timestamp counter.
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   u64LastSeenTick The last seen timestamp value.
 *
 * @thread  EMT which TSC is to be set.
 */
VMM_INT_DECL(int) TMCpuTickSetLastSeen(PVMCPU pVCpu, uint64_t u64LastSeenTick)
{
    VMCPU_ASSERT_EMT(pVCpu);

    LogFlow(("TMCpuTickSetLastSeen %RX64\n", u64LastSeenTick));
    if (pVCpu->tm.s.u64TSCLastSeen < u64LastSeenTick)
        pVCpu->tm.s.u64TSCLastSeen = u64LastSeenTick;
    return VINF_SUCCESS;
}

/**
 * Gets the last seen CPU timestamp counter of the guest.
 *
 * @returns the last seen TSC.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetLastSeen(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    return pVCpu->tm.s.u64TSCLastSeen;
}


/**
 * Get the timestamp frequency.
 *
 * @returns Number of ticks per second.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM)
{
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
        && g_pSUPGlobalInfoPage->u32Mode != SUPGIPMODE_INVARIANT_TSC)
    {
#ifdef IN_RING3
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
#elif defined(IN_RING0)
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, RTMpCpuIdToSetIndex(RTMpCpuId()));
#else
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, VMMGetCpu(pVM)->iHostCpuSet);
#endif
        if (RT_LIKELY(cTSCTicksPerSecond != ~(uint64_t)0))
            return cTSCTicksPerSecond;
    }
    return pVM->tm.s.cTSCTicksPerSecond;
}


/**
 * Whether the TSC is ticking for the VCPU.
 *
 * @returns true if ticking, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) TMCpuTickIsTicking(PVMCPU pVCpu)
{
    return pVCpu->tm.s.fTSCTicking;
}