VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrvGip.cpp@ 54998

Last change on this file since 54998 was 54581, checked in by vboxsync, 10 years ago

HostDrivers/Support: add and use supdrvOSAreCpusOfflinedOnSuspend(). FreeBSD, Darwin, Solaris need verification.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 172.0 KB
Line 
1/* $Id: SUPDrvGip.cpp 54581 2015-03-02 14:56:02Z vboxsync $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code for GIP.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27/*******************************************************************************
28* Header Files *
29*******************************************************************************/
30#define LOG_GROUP LOG_GROUP_SUP_DRV
31#define SUPDRV_AGNOSTIC
32#include "SUPDrvInternal.h"
33#ifndef PAGE_SHIFT
34# include <iprt/param.h>
35#endif
36#include <iprt/asm.h>
37#include <iprt/asm-amd64-x86.h>
38#include <iprt/asm-math.h>
39#include <iprt/cpuset.h>
40#include <iprt/handletable.h>
41#include <iprt/mem.h>
42#include <iprt/mp.h>
43#include <iprt/power.h>
44#include <iprt/process.h>
45#include <iprt/semaphore.h>
46#include <iprt/spinlock.h>
47#include <iprt/thread.h>
48#include <iprt/uuid.h>
49#include <iprt/net.h>
50#include <iprt/crc.h>
51#include <iprt/string.h>
52#include <iprt/timer.h>
53#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
54# include <iprt/rand.h>
55# include <iprt/path.h>
56#endif
57#include <iprt/uint128.h>
58#include <iprt/x86.h>
59
60#include <VBox/param.h>
61#include <VBox/log.h>
62#include <VBox/err.h>
63
64#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
65# include "dtrace/SUPDrv.h"
66#else
67/* ... */
68#endif
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74/** The frequency by which we recalculate the u32UpdateHz and
75 * u32UpdateIntervalNS GIP members. The value must be a power of 2.
76 *
77 * Warning: Bumping this too high might overflow u32UpdateIntervalNS.
78 */
79#define GIP_UPDATEHZ_RECALC_FREQ 0x800
80
81/** A reserved TSC value used for synchronization as well as measurement of
82 * TSC deltas. */
83#define GIP_TSC_DELTA_RSVD UINT64_MAX
84/** The number of TSC delta measurement loops in total (includes primer and
85 * read-time loops). */
86#define GIP_TSC_DELTA_LOOPS 96
87/** The number of cache primer loops. */
88#define GIP_TSC_DELTA_PRIMER_LOOPS 4
89/** The number of loops until we keep computing the minimum read time. */
90#define GIP_TSC_DELTA_READ_TIME_LOOPS 24
91
92/** The TSC frequency refinement period in seconds.
93 * The timer fires after 200ms, then every second, this value just says when
94 * to stop it after that. */
95#define GIP_TSC_REFINE_PERIOD_IN_SECS 12
96/** The TSC-delta threshold for the SUPGIPUSETSCDELTA_PRACTICALLY_ZERO rating */
97#define GIP_TSC_DELTA_THRESHOLD_PRACTICALLY_ZERO 32
98/** The TSC-delta threshold for the SUPGIPUSETSCDELTA_ROUGHLY_ZERO rating */
99#define GIP_TSC_DELTA_THRESHOLD_ROUGHLY_ZERO 448
100/** The TSC delta value for the initial GIP master - 0 in regular builds.
101 * To test the delta code this can be set to a non-zero value. */
102#if 0
103# define GIP_TSC_DELTA_INITIAL_MASTER_VALUE INT64_C(170139095182512) /* 0x00009abd9854acb0 */
104#else
105# define GIP_TSC_DELTA_INITIAL_MASTER_VALUE INT64_C(0)
106#endif
107
108AssertCompile(GIP_TSC_DELTA_PRIMER_LOOPS < GIP_TSC_DELTA_READ_TIME_LOOPS);
109AssertCompile(GIP_TSC_DELTA_PRIMER_LOOPS + GIP_TSC_DELTA_READ_TIME_LOOPS < GIP_TSC_DELTA_LOOPS);
110
111/** @def VBOX_SVN_REV
112 * The makefile should define this if it can. */
113#ifndef VBOX_SVN_REV
114# define VBOX_SVN_REV 0
115#endif
116
117#if 0 /* Don't start the GIP timers. Useful when debugging the IPRT timer code. */
118# define DO_NOT_START_GIP
119#endif
120
121
122/*******************************************************************************
123* Internal Functions *
124*******************************************************************************/
125static DECLCALLBACK(void) supdrvGipSyncAndInvariantTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
126static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
127static void supdrvGipInitCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pCpu, uint64_t u64NanoTS, uint64_t uCpuHz);
128static void supdrvTscResetSamples(PSUPDRVDEVEXT pDevExt, bool fClearDeltas);
129#ifdef SUPDRV_USE_TSC_DELTA_THREAD
130static int supdrvTscDeltaThreadInit(PSUPDRVDEVEXT pDevExt);
131static void supdrvTscDeltaTerm(PSUPDRVDEVEXT pDevExt);
132static void supdrvTscDeltaThreadStartMeasurement(PSUPDRVDEVEXT pDevExt, bool fForceAll);
133#else
134static int supdrvMeasureInitialTscDeltas(PSUPDRVDEVEXT pDevExt);
135static int supdrvMeasureTscDeltaOne(PSUPDRVDEVEXT pDevExt, uint32_t idxWorker);
136#endif
137
138
139/*******************************************************************************
140* Global Variables *
141*******************************************************************************/
142DECLEXPORT(PSUPGLOBALINFOPAGE) g_pSUPGlobalInfoPage = NULL;
143
144
145
146/*
147 *
148 * Misc Common GIP Code
149 * Misc Common GIP Code
150 * Misc Common GIP Code
151 *
152 *
153 */
154
155
156/**
157 * Finds the GIP CPU index corresponding to @a idCpu.
158 *
159 * @returns GIP CPU array index, UINT32_MAX if not found.
160 * @param pGip The GIP.
161 * @param idCpu The CPU ID.
162 */
163static uint32_t supdrvGipFindCpuIndexForCpuId(PSUPGLOBALINFOPAGE pGip, RTCPUID idCpu)
164{
165 uint32_t i;
166 for (i = 0; i < pGip->cCpus; i++)
167 if (pGip->aCPUs[i].idCpu == idCpu)
168 return i;
169 return UINT32_MAX;
170}
171
172
173
174/*
175 *
176 * GIP Mapping and Unmapping Related Code.
177 * GIP Mapping and Unmapping Related Code.
178 * GIP Mapping and Unmapping Related Code.
179 *
180 *
181 */
182
183
/**
 * (Re-)initializes the per-cpu structure prior to starting or resuming the GIP
 * updating.
 *
 * @param   pGip        Pointer to the GIP (currently unused here, kept for
 *                      symmetry with the other per-CPU init routines).
 * @param   pGipCpu     The per CPU structure for this CPU.
 * @param   u64NanoTS   The current time.
 */
static void supdrvGipReInitCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS)
{
    /*
     * Here we don't really care about applying the TSC delta. The re-initialization of this
     * value is not relevant especially while (re)starting the GIP as the first few ones will
     * be ignored anyway, see supdrvGipDoUpdateCpu().
     */
    /* Back-date the TSC anchor by one update interval so the first update after
       (re)start sees a plausible elapsed-tick count instead of ~zero. */
    pGipCpu->u64TSC    = ASMReadTSC() - pGipCpu->u32UpdateIntervalTSC;
    pGipCpu->u64NanoTS = u64NanoTS;
}
202
203
/**
 * Set the current TSC and NanoTS value for the CPU.
 *
 * Worker for RTMpOnAll; runs on every online CPU.
 *
 * @param   idCpu       The CPU ID. Unused - we have to use the APIC ID.
 * @param   pvUser1     Pointer to the ring-0 GIP mapping.
 * @param   pvUser2     Pointer to the variable holding the current time.
 */
static DECLCALLBACK(void) supdrvGipReInitCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PSUPGLOBALINFOPAGE pGip = (PSUPGLOBALINFOPAGE)pvUser1;
    unsigned iCpu = pGip->aiCpuFromApicId[ASMGetApicId()];

    /* Only touch the entry if the APIC-ID lookup yields a valid slot that
       really belongs to this CPU; otherwise silently skip (stale tables). */
    if (RT_LIKELY(iCpu < pGip->cCpus && pGip->aCPUs[iCpu].idCpu == idCpu))
        supdrvGipReInitCpu(pGip, &pGip->aCPUs[iCpu], *(uint64_t *)pvUser2);

    NOREF(pvUser2);
    NOREF(idCpu);
}
222
223
/**
 * State structure for supdrvGipDetectGetGipCpuCallback.
 *
 * Shared between all CPUs during the RTMpOnAll detection pass, hence the
 * volatile members and atomic updates in the callback.
 */
typedef struct SUPDRVGIPDETECTGETCPU
{
    /** Bitmap of APIC IDs that have been seen (initialized to zero).
     * Used to detect duplicate APIC IDs (paranoia). */
    uint8_t volatile bmApicId[256 / 8];
    /** Mask of supported GIP CPU getter methods (SUPGIPGETCPU_XXX) (all bits set
     * initially). The callback clears the methods not detected. */
    uint32_t volatile fSupported;
    /** The first callback detecting any kind of range issues (initialized to
     * NIL_RTCPUID). */
    RTCPUID volatile idCpuProblem;
} SUPDRVGIPDETECTGETCPU;
/** Pointer to state structure for supdrvGipDetectGetGipCpuCallback. */
typedef SUPDRVGIPDETECTGETCPU *PSUPDRVGIPDETECTGETCPU;
241
242
243/**
244 * Checks for alternative ways of getting the CPU ID.
245 *
246 * This also checks the APIC ID, CPU ID and CPU set index values against the
247 * GIP tables.
248 *
249 * @param idCpu The CPU ID. Unused - we have to use the APIC ID.
250 * @param pvUser1 Pointer to the state structure.
251 * @param pvUser2 Pointer to the GIP.
252 */
253static DECLCALLBACK(void) supdrvGipDetectGetGipCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
254{
255 PSUPDRVGIPDETECTGETCPU pState = (PSUPDRVGIPDETECTGETCPU)pvUser1;
256 PSUPGLOBALINFOPAGE pGip = (PSUPGLOBALINFOPAGE)pvUser2;
257 uint32_t fSupported = 0;
258 uint16_t idApic;
259 int iCpuSet;
260
261 AssertMsg(idCpu == RTMpCpuId(), ("idCpu=%#x RTMpCpuId()=%#x\n", idCpu, RTMpCpuId())); /* paranoia^3 */
262
263 /*
264 * Check that the CPU ID and CPU set index are interchangable.
265 */
266 iCpuSet = RTMpCpuIdToSetIndex(idCpu);
267 if ((RTCPUID)iCpuSet == idCpu)
268 {
269 AssertCompile(RT_IS_POWER_OF_TWO(RTCPUSET_MAX_CPUS));
270 if ( iCpuSet >= 0
271 && iCpuSet < RTCPUSET_MAX_CPUS
272 && RT_IS_POWER_OF_TWO(RTCPUSET_MAX_CPUS))
273 {
274 /*
275 * Check whether the IDTR.LIMIT contains a CPU number.
276 */
277#ifdef RT_ARCH_X86
278 uint16_t const cbIdt = sizeof(X86DESC64SYSTEM) * 256;
279#else
280 uint16_t const cbIdt = sizeof(X86DESCGATE) * 256;
281#endif
282 RTIDTR Idtr;
283 ASMGetIDTR(&Idtr);
284 if (Idtr.cbIdt >= cbIdt)
285 {
286 uint32_t uTmp = Idtr.cbIdt - cbIdt;
287 uTmp &= RTCPUSET_MAX_CPUS - 1;
288 if (uTmp == idCpu)
289 {
290 RTIDTR Idtr2;
291 ASMGetIDTR(&Idtr2);
292 if (Idtr2.cbIdt == Idtr.cbIdt)
293 fSupported |= SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS;
294 }
295 }
296
297 /*
298 * Check whether RDTSCP is an option.
299 */
300 if (ASMHasCpuId())
301 {
302 if ( ASMIsValidExtRange(ASMCpuId_EAX(UINT32_C(0x80000000)))
303 && (ASMCpuId_EDX(UINT32_C(0x80000001)) & X86_CPUID_EXT_FEATURE_EDX_RDTSCP) )
304 {
305 uint32_t uAux;
306 ASMReadTscWithAux(&uAux);
307 if ((uAux & (RTCPUSET_MAX_CPUS - 1)) == idCpu)
308 {
309 ASMNopPause();
310 ASMReadTscWithAux(&uAux);
311 if ((uAux & (RTCPUSET_MAX_CPUS - 1)) == idCpu)
312 fSupported |= SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS;
313 }
314 }
315 }
316 }
317 }
318
319 /*
320 * Check that the APIC ID is unique.
321 */
322 idApic = ASMGetApicId();
323 if (RT_LIKELY( idApic < RT_ELEMENTS(pGip->aiCpuFromApicId)
324 && !ASMAtomicBitTestAndSet(pState->bmApicId, idApic)))
325 fSupported |= SUPGIPGETCPU_APIC_ID;
326 else
327 {
328 AssertCompile(sizeof(pState->bmApicId) * 8 == RT_ELEMENTS(pGip->aiCpuFromApicId));
329 ASMAtomicCmpXchgU32(&pState->idCpuProblem, idCpu, NIL_RTCPUID);
330 LogRel(("supdrvGipDetectGetGipCpuCallback: idCpu=%#x iCpuSet=%d idApic=%#x - duplicate APIC ID.\n",
331 idCpu, iCpuSet, idApic));
332 }
333
334 /*
335 * Check that the iCpuSet is within the expected range.
336 */
337 if (RT_UNLIKELY( iCpuSet < 0
338 || (unsigned)iCpuSet >= RTCPUSET_MAX_CPUS
339 || (unsigned)iCpuSet >= RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
340 {
341 ASMAtomicCmpXchgU32(&pState->idCpuProblem, idCpu, NIL_RTCPUID);
342 LogRel(("supdrvGipDetectGetGipCpuCallback: idCpu=%#x iCpuSet=%d idApic=%#x - CPU set index is out of range.\n",
343 idCpu, iCpuSet, idApic));
344 }
345 else
346 {
347 RTCPUID idCpu2 = RTMpCpuIdFromSetIndex(iCpuSet);
348 if (RT_UNLIKELY(idCpu2 != idCpu))
349 {
350 ASMAtomicCmpXchgU32(&pState->idCpuProblem, idCpu, NIL_RTCPUID);
351 LogRel(("supdrvGipDetectGetGipCpuCallback: idCpu=%#x iCpuSet=%d idApic=%#x - CPU id/index roundtrip problem: %#x\n",
352 idCpu, iCpuSet, idApic, idCpu2));
353 }
354 }
355
356 /*
357 * Update the supported feature mask before we return.
358 */
359 ASMAtomicAndU32(&pState->fSupported, fSupported);
360
361 NOREF(pvUser2);
362}
363
364
/**
 * Increase the timer frequency on hosts where this is possible (NT).
 *
 * The idea is that more interrupts is better for us... Also, it's better that
 * we raise the timer frequency ourselves than have someone else do it, because
 * then we might end up getting inaccurate callbacks.
 *
 * @param   pDevExt     Sets u32SystemTimerGranularityGrant if increased.
 */
static void supdrvGipRequestHigherTimerFrequencyFromSystem(PSUPDRVDEVEXT pDevExt)
{
    /* Only request once; the grant is released in
       supdrvGipReleaseHigherTimerFrequencyFromSystem(). */
    if (pDevExt->u32SystemTimerGranularityGrant == 0)
    {
        uint32_t u32SystemResolution;
        /* Try progressively coarser granularities (in 100ns units) until one sticks. */
        if (   RT_SUCCESS_NP(RTTimerRequestSystemGranularity(  976563 /* 1024 HZ */, &u32SystemResolution))
            || RT_SUCCESS_NP(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
            || RT_SUCCESS_NP(RTTimerRequestSystemGranularity( 1953125 /*  512 HZ */, &u32SystemResolution))
            || RT_SUCCESS_NP(RTTimerRequestSystemGranularity( 2000000 /*  500 HZ */, &u32SystemResolution))
           )
        {
            Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
            pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
        }
    }
}
390
391
392/**
393 * Undoes supdrvGipRequestHigherTimerFrequencyFromSystem.
394 *
395 * @param pDevExt Clears u32SystemTimerGranularityGrant.
396 */
397static void supdrvGipReleaseHigherTimerFrequencyFromSystem(PSUPDRVDEVEXT pDevExt)
398{
399 if (pDevExt->u32SystemTimerGranularityGrant)
400 {
401 int rc2 = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant);
402 AssertRC(rc2);
403 pDevExt->u32SystemTimerGranularityGrant = 0;
404 }
405}
406
407
/**
 * Maps the GIP into userspace and/or get the physical address of the GIP.
 *
 * @returns IPRT status code.
 * @param   pSession    Session to which the GIP mapping should belong.
 * @param   ppGipR3     Where to store the address of the ring-3 mapping. (optional)
 * @param   pHCPhysGip  Where to store the physical address. (optional)
 *
 * @remark  There is no reference counting on the mapping, so one call to this function
 *          counts globally as one reference. One call to SUPR0GipUnmap() will unmap GIP
 *          and remove the session as a GIP user.
 */
SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PRTR3PTR ppGipR3, PRTHCPHYS pHCPhysGip)
{
    int rc;
    PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
    RTR3PTR pGipR3 = NIL_RTR3PTR;
    RTHCPHYS HCPhys = NIL_RTHCPHYS;
    LogFlow(("SUPR0GipMap: pSession=%p ppGipR3=%p pHCPhysGip=%p\n", pSession, ppGipR3, pHCPhysGip));

    /*
     * Validate
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(ppGipR3, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pHCPhysGip, VERR_INVALID_POINTER);

#ifdef SUPDRV_USE_MUTEX_FOR_GIP
    RTSemMutexRequest(pDevExt->mtxGip, RT_INDEFINITE_WAIT);
#else
    RTSemFastMutexRequest(pDevExt->mtxGip);
#endif
    if (pDevExt->pGip)
    {
        /*
         * Map it?
         */
        rc = VINF_SUCCESS;
        if (ppGipR3)
        {
            if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
                rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (RTR3PTR)-1, 0,
                                       RTMEM_PROT_READ, RTR0ProcHandleSelf());
            if (RT_SUCCESS(rc))
                pGipR3 = RTR0MemObjAddressR3(pSession->GipMapObjR3);
        }

        /*
         * Get physical address.
         */
        if (pHCPhysGip && RT_SUCCESS(rc))
            HCPhys = pDevExt->HCPhysGip;

        /*
         * Reference globally.
         */
        if (!pSession->fGipReferenced && RT_SUCCESS(rc))
        {
            pSession->fGipReferenced = 1;
            pDevExt->cGipUsers++;
            if (pDevExt->cGipUsers == 1)
            {
                PSUPGLOBALINFOPAGE pGipR0 = pDevExt->pGip;
                uint64_t u64NanoTS;

                /*
                 * GIP starts/resumes updating again. On windows we bump the
                 * host timer frequency to make sure we don't get stuck in guest
                 * mode and to get better timer (and possibly clock) accuracy.
                 */
                LogFlow(("SUPR0GipMap: Resumes GIP updating\n"));

                supdrvGipRequestHigherTimerFrequencyFromSystem(pDevExt);

                /*
                 * When resuming updates (i.e. not the very first start, where
                 * u32TransactionId is still 2), round each CPU's transaction ID
                 * up to the next multiple of GIP_UPDATEHZ_RECALC_FREQ * 2 and
                 * zero u64NanoTSLastUpdateHz so the update-interval/Hz
                 * recalculation starts afresh instead of using stale anchors.
                 */
                if (pGipR0->aCPUs[0].u32TransactionId != 2 /* not the first time */)
                {
                    unsigned i;
                    for (i = 0; i < pGipR0->cCpus; i++)
                        ASMAtomicUoWriteU32(&pGipR0->aCPUs[i].u32TransactionId,
                                            (pGipR0->aCPUs[i].u32TransactionId + GIP_UPDATEHZ_RECALC_FREQ * 2)
                                            & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
                    ASMAtomicWriteU64(&pGipR0->u64NanoTSLastUpdateHz, 0);
                }

                /*
                 * Re-initialize the TSC/NanoTS anchor values, backdated by one
                 * update interval.  For invariant/sync TSC modes (or a single
                 * online CPU) only aCPUs[0] needs refreshing; in async mode
                 * every CPU is re-initialized via an IPI to all CPUs.
                 */
                u64NanoTS = RTTimeSystemNanoTS() - pGipR0->u32UpdateIntervalNS;
                if (   pGipR0->u32Mode == SUPGIPMODE_INVARIANT_TSC
                    || pGipR0->u32Mode == SUPGIPMODE_SYNC_TSC
                    || RTMpGetOnlineCount() == 1)
                    supdrvGipReInitCpu(pGipR0, &pGipR0->aCPUs[0], u64NanoTS);
                else
                    RTMpOnAll(supdrvGipReInitCpuCallback, pGipR0, &u64NanoTS);

                /*
                 * Detect alternative ways to figure the CPU ID in ring-3 and
                 * raw-mode context. Check the sanity of the APIC IDs, CPU IDs,
                 * and CPU set indexes while we're at it.
                 */
                if (RT_SUCCESS(rc))
                {
                    SUPDRVGIPDETECTGETCPU DetectState;
                    RT_BZERO((void *)&DetectState.bmApicId, sizeof(DetectState.bmApicId));
                    DetectState.fSupported = UINT32_MAX;
                    DetectState.idCpuProblem = NIL_RTCPUID;
                    rc = RTMpOnAll(supdrvGipDetectGetGipCpuCallback, &DetectState, pGipR0);
                    if (DetectState.idCpuProblem == NIL_RTCPUID)
                    {
                        if (   DetectState.fSupported != UINT32_MAX
                            && DetectState.fSupported != 0)
                        {
                            /* Publish the detected getter methods if they changed. */
                            if (pGipR0->fGetGipCpu != DetectState.fSupported)
                            {
                                pGipR0->fGetGipCpu = DetectState.fSupported;
                                LogRel(("SUPR0GipMap: fGetGipCpu=%#x\n", DetectState.fSupported));
                            }
                        }
                        else
                        {
                            LogRel(("SUPR0GipMap: No supported ways of getting the APIC ID or CPU number in ring-3! (%#x)\n",
                                    DetectState.fSupported));
                            rc = VERR_UNSUPPORTED_CPU;
                        }
                    }
                    else
                    {
                        LogRel(("SUPR0GipMap: APIC ID, CPU ID or CPU set index problem detected on CPU #%u (%#x)!\n",
                                DetectState.idCpuProblem, DetectState.idCpuProblem));
                        rc = VERR_INVALID_CPU_ID;
                    }
                }

                /*
                 * Start the GIP timer if all is well..
                 */
                if (RT_SUCCESS(rc))
                {
#ifndef DO_NOT_START_GIP
                    rc = RTTimerStart(pDevExt->pGipTimer, 0 /* fire ASAP */); AssertRC(rc);
#endif
                    rc = VINF_SUCCESS;
                }

                /*
                 * Bail out on error.
                 */
                if (RT_FAILURE(rc))
                {
                    LogRel(("SUPR0GipMap: failed rc=%Rrc\n", rc));
                    pDevExt->cGipUsers = 0;
                    pSession->fGipReferenced = 0;
                    if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
                    {
                        int rc2 = RTR0MemObjFree(pSession->GipMapObjR3, false); AssertRC(rc2);
                        if (RT_SUCCESS(rc2))
                            pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
                    }
                    HCPhys = NIL_RTHCPHYS;
                    pGipR3 = NIL_RTR3PTR;
                }
            }
        }
    }
    else
    {
        rc = VERR_GENERAL_FAILURE;
        Log(("SUPR0GipMap: GIP is not available!\n"));
    }
#ifdef SUPDRV_USE_MUTEX_FOR_GIP
    RTSemMutexRelease(pDevExt->mtxGip);
#else
    RTSemFastMutexRelease(pDevExt->mtxGip);
#endif

    /*
     * Write returns.
     */
    if (pHCPhysGip)
        *pHCPhysGip = HCPhys;
    if (ppGipR3)
        *ppGipR3 = pGipR3;

#ifdef DEBUG_DARWIN_GIP
    OSDBGPRINT(("SUPR0GipMap: returns %d *pHCPhysGip=%lx pGipR3=%p\n", rc, (unsigned long)HCPhys, (void *)pGipR3));
#else
    LogFlow(( "SUPR0GipMap: returns %d *pHCPhysGip=%lx pGipR3=%p\n", rc, (unsigned long)HCPhys, (void *)pGipR3));
#endif
    return rc;
}
601
602
/**
 * Unmaps any user mapping of the GIP and terminates all GIP access
 * from this session.
 *
 * Counterpart to SUPR0GipMap(); when the last session reference is dropped
 * the GIP update timer is stopped and the raised host timer frequency is
 * released again.
 *
 * @returns IPRT status code.
 * @param   pSession    Session to which the GIP mapping should belong.
 */
SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
{
    int rc = VINF_SUCCESS;
    PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
#ifdef DEBUG_DARWIN_GIP
    OSDBGPRINT(("SUPR0GipUnmap: pSession=%p pGip=%p GipMapObjR3=%p\n",
                pSession,
                pSession->GipMapObjR3 != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pSession->GipMapObjR3) : NULL,
                pSession->GipMapObjR3));
#else
    LogFlow(("SUPR0GipUnmap: pSession=%p\n", pSession));
#endif
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);

#ifdef SUPDRV_USE_MUTEX_FOR_GIP
    RTSemMutexRequest(pDevExt->mtxGip, RT_INDEFINITE_WAIT);
#else
    RTSemFastMutexRequest(pDevExt->mtxGip);
#endif

    /*
     * Unmap anything?
     */
    if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
    {
        rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
            pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
    }

    /*
     * Dereference global GIP.
     * Only done if the unmap succeeded (rc == VINF_SUCCESS == 0).
     */
    if (pSession->fGipReferenced && !rc)
    {
        pSession->fGipReferenced = 0;
        if (   pDevExt->cGipUsers > 0
            && !--pDevExt->cGipUsers)
        {
            /* Last user gone: suspend GIP updating and undo the timer
               granularity request made when updating started. */
            LogFlow(("SUPR0GipUnmap: Suspends GIP updating\n"));
#ifndef DO_NOT_START_GIP
            rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = VINF_SUCCESS;
#endif
            supdrvGipReleaseHigherTimerFrequencyFromSystem(pDevExt);
        }
    }

#ifdef SUPDRV_USE_MUTEX_FOR_GIP
    RTSemMutexRelease(pDevExt->mtxGip);
#else
    RTSemFastMutexRelease(pDevExt->mtxGip);
#endif

    return rc;
}
666
667
668/**
669 * Gets the GIP pointer.
670 *
671 * @returns Pointer to the GIP or NULL.
672 */
673SUPDECL(PSUPGLOBALINFOPAGE) SUPGetGIP(void)
674{
675 return g_pSUPGlobalInfoPage;
676}
677
678
679
680
681
682/*
683 *
684 *
685 * GIP Initialization, Termination and CPU Offline / Online Related Code.
686 * GIP Initialization, Termination and CPU Offline / Online Related Code.
687 * GIP Initialization, Termination and CPU Offline / Online Related Code.
688 *
689 *
690 */
691
/**
 * Used by supdrvInitRefineInvariantTscFreqTimer and supdrvGipInitMeasureTscFreq
 * to update the TSC frequency related GIP variables.
 *
 * Computes uCpuHz = cElapsedTscTicks * 10^9 / nsElapsed and publishes it.
 *
 * @param   pGip                The GIP.
 * @param   nsElapsed           The number of nanoseconds elapsed.
 * @param   cElapsedTscTicks    The corresponding number of TSC ticks.
 * @param   iTick               The tick number for debugging.
 */
static void supdrvGipInitSetCpuFreq(PSUPGLOBALINFOPAGE pGip, uint64_t nsElapsed, uint64_t cElapsedTscTicks, uint32_t iTick)
{
    /*
     * Calculate the frequency.
     */
    uint64_t uCpuHz;
    /* Fast path when ticks * RT_NS_1SEC cannot overflow 64+32 bit math and
       the divisor fits in 32 bits; otherwise fall back to 128-bit math. */
    if (   cElapsedTscTicks < UINT64_MAX / RT_NS_1SEC
        && nsElapsed < UINT32_MAX)
        uCpuHz = ASMMultU64ByU32DivByU32(cElapsedTscTicks, RT_NS_1SEC, (uint32_t)nsElapsed);
    else
    {
        RTUINT128U CpuHz, Tmp, Divisor;
        CpuHz.s.Lo = CpuHz.s.Hi = 0;
        RTUInt128MulU64ByU64(&Tmp, cElapsedTscTicks, RT_NS_1SEC_64);
        RTUInt128Div(&CpuHz, &Tmp, RTUInt128AssignU64(&Divisor, nsElapsed));
        uCpuHz = CpuHz.s.Lo;
    }

    /*
     * Update the GIP.
     */
    ASMAtomicWriteU64(&pGip->u64CpuHz, uCpuHz);
    if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
    {
        /* In non-async modes all CPUs share one frequency; mirror it into CPU 0. */
        ASMAtomicWriteU64(&pGip->aCPUs[0].u64CpuHz, uCpuHz);

        /* For inspecting the frequency calcs using tstGIP-2, debugger or similar. */
        if (iTick + 1 < pGip->cCpus)
            ASMAtomicWriteU64(&pGip->aCPUs[iTick + 1].u64CpuHz, uCpuHz);
    }
}
732
733
/**
 * Timer callback function for TSC frequency refinement in invariant GIP mode.
 *
 * This is started during driver init and fires once
 * GIP_TSC_REFINE_PERIOD_IN_SECS seconds later.
 *
 * @param   pTimer      The timer.
 * @param   pvUser      Opaque pointer to the device instance data.
 * @param   iTick       The timer tick.
 */
static DECLCALLBACK(void) supdrvInitRefineInvariantTscFreqTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
{
    PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
    PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
    RTCPUID idCpu;
    uint64_t cNsElapsed;
    uint64_t cTscTicksElapsed;
    uint64_t nsNow;
    uint64_t uTsc;
    RTCCUINTREG fEFlags;

    /* Paranoia. */
    AssertReturnVoid(pGip);
    AssertReturnVoid(pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC);

    /*
     * If we got a power event, stop the refinement process.
     */
    if (pDevExt->fInvTscRefinePowerEvent)
    {
        int rc = RTTimerStop(pTimer); AssertRC(rc);
        return;
    }

    /*
     * Read the TSC and time, noting which CPU we are on.
     *
     * Don't bother spinning until RTTimeSystemNanoTS changes, since on
     * systems where it matters we're in a context where we cannot waste that
     * much time (DPC watchdog, called from clock interrupt).
     */
    /* Interrupts are disabled so TSC, time and CPU ID are sampled coherently. */
    fEFlags = ASMIntDisableFlags();
    uTsc = ASMReadTSC();
    nsNow = RTTimeSystemNanoTS();
    idCpu = RTMpCpuId();
    ASMSetFlags(fEFlags);

    cNsElapsed = nsNow - pDevExt->nsStartInvarTscRefine;
    cTscTicksElapsed = uTsc - pDevExt->uTscStartInvarTscRefine;

    /*
     * If the above measurement was taken on a different CPU than the one we
     * started the process on, cTscTicksElapsed will need to be adjusted with
     * the TSC deltas of both the CPUs.
     *
     * We ASSUME that the delta calculation process takes less time than the
     * TSC frequency refinement timer. If it doesn't, we'll complain and
     * drop the frequency refinement.
     *
     * Note! We cannot entirely trust enmUseTscDelta here because it's
     *       downgraded after each delta calculation.
     */
    if (   idCpu != pDevExt->idCpuInvarTscRefine
        && pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED)
    {
        /* Map both CPU IDs thru set index to GIP CPU index to get at the deltas;
           INT64_MAX doubles as the "delta not yet measured" marker. */
        uint32_t iStartCpuSet = RTMpCpuIdToSetIndex(pDevExt->idCpuInvarTscRefine);
        uint32_t iStopCpuSet = RTMpCpuIdToSetIndex(idCpu);
        uint16_t iStartGipCpu = iStartCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)
                              ? pGip->aiCpuFromCpuSetIdx[iStartCpuSet] : UINT16_MAX;
        uint16_t iStopGipCpu = iStopCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)
                             ? pGip->aiCpuFromCpuSetIdx[iStopCpuSet] : UINT16_MAX;
        int64_t iStartTscDelta = iStartGipCpu < pGip->cCpus ? pGip->aCPUs[iStartGipCpu].i64TSCDelta : INT64_MAX;
        int64_t iStopTscDelta = iStopGipCpu < pGip->cCpus ? pGip->aCPUs[iStopGipCpu].i64TSCDelta : INT64_MAX;
        if (RT_LIKELY(iStartTscDelta != INT64_MAX && iStopTscDelta != INT64_MAX))
        {
            if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_PRACTICALLY_ZERO)
            {
                /* cTscTicksElapsed = (uTsc - iStopTscDelta) - (pDevExt->uTscStartInvarTscRefine - iStartTscDelta); */
                cTscTicksElapsed += iStartTscDelta - iStopTscDelta;
            }
        }
        /*
         * Allow 5 times the refinement period to elapse before we give up on the TSC delta
         * calculations.
         */
        else if (cNsElapsed > GIP_TSC_REFINE_PERIOD_IN_SECS * 5 * RT_NS_1SEC_64)
        {
            SUPR0Printf("vboxdrv: Failed to refine invariant TSC frequency because deltas are unavailable after %u (%u) seconds\n",
                        (uint32_t)(cNsElapsed / RT_NS_1SEC), GIP_TSC_REFINE_PERIOD_IN_SECS);
            SUPR0Printf("vboxdrv: start: %u, %u, %#llx stop: %u, %u, %#llx\n",
                        iStartCpuSet, iStartGipCpu, iStartTscDelta, iStopCpuSet, iStopGipCpu, iStopTscDelta);
            int rc = RTTimerStop(pTimer); AssertRC(rc);
            return;
        }
    }

    /*
     * Calculate and update the CPU frequency variables in GIP.
     *
     * If there is a GIP user already and we've already refined the frequency
     * a couple of times, don't update it as we want a stable frequency value
     * for all VMs.
     */
    if (   pDevExt->cGipUsers == 0
        || cNsElapsed < RT_NS_1SEC * 2)
    {
        supdrvGipInitSetCpuFreq(pGip, cNsElapsed, cTscTicksElapsed, (uint32_t)iTick);

        /*
         * Stop the timer once we've reached the defined refinement period.
         */
        if (cNsElapsed > GIP_TSC_REFINE_PERIOD_IN_SECS * RT_NS_1SEC_64)
        {
            int rc = RTTimerStop(pTimer);
            AssertRC(rc);
        }
    }
    else
    {
        /* A VM is up and we've refined long enough; freeze the frequency. */
        int rc = RTTimerStop(pTimer);
        AssertRC(rc);
    }
}
857
858
/**
 * @callback_method_impl{FNRTPOWERNOTIFICATION}
 *
 * Flags the TSC-frequency refinement timer to stop on both suspend and
 * resume, and kicks off a full TSC-delta re-measurement on resume.
 */
static DECLCALLBACK(void) supdrvGipPowerNotificationCallback(RTPOWEREVENT enmEvent, void *pvUser)
{
    PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
    PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;

    /*
     * If the TSC frequency refinement timer is running, we need to cancel it so it
     * doesn't screw up the frequency after a long suspend.
     *
     * Recalculate all TSC-deltas on host resume as it may have changed, seen
     * on Windows 7 running on the Dell Optiplex Intel Core i5-3570.
     */
    if (enmEvent == RTPOWEREVENT_RESUME)
    {
        ASMAtomicWriteBool(&pDevExt->fInvTscRefinePowerEvent, true);
        /* Only re-measure if deltas are in use and the OS didn't offline the
           CPUs during suspend (in which case offline/online events handle it). */
        if (   RT_LIKELY(pGip)
            && pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED
            && !supdrvOSAreCpusOfflinedOnSuspend())
        {
#ifdef SUPDRV_USE_TSC_DELTA_THREAD
            supdrvTscDeltaThreadStartMeasurement(pDevExt, true /* fForceAll */);
#else
            RTCpuSetCopy(&pDevExt->TscDeltaCpuSet, &pGip->OnlineCpuSet);
            supdrvMeasureInitialTscDeltas(pDevExt);
#endif
        }
    }
    else if (enmEvent == RTPOWEREVENT_SUSPEND)
        ASMAtomicWriteBool(&pDevExt->fInvTscRefinePowerEvent, true);
}
892
893
/**
 * Start the TSC-frequency refinement timer for the invariant TSC GIP mode.
 *
 * We cannot use this in the synchronous and asynchronous tsc GIP modes because
 * the CPU may change the TSC frequency between now and when the timer fires
 * (supdrvInitAsyncRefineTscTimer).
 *
 * @param   pDevExt     Pointer to the device instance data.
 * @param   pGip        Pointer to the GIP.
 */
static void supdrvGipInitStartTimerForRefiningInvariantTscFreq(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip)
{
    uint64_t u64NanoTS;
    RTCCUINTREG fEFlags;
    int rc;

    /*
     * Register a power management callback.
     */
    pDevExt->fInvTscRefinePowerEvent = false;
    rc = RTPowerNotificationRegister(supdrvGipPowerNotificationCallback, pDevExt);
    AssertRC(rc); /* ignore */

    /*
     * Record the TSC and NanoTS as the starting anchor point for refinement
     * of the TSC. We try get as close to a clock tick as possible on systems
     * which does not provide high resolution time.
     */
    /* Spin until the system time changes so we start right after a tick edge. */
    u64NanoTS = RTTimeSystemNanoTS();
    while (RTTimeSystemNanoTS() == u64NanoTS)
        ASMNopPause();

    /* Sample TSC, time and CPU ID atomically w.r.t. interrupts. */
    fEFlags = ASMIntDisableFlags();
    pDevExt->uTscStartInvarTscRefine = ASMReadTSC();
    pDevExt->nsStartInvarTscRefine = RTTimeSystemNanoTS();
    pDevExt->idCpuInvarTscRefine = RTMpCpuId();
    ASMSetFlags(fEFlags);

    /*
     * Create a timer that runs on the same CPU so we won't have a dependency
     * on the TSC-delta and can run in parallel to it. On systems that does not
     * implement CPU specific timers we'll apply deltas in the timer callback,
     * just like we do for CPUs going offline.
     *
     * The longer the refinement interval the better the accuracy, at least in
     * theory. If it's too long though, ring-3 may already be starting its
     * first VMs before we're done. On most systems we will be loading the
     * support driver during boot and VMs won't be started for a while yet,
     * it is really only a problem during development (especially with
     * on-demand driver starting on windows).
     *
     * To avoid wasting time doing a long supdrvGipInitMeasureTscFreq() call
     * to calculate the frequency during driver loading, the timer is set
     * to fire after 200 ms the first time. It will then reschedule itself
     * to fire every second until GIP_TSC_REFINE_PERIOD_IN_SECS has been
     * reached or it notices that there is a user land client with GIP
     * mapped (we want a stable frequency for all VMs).
     */
    rc = RTTimerCreateEx(&pDevExt->pInvarTscRefineTimer, RT_NS_1SEC,
                         RTTIMER_FLAGS_CPU(RTMpCpuIdToSetIndex(pDevExt->idCpuInvarTscRefine)),
                         supdrvInitRefineInvariantTscFreqTimer, pDevExt);
    if (RT_SUCCESS(rc))
    {
        rc = RTTimerStart(pDevExt->pInvarTscRefineTimer, 2*RT_NS_100MS);
        if (RT_SUCCESS(rc))
            return;
        RTTimerDestroy(pDevExt->pInvarTscRefineTimer);
    }

    /* Fall back to an any-CPU timer if CPU-specific timers are unsupported
       or the anchor CPU went offline; the callback compensates with deltas. */
    if (rc == VERR_CPU_OFFLINE || rc == VERR_NOT_SUPPORTED)
    {
        rc = RTTimerCreateEx(&pDevExt->pInvarTscRefineTimer, RT_NS_1SEC, RTTIMER_FLAGS_CPU_ANY,
                             supdrvInitRefineInvariantTscFreqTimer, pDevExt);
        if (RT_SUCCESS(rc))
        {
            rc = RTTimerStart(pDevExt->pInvarTscRefineTimer, 2*RT_NS_100MS);
            if (RT_SUCCESS(rc))
                return;
            RTTimerDestroy(pDevExt->pInvarTscRefineTimer);
        }
    }

    /* Non-fatal: refinement is an accuracy improvement, not a requirement. */
    pDevExt->pInvarTscRefineTimer = NULL;
    OSDBGPRINT(("vboxdrv: Failed to create or start TSC frequency refinement timer: rc=%Rrc\n", rc));
}
979
980
981/**
982 * @callback_method_impl{PFNRTMPWORKER,
983 * RTMpOnSpecific callback for reading TSC and time on the CPU we started
984 * the measurements on.}
985 */
986DECLCALLBACK(void) supdrvGipInitReadTscAndNanoTsOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
987{
988 RTCCUINTREG fEFlags = ASMIntDisableFlags();
989 uint64_t *puTscStop = (uint64_t *)pvUser1;
990 uint64_t *pnsStop = (uint64_t *)pvUser2;
991
992 *puTscStop = ASMReadTSC();
993 *pnsStop = RTTimeSystemNanoTS();
994
995 ASMSetFlags(fEFlags);
996}
997
998
/**
 * Measures the TSC frequency of the system.
 *
 * The TSC frequency can vary on systems which are not reported as invariant.
 * On such systems the object of this function is to find out what the
 * nominal, maximum TSC frequency under 'normal' CPU operation is.
 *
 * The method: sample TSC + system time at the start and end of a waiting
 * interval and divide.  If the thread migrated to a different CPU in
 * between, the result may be invalid and we retry and/or re-read the stop
 * values on the start CPU via a cross call, depending on the GIP mode.
 *
 * @returns VBox status code.
 * @retval  VERR_SUPDRV_TSC_FREQ_MEASUREMENT_FAILED when all attempts failed
 *          (only possible when @a fRough is clear).
 * @retval  VERR_INVALID_CPU_INDEX when the GIP CPU mapping is inconsistent;
 *          this is treated as fatal (triggers driver load failure).
 *
 * @param   pDevExt     Pointer to the device instance.
 * @param   pGip        Pointer to the GIP.
 * @param   fRough      Set if we're doing the rough calculation that the
 *                      TSC measuring code needs, where accuracy isn't all
 *                      that important (too high is better than too low).
 *                      When clear we try for best accuracy that we can
 *                      achieve in reasonably short time.
 */
static int supdrvGipInitMeasureTscFreq(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, bool fRough)
{
    uint32_t nsTimerIncr = RTTimerGetSystemGranularity();
    int cTriesLeft = fRough ? 4 : 2;
    while (cTriesLeft-- > 0)
    {
        RTCCUINTREG fEFlags;
        uint64_t nsStart;
        uint64_t nsStop;
        uint64_t uTscStart;
        uint64_t uTscStop;
        RTCPUID idCpuStart;
        RTCPUID idCpuStop;

        /*
         * Synchronize with the host OS clock tick on systems without high
         * resolution time API (older Windows version for example).
         */
        nsStart = RTTimeSystemNanoTS();
        while (RTTimeSystemNanoTS() == nsStart)
            ASMNopPause();

        /*
         * Read the TSC and current time, noting which CPU we're on.
         * Interrupts are disabled so the three reads happen back-to-back.
         */
        fEFlags = ASMIntDisableFlags();
        uTscStart = ASMReadTSC();
        nsStart = RTTimeSystemNanoTS();
        idCpuStart = RTMpCpuId();
        ASMSetFlags(fEFlags);

        /*
         * Delay for a while.
         */
        if (pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC)
        {
            /*
             * Sleep-wait since the TSC frequency is constant, it eases host load.
             * Shorter interval produces more variance in the frequency (esp. Windows).
             */
            /* Round the target delay (16 ms rough / 200 ms accurate) up to a
               whole timer tick, then back off 100us so we end just before a tick. */
            uint64_t msElapsed = 0;
            uint64_t msDelay = ( ((fRough ? 16 : 200) * RT_NS_1MS + nsTimerIncr - 1) / nsTimerIncr * nsTimerIncr - RT_NS_100US )
                             / RT_NS_1MS;
            do
            {
                RTThreadSleep((RTMSINTERVAL)(msDelay - msElapsed));
                nsStop = RTTimeSystemNanoTS();
                msElapsed = (nsStop - nsStart) / RT_NS_1MS;
            } while (msElapsed < msDelay);

            /* Spin until the next clock tick so the stop sample is tick-aligned. */
            while (RTTimeSystemNanoTS() == nsStop)
                ASMNopPause();
        }
        else
        {
            /*
             * Busy-wait keeping the frequency up (a sleeping CPU may clock down).
             */
            do
            {
                ASMNopPause();
                nsStop = RTTimeSystemNanoTS();
            } while (nsStop - nsStart < RT_NS_100MS);
        }

        /*
         * Read the TSC and time again, same back-to-back discipline as above.
         */
        fEFlags = ASMIntDisableFlags();
        uTscStop = ASMReadTSC();
        nsStop = RTTimeSystemNanoTS();
        idCpuStop = RTMpCpuId();
        ASMSetFlags(fEFlags);

        /*
         * If the CPU changes, things get a bit complicated and what we
         * can get away with depends on the GIP mode / TSC reliability.
         */
        if (idCpuStop != idCpuStart)
        {
            bool fDoXCall = false;

            /*
             * Synchronous TSC mode: we're probably fine as it's unlikely
             * that we were rescheduled because of TSC throttling or power
             * management reasons, so just go ahead.
             */
            if (pGip->u32Mode == SUPGIPMODE_SYNC_TSC)
            {
                /* Probably ok, maybe we should retry once?. */
                Assert(pGip->enmUseTscDelta == SUPGIPUSETSCDELTA_NOT_APPLICABLE);
            }
            /*
             * If we're just doing the rough measurement, do the cross call and
             * get on with things (we don't have deltas!).
             */
            else if (fRough)
                fDoXCall = true;
            /*
             * Invariant TSC mode: It doesn't matter if we have delta available
             * for both CPUs.  That is not something we can assume at this point.
             *
             * Note! We cannot necessarily trust enmUseTscDelta here because it's
             *       downgraded after each delta calculation and the delta
             *       calculations may not be complete yet.
             */
            else if (pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC)
            {
/** @todo This section of code is never reached atm, consider dropping it later on... */
                if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED)
                {
                    /* Map both CPU IDs to GIP entries so we can look up their TSC deltas. */
                    uint32_t iStartCpuSet = RTMpCpuIdToSetIndex(idCpuStart);
                    uint32_t iStopCpuSet = RTMpCpuIdToSetIndex(idCpuStop);
                    uint16_t iStartGipCpu = iStartCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)
                                          ? pGip->aiCpuFromCpuSetIdx[iStartCpuSet] : UINT16_MAX;
                    uint16_t iStopGipCpu = iStopCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)
                                         ? pGip->aiCpuFromCpuSetIdx[iStopCpuSet] : UINT16_MAX;
                    int64_t iStartTscDelta = iStartGipCpu < pGip->cCpus ? pGip->aCPUs[iStartGipCpu].i64TSCDelta : INT64_MAX;
                    int64_t iStopTscDelta = iStopGipCpu < pGip->cCpus ? pGip->aCPUs[iStopGipCpu].i64TSCDelta : INT64_MAX;
                    if (RT_LIKELY(iStartTscDelta != INT64_MAX && iStopTscDelta != INT64_MAX))
                    {
                        /* Both deltas known: correct the samples onto a common time base. */
                        if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_PRACTICALLY_ZERO)
                        {
                            uTscStart -= iStartTscDelta;
                            uTscStop -= iStopTscDelta;
                        }
                    }
                    /*
                     * Invalid CPU indexes are not caused by online/offline races, so
                     * we have to trigger driver load failure if that happens as GIP
                     * and IPRT assumptions are busted on this system.
                     */
                    else if (iStopGipCpu >= pGip->cCpus || iStartGipCpu >= pGip->cCpus)
                    {
                        SUPR0Printf("vboxdrv: Unexpected CPU index in supdrvGipInitMeasureTscFreq.\n");
                        SUPR0Printf("vboxdrv: start: %u, %u, %#llx stop: %u, %u, %#llx\n",
                                    iStartCpuSet, iStartGipCpu, iStartTscDelta, iStopCpuSet, iStopGipCpu, iStopTscDelta);
                        return VERR_INVALID_CPU_INDEX;
                    }
                    /*
                     * No valid deltas.  We retry, if we're on our last retry
                     * we do the cross call instead just to get a result.  The
                     * frequency will be refined in a few seconds anyway.
                     */
                    else if (cTriesLeft > 0)
                        continue;
                    else
                        fDoXCall = true;
                }
            }
            /*
             * Asynchronous TSC mode: This is bad, as the reason we usually
             * use this mode is to deal with variable TSC frequencies and
             * deltas.  So, we need to get the TSC from the same CPU as
             * started it, we also need to keep that CPU busy.  So, retry
             * and fall back to the cross call on the last attempt.
             */
            else
            {
                Assert(pGip->u32Mode == SUPGIPMODE_ASYNC_TSC);
                if (cTriesLeft > 0)
                    continue;
                fDoXCall = true;
            }

            if (fDoXCall)
            {
                /*
                 * Try read the TSC and timestamp on the start CPU.
                 */
                int rc = RTMpOnSpecific(idCpuStart, supdrvGipInitReadTscAndNanoTsOnCpu, &uTscStop, &nsStop);
                if (RT_FAILURE(rc) && (!fRough || cTriesLeft > 0))
                    continue;
            }
        }

        /*
         * Calculate the TSC frequency and update it (shared with the refinement timer).
         */
        supdrvGipInitSetCpuFreq(pGip, nsStop - nsStart, uTscStop - uTscStart, 0);
        return VINF_SUCCESS;
    }

    /* Only the accurate (non-rough) path can run out of retries. */
    Assert(!fRough);
    return VERR_SUPDRV_TSC_FREQ_MEASUREMENT_FAILED;
}
1201
1202
1203/**
1204 * Finds our (@a idCpu) entry, or allocates a new one if not found.
1205 *
1206 * @returns Index of the CPU in the cache set.
1207 * @param pGip The GIP.
1208 * @param idCpu The CPU ID.
1209 */
1210static uint32_t supdrvGipFindOrAllocCpuIndexForCpuId(PSUPGLOBALINFOPAGE pGip, RTCPUID idCpu)
1211{
1212 uint32_t i, cTries;
1213
1214 /*
1215 * ASSUMES that CPU IDs are constant.
1216 */
1217 for (i = 0; i < pGip->cCpus; i++)
1218 if (pGip->aCPUs[i].idCpu == idCpu)
1219 return i;
1220
1221 cTries = 0;
1222 do
1223 {
1224 for (i = 0; i < pGip->cCpus; i++)
1225 {
1226 bool fRc;
1227 ASMAtomicCmpXchgSize(&pGip->aCPUs[i].idCpu, idCpu, NIL_RTCPUID, fRc);
1228 if (fRc)
1229 return i;
1230 }
1231 } while (cTries++ < 32);
1232 AssertReleaseFailed();
1233 return i - 1;
1234}
1235
1236
/**
 * The calling CPU should be accounted as online, update GIP accordingly.
 *
 * This is used by supdrvGipCreate() as well as supdrvGipMpEvent().
 *
 * Must be called on the CPU being onlined (asserted below) with preemption
 * disabled.
 *
 * @param   pDevExt             The device extension.
 * @param   idCpu               The CPU ID.
 */
static void supdrvGipMpEventOnlineOrInitOnCpu(PSUPDRVDEVEXT pDevExt, RTCPUID idCpu)
{
    int iCpuSet = 0;
    uint16_t idApic = UINT16_MAX;
    uint32_t i = 0;
    uint64_t u64NanoTS = 0;
    PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;

    AssertPtrReturnVoid(pGip);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    AssertRelease(idCpu == RTMpCpuId());
    Assert(pGip->cPossibleCpus == RTMpGetCount());

    /*
     * Do this behind a spinlock with interrupts disabled as this can fire
     * on all CPUs simultaneously, see @bugref{6110}.
     */
    RTSpinlockAcquire(pDevExt->hGipSpinlock);

    /*
     * Update the globals.
     */
    ASMAtomicWriteU16(&pGip->cPresentCpus, RTMpGetPresentCount());
    ASMAtomicWriteU16(&pGip->cOnlineCpus, RTMpGetOnlineCount());
    iCpuSet = RTMpCpuIdToSetIndex(idCpu);
    if (iCpuSet >= 0)
    {
        Assert(RTCpuSetIsMemberByIndex(&pGip->PossibleCpuSet, iCpuSet));
        RTCpuSetAddByIndex(&pGip->OnlineCpuSet, iCpuSet);
        RTCpuSetAddByIndex(&pGip->PresentCpuSet, iCpuSet);
    }

    /*
     * Update the entry.  Back-date the timestamp by one update interval so
     * the first timer callout produces a sane interval.
     */
    u64NanoTS = RTTimeSystemNanoTS() - pGip->u32UpdateIntervalNS;
    i = supdrvGipFindOrAllocCpuIndexForCpuId(pGip, idCpu);

    supdrvGipInitCpu(pGip, &pGip->aCPUs[i], u64NanoTS, pGip->u64CpuHz);

    idApic = ASMGetApicId();
    ASMAtomicWriteU16(&pGip->aCPUs[i].idApic, idApic);
    ASMAtomicWriteS16(&pGip->aCPUs[i].iCpuSet, (int16_t)iCpuSet);
    ASMAtomicWriteSize(&pGip->aCPUs[i].idCpu, idCpu);

    /*
     * Update the APIC ID and CPU set index mappings.
     */
    ASMAtomicWriteU16(&pGip->aiCpuFromApicId[idApic], i);
    ASMAtomicWriteU16(&pGip->aiCpuFromCpuSetIdx[iCpuSet], i);

    /* Add this CPU to this set of CPUs we need to calculate the TSC-delta for. */
    RTCpuSetAddByIndex(&pDevExt->TscDeltaCpuSet, RTMpCpuIdToSetIndex(idCpu));

    /* Update the Mp online/offline counter. */
    ASMAtomicIncU32(&pDevExt->cMpOnOffEvents);

    /* Commit it.  The state write is last so readers only see fully set up entries. */
    ASMAtomicWriteSize(&pGip->aCPUs[i].enmState, SUPGIPCPUSTATE_ONLINE);

    RTSpinlockRelease(pDevExt->hGipSpinlock);
}
1307
1308
1309/**
1310 * RTMpOnSpecific callback wrapper for supdrvGipMpEventOnlineOrInitOnCpu().
1311 *
1312 * @param idCpu The CPU ID we are running on.
1313 * @param pvUser1 Opaque pointer to the device instance data.
1314 * @param pvUser2 Not used.
1315 */
1316static DECLCALLBACK(void) supdrvGipMpEventOnlineCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1317{
1318 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser1;
1319 NOREF(pvUser2);
1320 supdrvGipMpEventOnlineOrInitOnCpu(pDevExt, idCpu);
1321}
1322
1323
/**
 * The CPU should be accounted as offline, update the GIP accordingly.
 *
 * This is used by supdrvGipMpEvent.
 *
 * @param   pDevExt             The device extension.
 * @param   idCpu               The CPU ID.
 */
static void supdrvGipMpEventOffline(PSUPDRVDEVEXT pDevExt, RTCPUID idCpu)
{
    PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
    int iCpuSet;
    unsigned i;

    AssertPtrReturnVoid(pGip);
    /* Serialize with the online handler (supdrvGipMpEventOnlineOrInitOnCpu). */
    RTSpinlockAcquire(pDevExt->hGipSpinlock);

    iCpuSet = RTMpCpuIdToSetIndex(idCpu);
    AssertReturnVoid(iCpuSet >= 0);

    /* Resolve the GIP entry via the CPU set index mapping and sanity check it. */
    i = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    AssertReturnVoid(i < pGip->cCpus);
    AssertReturnVoid(pGip->aCPUs[i].idCpu == idCpu);

    Assert(RTCpuSetIsMemberByIndex(&pGip->PossibleCpuSet, iCpuSet));
    RTCpuSetDelByIndex(&pGip->OnlineCpuSet, iCpuSet);

    /* Update the Mp online/offline counter. */
    ASMAtomicIncU32(&pDevExt->cMpOnOffEvents);

    if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED)
    {
        /* Reset the TSC delta, we will recalculate it lazily. */
        ASMAtomicWriteS64(&pGip->aCPUs[i].i64TSCDelta, INT64_MAX);
        /* Remove this CPU from the set of CPUs that we have obtained the TSC deltas. */
        RTCpuSetDelByIndex(&pDevExt->TscDeltaObtainedCpuSet, iCpuSet);
    }

    /* Commit it. */
    ASMAtomicWriteSize(&pGip->aCPUs[i].enmState, SUPGIPCPUSTATE_OFFLINE);

    RTSpinlockRelease(pDevExt->hGipSpinlock);
}
1367
1368
/**
 * Multiprocessor event notification callback.
 *
 * This is used to make sure that the GIP master gets passed on to
 * another CPU.  It also updates the associated CPU data.
 *
 * @param   enmEvent            The event.
 * @param   idCpu               The cpu it applies to.
 * @param   pvUser              Pointer to the device extension.
 */
static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
{
    PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
    PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;

    if (pGip)
    {
        RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
        switch (enmEvent)
        {
            case RTMPEVENT_ONLINE:
            {
                /* The online handler must run on the CPU being onlined with
                   preemption disabled; cross-call if we're somewhere else. */
                RTThreadPreemptDisable(&PreemptState);
                if (idCpu == RTMpCpuId())
                {
                    supdrvGipMpEventOnlineOrInitOnCpu(pDevExt, idCpu);
                    RTThreadPreemptRestore(&PreemptState);
                }
                else
                {
                    RTThreadPreemptRestore(&PreemptState);
                    RTMpOnSpecific(idCpu, supdrvGipMpEventOnlineCallback, pDevExt, NULL /* pvUser2 */);
                }

                /*
                 * Recompute TSC-delta for the newly online'd CPU.
                 */
                if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED)
                {
#ifdef SUPDRV_USE_TSC_DELTA_THREAD
                    supdrvTscDeltaThreadStartMeasurement(pDevExt, false /* fForceAll */);
#else
                    uint32_t iCpu = supdrvGipFindOrAllocCpuIndexForCpuId(pGip, idCpu);
                    supdrvMeasureTscDeltaOne(pDevExt, iCpu);
#endif
                }
                break;
            }

            case RTMPEVENT_OFFLINE:
                supdrvGipMpEventOffline(pDevExt, idCpu);
                break;
        }
    }

    /*
     * Make sure there is a master GIP.
     */
    if (enmEvent == RTMPEVENT_OFFLINE)
    {
        RTCPUID idGipMaster = ASMAtomicReadU32(&pDevExt->idGipMaster);
        if (idGipMaster == idCpu)
        {
            /*
             * The GIP master is going offline, find a new one.
             */
            bool fIgnored;
            unsigned i;
            RTCPUID idNewGipMaster = NIL_RTCPUID;
            RTCPUSET OnlineCpus;
            RTMpGetOnlineSet(&OnlineCpus);

            /* Pick the first online CPU that isn't the departing master. */
            for (i = 0; i < RTCPUSET_MAX_CPUS; i++)
                if (RTCpuSetIsMemberByIndex(&OnlineCpus, i))
                {
                    RTCPUID idCurCpu = RTMpCpuIdFromSetIndex(i);
                    if (idCurCpu != idGipMaster)
                    {
                        idNewGipMaster = idCurCpu;
                        break;
                    }
                }

            Log(("supdrvGipMpEvent: Gip master %#lx -> %#lx\n", (long)idGipMaster, (long)idNewGipMaster));
            /* CmpXchg: only install the new master if nobody else changed it meanwhile. */
            ASMAtomicCmpXchgSize(&pDevExt->idGipMaster, idNewGipMaster, idGipMaster, fIgnored);
            NOREF(fIgnored);
        }
    }
}
1458
1459
1460/**
1461 * On CPU initialization callback for RTMpOnAll.
1462 *
1463 * @param idCpu The CPU ID.
1464 * @param pvUser1 The device extension.
1465 * @param pvUser2 The GIP.
1466 */
1467static DECLCALLBACK(void) supdrvGipInitOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1468{
1469 /* This is good enough, even though it will update some of the globals a
1470 bit to much. */
1471 supdrvGipMpEventOnlineOrInitOnCpu((PSUPDRVDEVEXT)pvUser1, idCpu);
1472}
1473
1474
1475/**
1476 * Callback used by supdrvDetermineAsyncTSC to read the TSC on a CPU.
1477 *
1478 * @param idCpu Ignored.
1479 * @param pvUser1 Where to put the TSC.
1480 * @param pvUser2 Ignored.
1481 */
1482static DECLCALLBACK(void) supdrvGipInitDetermineAsyncTscWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1483{
1484 Assert(RTMpCpuIdToSetIndex(idCpu) == (intptr_t)pvUser2);
1485 ASMAtomicWriteU64((uint64_t volatile *)pvUser1, ASMReadTSC());
1486}
1487
1488
/**
 * Determine if Async GIP mode is required because of TSC drift.
 *
 * When using the default/normal timer code it is essential that the time stamp counter
 * (TSC) runs never backwards, that is, a read operation to the counter should return
 * a bigger value than any previous read operation.  This is guaranteed by the latest
 * AMD CPUs and by newer Intel CPUs which never enter the C2 state (P4).  In any other
 * case we have to choose the asynchronous timer mode.
 *
 * @param   poffMin     Pointer to the determined difference between different
 *                      cores (optional, can be NULL).
 * @return  false if the time stamp counters appear to be synchronized, true otherwise.
 */
static bool supdrvGipInitDetermineAsyncTsc(uint64_t *poffMin)
{
    /*
     * Just iterate all the cpus 8 times and make sure that the TSC is
     * ever increasing.  We don't bother taking TSC rollover into account.
     */
    int iEndCpu = RTMpGetArraySize();
    int iCpu;
    int cLoops = 8;
    bool fAsync = false;
    int rc = VINF_SUCCESS;
    uint64_t offMax = 0;
    uint64_t offMin = ~(uint64_t)0;
    uint64_t PrevTsc = ASMReadTSC();

    while (cLoops-- > 0)
    {
        for (iCpu = 0; iCpu < iEndCpu; iCpu++)
        {
            uint64_t CurTsc;
            /* Read the TSC on the target CPU via a cross call. */
            rc = RTMpOnSpecific(RTMpCpuIdFromSetIndex(iCpu), supdrvGipInitDetermineAsyncTscWorker,
                                &CurTsc, (void *)(uintptr_t)iCpu);
            if (RT_SUCCESS(rc))
            {
                /* A non-increasing TSC across CPUs means async mode is needed. */
                if (CurTsc <= PrevTsc)
                {
                    fAsync = true;
                    offMin = offMax = PrevTsc - CurTsc;
                    Log(("supdrvGipInitDetermineAsyncTsc: iCpu=%d cLoops=%d CurTsc=%llx PrevTsc=%llx\n",
                         iCpu, cLoops, CurTsc, PrevTsc));
                    break;
                }

                /* Gather statistics (except the first time). */
                if (iCpu != 0 || cLoops != 7)
                {
                    uint64_t off = CurTsc - PrevTsc;
                    if (off < offMin)
                        offMin = off;
                    if (off > offMax)
                        offMax = off;
                    Log2(("%d/%d: off=%llx\n", cLoops, iCpu, off));
                }

                /* Next */
                PrevTsc = CurTsc;
            }
            else if (rc == VERR_NOT_SUPPORTED)
                break;
            else
                AssertMsg(rc == VERR_CPU_NOT_FOUND || rc == VERR_CPU_OFFLINE, ("%d\n", rc));
        }

        /* broke out of the loop. */
        if (iCpu < iEndCpu)
            break;
    }

    if (poffMin)
        *poffMin = offMin; /* Almost RTMpOnSpecific profiling. */
    Log(("supdrvGipInitDetermineAsyncTsc: returns %d; iEndCpu=%d rc=%d offMin=%llx offMax=%llx\n",
         fAsync, iEndCpu, rc, offMin, offMax));
#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_OS2) && !defined(RT_OS_WINDOWS)
    OSDBGPRINT(("vboxdrv: fAsync=%d offMin=%#lx offMax=%#lx\n", fAsync, (long)offMin, (long)offMax));
#endif
    return fAsync;
}
1569
1570
1571/**
1572 * supdrvGipInit() worker that determines the GIP TSC mode.
1573 *
1574 * @returns The most suitable TSC mode.
1575 * @param pDevExt Pointer to the device instance data.
1576 */
1577static SUPGIPMODE supdrvGipInitDetermineTscMode(PSUPDRVDEVEXT pDevExt)
1578{
1579 uint64_t u64DiffCoresIgnored;
1580 uint32_t uEAX, uEBX, uECX, uEDX;
1581
1582 /*
1583 * Establish whether the CPU advertises TSC as invariant, we need that in
1584 * a couple of places below.
1585 */
1586 bool fInvariantTsc = false;
1587 if (ASMHasCpuId())
1588 {
1589 uEAX = ASMCpuId_EAX(0x80000000);
1590 if (ASMIsValidExtRange(uEAX) && uEAX >= 0x80000007)
1591 {
1592 uEDX = ASMCpuId_EDX(0x80000007);
1593 if (uEDX & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR)
1594 fInvariantTsc = true;
1595 }
1596 }
1597
1598 /*
1599 * On single CPU systems, we don't need to consider ASYNC mode.
1600 */
1601 if (RTMpGetCount() <= 1)
1602 return fInvariantTsc ? SUPGIPMODE_INVARIANT_TSC : SUPGIPMODE_SYNC_TSC;
1603
1604 /*
1605 * Allow the user and/or OS specific bits to force async mode.
1606 */
1607 if (supdrvOSGetForcedAsyncTscMode(pDevExt))
1608 return SUPGIPMODE_ASYNC_TSC;
1609
1610 /*
1611 * Use invariant mode if the CPU says TSC is invariant.
1612 */
1613 if (fInvariantTsc)
1614 return SUPGIPMODE_INVARIANT_TSC;
1615
1616 /*
1617 * TSC is not invariant and we're on SMP, this presents two problems:
1618 *
1619 * (1) There might be a skew between the CPU, so that cpu0
1620 * returns a TSC that is slightly different from cpu1.
1621 * This screw may be due to (2), bad TSC initialization
1622 * or slightly different TSC rates.
1623 *
1624 * (2) Power management (and other things) may cause the TSC
1625 * to run at a non-constant speed, and cause the speed
1626 * to be different on the cpus. This will result in (1).
1627 *
1628 * If any of the above is detected, we will have to use ASYNC mode.
1629 */
1630 /* (1). Try check for current differences between the cpus. */
1631 if (supdrvGipInitDetermineAsyncTsc(&u64DiffCoresIgnored))
1632 return SUPGIPMODE_ASYNC_TSC;
1633
1634 /* (2) If it's an AMD CPU with power management, we won't trust its TSC. */
1635 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
1636 if ( ASMIsValidStdRange(uEAX)
1637 && ASMIsAmdCpuEx(uEBX, uECX, uEDX))
1638 {
1639 /* Check for APM support. */
1640 uEAX = ASMCpuId_EAX(0x80000000);
1641 if (ASMIsValidExtRange(uEAX) && uEAX >= 0x80000007)
1642 {
1643 uEDX = ASMCpuId_EDX(0x80000007);
1644 if (uEDX & 0x3e) /* STC|TM|THERMTRIP|VID|FID. Ignore TS. */
1645 return SUPGIPMODE_ASYNC_TSC;
1646 }
1647 }
1648
1649 return SUPGIPMODE_SYNC_TSC;
1650}
1651
1652
1653/**
1654 * Initializes per-CPU GIP information.
1655 *
1656 * @param pGip Pointer to the GIP.
1657 * @param pCpu Pointer to which GIP CPU to initalize.
1658 * @param u64NanoTS The current nanosecond timestamp.
1659 * @param uCpuHz The CPU frequency to set, 0 if the caller doesn't know.
1660 */
1661static void supdrvGipInitCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pCpu, uint64_t u64NanoTS, uint64_t uCpuHz)
1662{
1663 pCpu->u32TransactionId = 2;
1664 pCpu->u64NanoTS = u64NanoTS;
1665 pCpu->u64TSC = ASMReadTSC();
1666 pCpu->u64TSCSample = GIP_TSC_DELTA_RSVD;
1667 pCpu->i64TSCDelta = pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED ? INT64_MAX : 0;
1668
1669 ASMAtomicWriteSize(&pCpu->enmState, SUPGIPCPUSTATE_INVALID);
1670 ASMAtomicWriteSize(&pCpu->idCpu, NIL_RTCPUID);
1671 ASMAtomicWriteS16(&pCpu->iCpuSet, -1);
1672 ASMAtomicWriteU16(&pCpu->idApic, UINT16_MAX);
1673
1674 /*
1675 * The first time we're called, we don't have a CPU frequency handy,
1676 * so pretend it's a 4 GHz CPU. On CPUs that are online, we'll get
1677 * called again and at that point we have a more plausible CPU frequency
1678 * value handy. The frequency history will also be adjusted again on
1679 * the 2nd timer callout (maybe we can skip that now?).
1680 */
1681 if (!uCpuHz)
1682 {
1683 pCpu->u64CpuHz = _4G - 1;
1684 pCpu->u32UpdateIntervalTSC = (uint32_t)((_4G - 1) / pGip->u32UpdateHz);
1685 }
1686 else
1687 {
1688 pCpu->u64CpuHz = uCpuHz;
1689 pCpu->u32UpdateIntervalTSC = (uint32_t)(uCpuHz / pGip->u32UpdateHz);
1690 }
1691 pCpu->au32TSCHistory[0]
1692 = pCpu->au32TSCHistory[1]
1693 = pCpu->au32TSCHistory[2]
1694 = pCpu->au32TSCHistory[3]
1695 = pCpu->au32TSCHistory[4]
1696 = pCpu->au32TSCHistory[5]
1697 = pCpu->au32TSCHistory[6]
1698 = pCpu->au32TSCHistory[7]
1699 = pCpu->u32UpdateIntervalTSC;
1700}
1701
1702
/**
 * Initializes the GIP data.
 *
 * @param   pDevExt             Pointer to the device instance data.
 * @param   pGip                Pointer to the read-write kernel mapping of the GIP.
 * @param   HCPhys              The physical address of the GIP.
 * @param   u64NanoTS           The current nanosecond timestamp.
 * @param   uUpdateHz           The update frequency.
 * @param   uUpdateIntervalNS   The update interval in nanoseconds.
 * @param   cCpus               The CPU count.
 */
static void supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys,
                          uint64_t u64NanoTS, unsigned uUpdateHz, unsigned uUpdateIntervalNS, unsigned cCpus)
{
    /* Size of the GIP including the variable-length CPU array, page aligned. */
    size_t const cbGip = RT_ALIGN_Z(RT_OFFSETOF(SUPGLOBALINFOPAGE, aCPUs[cCpus]), PAGE_SIZE);
    unsigned i;
#ifdef DEBUG_DARWIN_GIP
    OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d cCpus=%u\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz, cCpus));
#else
    LogFlow(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d cCpus=%u\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz, cCpus));
#endif

    /*
     * Initialize the structure.
     */
    memset(pGip, 0, cbGip);

    pGip->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
    pGip->u32Version = SUPGLOBALINFOPAGE_VERSION;
    pGip->u32Mode = supdrvGipInitDetermineTscMode(pDevExt);
    if (   pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC
        /*|| pGip->u32Mode == SUPGIPMODE_SYNC_TSC */)
        pGip->enmUseTscDelta = supdrvOSAreTscDeltasInSync() /* Allow OS override (windows). */
                             ? SUPGIPUSETSCDELTA_ZERO_CLAIMED : SUPGIPUSETSCDELTA_PRACTICALLY_ZERO /* downgrade later */;
    else
        pGip->enmUseTscDelta = SUPGIPUSETSCDELTA_NOT_APPLICABLE;
    pGip->cCpus = (uint16_t)cCpus;
    pGip->cPages = (uint16_t)(cbGip / PAGE_SIZE);
    pGip->u32UpdateHz = uUpdateHz;
    pGip->u32UpdateIntervalNS = uUpdateIntervalNS;
    pGip->fGetGipCpu = SUPGIPGETCPU_APIC_ID;
    RTCpuSetEmpty(&pGip->OnlineCpuSet);
    RTCpuSetEmpty(&pGip->PresentCpuSet);
    RTMpGetSet(&pGip->PossibleCpuSet);
    pGip->cOnlineCpus = RTMpGetOnlineCount();
    pGip->cPresentCpus = RTMpGetPresentCount();
    pGip->cPossibleCpus = RTMpGetCount();
    pGip->idCpuMax = RTMpGetMaxCpuId();
    /* Both lookup tables start out with all entries invalid (UINT16_MAX). */
    for (i = 0; i < RT_ELEMENTS(pGip->aiCpuFromApicId); i++)
        pGip->aiCpuFromApicId[i] = UINT16_MAX;
    for (i = 0; i < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx); i++)
        pGip->aiCpuFromCpuSetIdx[i] = UINT16_MAX;
    /* CPU frequency isn't known yet at this point (uCpuHz=0). */
    for (i = 0; i < cCpus; i++)
        supdrvGipInitCpu(pGip, &pGip->aCPUs[i], u64NanoTS, 0 /*uCpuHz*/);

    /*
     * Link it to the device extension.
     */
    pDevExt->pGip = pGip;
    pDevExt->HCPhysGip = HCPhys;
    pDevExt->cGipUsers = 0;
}
1765
1766
1767/**
1768 * Creates the GIP.
1769 *
1770 * @returns VBox status code.
1771 * @param pDevExt Instance data. GIP stuff may be updated.
1772 */
1773int VBOXCALL supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
1774{
1775 PSUPGLOBALINFOPAGE pGip;
1776 RTHCPHYS HCPhysGip;
1777 uint32_t u32SystemResolution;
1778 uint32_t u32Interval;
1779 uint32_t u32MinInterval;
1780 uint32_t uMod;
1781 unsigned cCpus;
1782 int rc;
1783
1784 LogFlow(("supdrvGipCreate:\n"));
1785
1786 /*
1787 * Assert order.
1788 */
1789 Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
1790 Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
1791 Assert(!pDevExt->pGipTimer);
1792#ifdef SUPDRV_USE_MUTEX_FOR_GIP
1793 Assert(pDevExt->mtxGip != NIL_RTSEMMUTEX);
1794 Assert(pDevExt->mtxTscDelta != NIL_RTSEMMUTEX);
1795#else
1796 Assert(pDevExt->mtxGip != NIL_RTSEMFASTMUTEX);
1797 Assert(pDevExt->mtxTscDelta != NIL_RTSEMFASTMUTEX);
1798#endif
1799
1800 /*
1801 * Check the CPU count.
1802 */
1803 cCpus = RTMpGetArraySize();
1804 if ( cCpus > RTCPUSET_MAX_CPUS
1805 || cCpus > 256 /* ApicId is used for the mappings */)
1806 {
1807 SUPR0Printf("VBoxDrv: Too many CPUs (%u) for the GIP (max %u)\n", cCpus, RT_MIN(RTCPUSET_MAX_CPUS, 256));
1808 return VERR_TOO_MANY_CPUS;
1809 }
1810
1811 /*
1812 * Allocate a contiguous set of pages with a default kernel mapping.
1813 */
1814 rc = RTR0MemObjAllocCont(&pDevExt->GipMemObj, RT_UOFFSETOF(SUPGLOBALINFOPAGE, aCPUs[cCpus]), false /*fExecutable*/);
1815 if (RT_FAILURE(rc))
1816 {
1817 OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
1818 return rc;
1819 }
1820 pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
1821 HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);
1822
1823 /*
1824 * Find a reasonable update interval and initialize the structure.
1825 */
1826 supdrvGipRequestHigherTimerFrequencyFromSystem(pDevExt);
1827 /** @todo figure out why using a 100Ms interval upsets timekeeping in VMs.
1828 * See @bugref{6710}. */
1829 u32MinInterval = RT_NS_10MS;
1830 u32SystemResolution = RTTimerGetSystemGranularity();
1831 u32Interval = u32MinInterval;
1832 uMod = u32MinInterval % u32SystemResolution;
1833 if (uMod)
1834 u32Interval += u32SystemResolution - uMod;
1835
1836 supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), RT_NS_1SEC / u32Interval /*=Hz*/, u32Interval, cCpus);
1837
1838 /*
1839 * Important sanity check...
1840 */
1841 if (RT_UNLIKELY( pGip->enmUseTscDelta == SUPGIPUSETSCDELTA_ZERO_CLAIMED
1842 && pGip->u32Mode == SUPGIPMODE_ASYNC_TSC
1843 && !supdrvOSGetForcedAsyncTscMode(pDevExt)))
1844 {
1845 OSDBGPRINT(("supdrvGipCreate: Host-OS/user claims the TSC-deltas are zero but we detected async. TSC! Bad.\n"));
1846 return VERR_INTERNAL_ERROR_2;
1847 }
1848
1849 /* It doesn't make sense to do TSC-delta detection on systems we detect as async. */
1850 AssertReturn( pGip->u32Mode != SUPGIPMODE_ASYNC_TSC
1851 || pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ZERO_CLAIMED, VERR_INTERNAL_ERROR_3);
1852
1853 /*
1854 * Do the TSC frequency measurements.
1855 *
1856 * If we're in invariant TSC mode, just to a quick preliminary measurement
1857 * that the TSC-delta measurement code can use to yield cross calls.
1858 *
1859 * If we're in any of the other two modes, neither which require MP init,
1860 * notifications or deltas for the job, do the full measurement now so
1861 * that supdrvGipInitOnCpu() can populate the TSC interval and history
1862 * array with more reasonable values.
1863 */
1864 if (pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC)
1865 {
1866 rc = supdrvGipInitMeasureTscFreq(pDevExt, pGip, true /*fRough*/); /* cannot fail */
1867 supdrvGipInitStartTimerForRefiningInvariantTscFreq(pDevExt, pGip);
1868 }
1869 else
1870 rc = supdrvGipInitMeasureTscFreq(pDevExt, pGip, false /*fRough*/);
1871 if (RT_SUCCESS(rc))
1872 {
1873 /*
1874 * Start TSC-delta measurement thread before we start getting MP
1875 * events that will try kick it into action (includes the
1876 * RTMpOnAll/supdrvGipInitOnCpu call below).
1877 */
1878 RTCpuSetEmpty(&pDevExt->TscDeltaCpuSet);
1879 RTCpuSetEmpty(&pDevExt->TscDeltaObtainedCpuSet);
1880#ifdef SUPDRV_USE_TSC_DELTA_THREAD
1881 if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED)
1882 rc = supdrvTscDeltaThreadInit(pDevExt);
1883#endif
1884 if (RT_SUCCESS(rc))
1885 {
1886 rc = RTMpNotificationRegister(supdrvGipMpEvent, pDevExt);
1887 if (RT_SUCCESS(rc))
1888 {
1889 /*
1890 * Do GIP initialization on all online CPUs. Wake up the
1891 * TSC-delta thread afterwards.
1892 */
1893 rc = RTMpOnAll(supdrvGipInitOnCpu, pDevExt, pGip);
1894 if (RT_SUCCESS(rc))
1895 {
1896#ifdef SUPDRV_USE_TSC_DELTA_THREAD
1897 supdrvTscDeltaThreadStartMeasurement(pDevExt, true /* fForceAll */);
1898#else
1899 uint16_t iCpu;
1900 if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED)
1901 {
1902 /*
1903 * Measure the TSC deltas now that we have MP notifications.
1904 */
1905 int cTries = 5;
1906 do
1907 {
1908 rc = supdrvMeasureInitialTscDeltas(pDevExt);
1909 if ( rc != VERR_TRY_AGAIN
1910 && rc != VERR_CPU_OFFLINE)
1911 break;
1912 } while (--cTries > 0);
1913 for (iCpu = 0; iCpu < pGip->cCpus; iCpu++)
1914 Log(("supdrvTscDeltaInit: cpu[%u] delta %lld\n", iCpu, pGip->aCPUs[iCpu].i64TSCDelta));
1915 }
1916 else
1917 {
1918 for (iCpu = 0; iCpu < pGip->cCpus; iCpu++)
1919 AssertMsg(!pGip->aCPUs[iCpu].i64TSCDelta, ("iCpu=%u %lld mode=%d\n", iCpu, pGip->aCPUs[iCpu].i64TSCDelta, pGip->u32Mode));
1920 }
1921 if (RT_SUCCESS(rc))
1922#endif
1923 {
1924 /*
1925 * Create the timer.
1926 * If CPU_ALL isn't supported we'll have to fall back to synchronous mode.
1927 */
1928 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
1929 {
1930 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, RTTIMER_FLAGS_CPU_ALL,
1931 supdrvGipAsyncTimer, pDevExt);
1932 if (rc == VERR_NOT_SUPPORTED)
1933 {
1934 OSDBGPRINT(("supdrvGipCreate: omni timer not supported, falling back to synchronous mode\n"));
1935 pGip->u32Mode = SUPGIPMODE_SYNC_TSC;
1936 }
1937 }
1938 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
1939 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0 /* fFlags */,
1940 supdrvGipSyncAndInvariantTimer, pDevExt);
1941 if (RT_SUCCESS(rc))
1942 {
1943 /*
1944 * We're good.
1945 */
1946 Log(("supdrvGipCreate: %u ns interval.\n", u32Interval));
1947 supdrvGipReleaseHigherTimerFrequencyFromSystem(pDevExt);
1948
1949 g_pSUPGlobalInfoPage = pGip;
1950 return VINF_SUCCESS;
1951 }
1952
1953 OSDBGPRINT(("supdrvGipCreate: failed create GIP timer at %u ns interval. rc=%Rrc\n", u32Interval, rc));
1954 Assert(!pDevExt->pGipTimer);
1955 }
1956 }
1957 else
1958 OSDBGPRINT(("supdrvGipCreate: RTMpOnAll failed. rc=%Rrc\n", rc));
1959 }
1960 else
1961 OSDBGPRINT(("supdrvGipCreate: failed to register MP event notfication. rc=%Rrc\n", rc));
1962 }
1963 else
1964 OSDBGPRINT(("supdrvGipCreate: supdrvTscDeltaInit failed. rc=%Rrc\n", rc));
1965 }
1966 else
1967 OSDBGPRINT(("supdrvGipCreate: supdrvMeasureInitialTscDeltas failed. rc=%Rrc\n", rc));
1968
1969 /* Releases timer frequency increase too. */
1970 supdrvGipDestroy(pDevExt);
1971 return rc;
1972}
1973
1974
1975/**
1976 * Invalidates the GIP data upon termination.
1977 *
1978 * @param pGip Pointer to the read-write kernel mapping of the GIP.
1979 */
1980static void supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
1981{
1982 unsigned i;
1983 pGip->u32Magic = 0;
1984 for (i = 0; i < pGip->cCpus; i++)
1985 {
1986 pGip->aCPUs[i].u64NanoTS = 0;
1987 pGip->aCPUs[i].u64TSC = 0;
1988 pGip->aCPUs[i].iTSCHistoryHead = 0;
1989 pGip->aCPUs[i].u64TSCSample = 0;
1990 pGip->aCPUs[i].i64TSCDelta = INT64_MAX;
1991 }
1992}
1993
1994
1995/**
1996 * Terminates the GIP.
1997 *
1998 * @param pDevExt Instance data. GIP stuff may be updated.
1999 */
2000void VBOXCALL supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
2001{
2002 int rc;
2003#ifdef DEBUG_DARWIN_GIP
2004 OSDBGPRINT(("supdrvGipDestroy: pDevExt=%p pGip=%p pGipTimer=%p GipMemObj=%p\n", pDevExt,
2005 pDevExt->GipMemObj != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pDevExt->GipMemObj) : NULL,
2006 pDevExt->pGipTimer, pDevExt->GipMemObj));
2007#endif
2008
2009 /*
2010 * Stop receiving MP notifications before tearing anything else down.
2011 */
2012 RTMpNotificationDeregister(supdrvGipMpEvent, pDevExt);
2013
2014#ifdef SUPDRV_USE_TSC_DELTA_THREAD
2015 /*
2016 * Terminate the TSC-delta measurement thread and resources.
2017 */
2018 supdrvTscDeltaTerm(pDevExt);
2019#endif
2020
2021 /*
2022 * Destroy the TSC-refinement timer.
2023 */
2024 if (pDevExt->pInvarTscRefineTimer)
2025 {
2026 RTTimerDestroy(pDevExt->pInvarTscRefineTimer);
2027 pDevExt->pInvarTscRefineTimer = NULL;
2028 }
2029
2030 /*
2031 * Invalid the GIP data.
2032 */
2033 if (pDevExt->pGip)
2034 {
2035 supdrvGipTerm(pDevExt->pGip);
2036 pDevExt->pGip = NULL;
2037 }
2038 g_pSUPGlobalInfoPage = NULL;
2039
2040 /*
2041 * Destroy the timer and free the GIP memory object.
2042 */
2043 if (pDevExt->pGipTimer)
2044 {
2045 rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
2046 pDevExt->pGipTimer = NULL;
2047 }
2048
2049 if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
2050 {
2051 rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
2052 pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
2053 }
2054
2055 /*
2056 * Finally, make sure we've release the system timer resolution request
2057 * if one actually succeeded and is still pending.
2058 */
2059 supdrvGipReleaseHigherTimerFrequencyFromSystem(pDevExt);
2060}
2061
2062
2063
2064
2065/*
2066 *
2067 *
2068 * GIP Update Timer Related Code
2069 * GIP Update Timer Related Code
2070 * GIP Update Timer Related Code
2071 *
2072 *
2073 */
2074
2075
2076/**
2077 * Worker routine for supdrvGipUpdate() and supdrvGipUpdatePerCpu() that
2078 * updates all the per cpu data except the transaction id.
2079 *
2080 * @param pDevExt The device extension.
2081 * @param pGipCpu Pointer to the per cpu data.
2082 * @param u64NanoTS The current time stamp.
2083 * @param u64TSC The current TSC.
2084 * @param iTick The current timer tick.
2085 *
2086 * @remarks Can be called with interrupts disabled!
2087 */
2088static void supdrvGipDoUpdateCpu(PSUPDRVDEVEXT pDevExt, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS, uint64_t u64TSC, uint64_t iTick)
2089{
2090 uint64_t u64TSCDelta;
2091 uint32_t u32UpdateIntervalTSC;
2092 uint32_t u32UpdateIntervalTSCSlack;
2093 unsigned iTSCHistoryHead;
2094 uint64_t u64CpuHz;
2095 uint32_t u32TransactionId;
2096
2097 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
2098 AssertPtrReturnVoid(pGip);
2099
2100 /* Delta between this and the previous update. */
2101 ASMAtomicUoWriteU32(&pGipCpu->u32PrevUpdateIntervalNS, (uint32_t)(u64NanoTS - pGipCpu->u64NanoTS));
2102
2103 /*
2104 * Update the NanoTS.
2105 */
2106 ASMAtomicWriteU64(&pGipCpu->u64NanoTS, u64NanoTS);
2107
2108 /*
2109 * Calc TSC delta.
2110 */
2111 u64TSCDelta = u64TSC - pGipCpu->u64TSC;
2112 ASMAtomicWriteU64(&pGipCpu->u64TSC, u64TSC);
2113
2114 /*
2115 * We don't need to keep realculating the frequency when it's invariant, so
2116 * the remainder of this function is only for the sync and async TSC modes.
2117 */
2118 if (pGip->u32Mode != SUPGIPMODE_INVARIANT_TSC)
2119 {
2120 if (u64TSCDelta >> 32)
2121 {
2122 u64TSCDelta = pGipCpu->u32UpdateIntervalTSC;
2123 pGipCpu->cErrors++;
2124 }
2125
2126 /*
2127 * On the 2nd and 3rd callout, reset the history with the current TSC
2128 * interval since the values entered by supdrvGipInit are totally off.
2129 * The interval on the 1st callout completely unreliable, the 2nd is a bit
2130 * better, while the 3rd should be most reliable.
2131 */
2132 /** @todo Could we drop this now that we initializes the history
2133 * with nominal TSC frequency values? */
2134 u32TransactionId = pGipCpu->u32TransactionId;
2135 if (RT_UNLIKELY( ( u32TransactionId == 5
2136 || u32TransactionId == 7)
2137 && ( iTick == 2
2138 || iTick == 3) ))
2139 {
2140 unsigned i;
2141 for (i = 0; i < RT_ELEMENTS(pGipCpu->au32TSCHistory); i++)
2142 ASMAtomicUoWriteU32(&pGipCpu->au32TSCHistory[i], (uint32_t)u64TSCDelta);
2143 }
2144
2145 /*
2146 * Validate the NanoTS deltas between timer fires with an arbitrary threshold of 0.5%.
2147 * Wait until we have at least one full history since the above history reset. The
2148 * assumption is that the majority of the previous history values will be tolerable.
2149 * See @bugref{6710} comment #67.
2150 */
2151 /** @todo Could we drop the fuding there now that we initializes the history
2152 * with nominal TSC frequency values? */
2153 if ( u32TransactionId > 23 /* 7 + (8 * 2) */
2154 && pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
2155 {
2156 uint32_t uNanoTsThreshold = pGip->u32UpdateIntervalNS / 200;
2157 if ( pGipCpu->u32PrevUpdateIntervalNS > pGip->u32UpdateIntervalNS + uNanoTsThreshold
2158 || pGipCpu->u32PrevUpdateIntervalNS < pGip->u32UpdateIntervalNS - uNanoTsThreshold)
2159 {
2160 uint32_t u32;
2161 u32 = pGipCpu->au32TSCHistory[0];
2162 u32 += pGipCpu->au32TSCHistory[1];
2163 u32 += pGipCpu->au32TSCHistory[2];
2164 u32 += pGipCpu->au32TSCHistory[3];
2165 u32 >>= 2;
2166 u64TSCDelta = pGipCpu->au32TSCHistory[4];
2167 u64TSCDelta += pGipCpu->au32TSCHistory[5];
2168 u64TSCDelta += pGipCpu->au32TSCHistory[6];
2169 u64TSCDelta += pGipCpu->au32TSCHistory[7];
2170 u64TSCDelta >>= 2;
2171 u64TSCDelta += u32;
2172 u64TSCDelta >>= 1;
2173 }
2174 }
2175
2176 /*
2177 * TSC History.
2178 */
2179 Assert(RT_ELEMENTS(pGipCpu->au32TSCHistory) == 8);
2180 iTSCHistoryHead = (pGipCpu->iTSCHistoryHead + 1) & 7;
2181 ASMAtomicWriteU32(&pGipCpu->iTSCHistoryHead, iTSCHistoryHead);
2182 ASMAtomicWriteU32(&pGipCpu->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);
2183
2184 /*
2185 * UpdateIntervalTSC = average of last 8,2,1 intervals depending on update HZ.
2186 *
2187 * On Windows, we have an occasional (but recurring) sour value that messed up
2188 * the history but taking only 1 interval reduces the precision overall.
2189 */
2190 if ( pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC
2191 || pGip->u32UpdateHz >= 1000)
2192 {
2193 uint32_t u32;
2194 u32 = pGipCpu->au32TSCHistory[0];
2195 u32 += pGipCpu->au32TSCHistory[1];
2196 u32 += pGipCpu->au32TSCHistory[2];
2197 u32 += pGipCpu->au32TSCHistory[3];
2198 u32 >>= 2;
2199 u32UpdateIntervalTSC = pGipCpu->au32TSCHistory[4];
2200 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[5];
2201 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[6];
2202 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[7];
2203 u32UpdateIntervalTSC >>= 2;
2204 u32UpdateIntervalTSC += u32;
2205 u32UpdateIntervalTSC >>= 1;
2206
2207 /* Value chosen for a 2GHz Athlon64 running linux 2.6.10/11. */
2208 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
2209 }
2210 else if (pGip->u32UpdateHz >= 90)
2211 {
2212 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
2213 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
2214 u32UpdateIntervalTSC >>= 1;
2215
2216 /* value chosen on a 2GHz thinkpad running windows */
2217 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
2218 }
2219 else
2220 {
2221 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
2222
2223 /* This value hasn't be checked yet.. waiting for OS/2 and 33Hz timers.. :-) */
2224 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
2225 }
2226 ASMAtomicWriteU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);
2227
2228 /*
2229 * CpuHz.
2230 */
2231 u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, RT_NS_1SEC);
2232 u64CpuHz /= pGip->u32UpdateIntervalNS;
2233 ASMAtomicWriteU64(&pGipCpu->u64CpuHz, u64CpuHz);
2234 }
2235}
2236
2237
2238/**
2239 * Updates the GIP.
2240 *
2241 * @param pDevExt The device extension.
2242 * @param u64NanoTS The current nanosecond timesamp.
2243 * @param u64TSC The current TSC timesamp.
2244 * @param idCpu The CPU ID.
2245 * @param iTick The current timer tick.
2246 *
2247 * @remarks Can be called with interrupts disabled!
2248 */
2249static void supdrvGipUpdate(PSUPDRVDEVEXT pDevExt, uint64_t u64NanoTS, uint64_t u64TSC, RTCPUID idCpu, uint64_t iTick)
2250{
2251 /*
2252 * Determine the relevant CPU data.
2253 */
2254 PSUPGIPCPU pGipCpu;
2255 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
2256 AssertPtrReturnVoid(pGip);
2257
2258 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
2259 pGipCpu = &pGip->aCPUs[0];
2260 else
2261 {
2262 unsigned iCpu = pGip->aiCpuFromApicId[ASMGetApicId()];
2263 if (RT_UNLIKELY(iCpu >= pGip->cCpus))
2264 return;
2265 pGipCpu = &pGip->aCPUs[iCpu];
2266 if (RT_UNLIKELY(pGipCpu->idCpu != idCpu))
2267 return;
2268 }
2269
2270 /*
2271 * Start update transaction.
2272 */
2273 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
2274 {
2275 /* this can happen on win32 if we're taking to long and there are more CPUs around. shouldn't happen though. */
2276 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
2277 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
2278 pGipCpu->cErrors++;
2279 return;
2280 }
2281
2282 /*
2283 * Recalc the update frequency every 0x800th time.
2284 */
2285 if ( pGip->u32Mode != SUPGIPMODE_INVARIANT_TSC /* cuz we're not recalculating the frequency on invariants hosts. */
2286 && !(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
2287 {
2288 if (pGip->u64NanoTSLastUpdateHz)
2289 {
2290#ifdef RT_ARCH_AMD64 /** @todo fix 64-bit div here to work on x86 linux. */
2291 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
2292 uint32_t u32UpdateHz = (uint32_t)((RT_NS_1SEC_64 * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
2293 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
2294 {
2295 /** @todo r=ramshankar: Changing u32UpdateHz might screw up TSC frequency
2296 * calculation on non-invariant hosts if it changes the history decision
2297 * taken in supdrvGipDoUpdateCpu(). */
2298 uint64_t u64Interval = u64Delta / GIP_UPDATEHZ_RECALC_FREQ;
2299 ASMAtomicWriteU32(&pGip->u32UpdateHz, u32UpdateHz);
2300 ASMAtomicWriteU32(&pGip->u32UpdateIntervalNS, (uint32_t)u64Interval);
2301 }
2302#endif
2303 }
2304 ASMAtomicWriteU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS | 1);
2305 }
2306
2307 /*
2308 * Update the data.
2309 */
2310 supdrvGipDoUpdateCpu(pDevExt, pGipCpu, u64NanoTS, u64TSC, iTick);
2311
2312 /*
2313 * Complete transaction.
2314 */
2315 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
2316}
2317
2318
2319/**
2320 * Updates the per cpu GIP data for the calling cpu.
2321 *
2322 * @param pDevExt The device extension.
2323 * @param u64NanoTS The current nanosecond timesamp.
2324 * @param u64TSC The current TSC timesamp.
2325 * @param idCpu The CPU ID.
2326 * @param idApic The APIC id for the CPU index.
2327 * @param iTick The current timer tick.
2328 *
2329 * @remarks Can be called with interrupts disabled!
2330 */
2331static void supdrvGipUpdatePerCpu(PSUPDRVDEVEXT pDevExt, uint64_t u64NanoTS, uint64_t u64TSC,
2332 RTCPUID idCpu, uint8_t idApic, uint64_t iTick)
2333{
2334 uint32_t iCpu;
2335 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
2336
2337 /*
2338 * Avoid a potential race when a CPU online notification doesn't fire on
2339 * the onlined CPU but the tick creeps in before the event notification is
2340 * run.
2341 */
2342 if (RT_UNLIKELY(iTick == 1))
2343 {
2344 iCpu = supdrvGipFindOrAllocCpuIndexForCpuId(pGip, idCpu);
2345 if (pGip->aCPUs[iCpu].enmState == SUPGIPCPUSTATE_OFFLINE)
2346 supdrvGipMpEventOnlineOrInitOnCpu(pDevExt, idCpu);
2347 }
2348
2349 iCpu = pGip->aiCpuFromApicId[idApic];
2350 if (RT_LIKELY(iCpu < pGip->cCpus))
2351 {
2352 PSUPGIPCPU pGipCpu = &pGip->aCPUs[iCpu];
2353 if (pGipCpu->idCpu == idCpu)
2354 {
2355 /*
2356 * Start update transaction.
2357 */
2358 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
2359 {
2360 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
2361 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
2362 pGipCpu->cErrors++;
2363 return;
2364 }
2365
2366 /*
2367 * Update the data.
2368 */
2369 supdrvGipDoUpdateCpu(pDevExt, pGipCpu, u64NanoTS, u64TSC, iTick);
2370
2371 /*
2372 * Complete transaction.
2373 */
2374 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
2375 }
2376 }
2377}
2378
2379
2380/**
2381 * Timer callback function for the sync and invariant GIP modes.
2382 *
2383 * @param pTimer The timer.
2384 * @param pvUser Opaque pointer to the device extension.
2385 * @param iTick The timer tick.
2386 */
2387static DECLCALLBACK(void) supdrvGipSyncAndInvariantTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
2388{
2389 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
2390 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
2391 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
2392 uint64_t u64TSC = ASMReadTSC();
2393 uint64_t u64NanoTS = RTTimeSystemNanoTS();
2394
2395 if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_PRACTICALLY_ZERO)
2396 {
2397 /*
2398 * The calculations in supdrvGipUpdate() is somewhat timing sensitive,
2399 * missing timer ticks is not an option for GIP because the GIP users
2400 * will end up incrementing the time in 1ns per time getter call until
2401 * there is a complete timer update. So, if the delta has yet to be
2402 * calculated, we just pretend it is zero for now (the GIP users
2403 * probably won't have it for a wee while either and will do the same).
2404 *
2405 * We could maybe on some platforms try cross calling a CPU with a
2406 * working delta here, but it's not worth the hassle since the
2407 * likelyhood of this happening is really low. On Windows, Linux, and
2408 * Solaris timers fire on the CPU they were registered/started on.
2409 * Darwin timers doesn't necessarily (they are high priority threads).
2410 */
2411 uint32_t iCpuSet = RTMpCpuIdToSetIndex(RTMpCpuId());
2412 uint16_t iGipCpu = RT_LIKELY(iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx))
2413 ? pGip->aiCpuFromCpuSetIdx[iCpuSet] : UINT16_MAX;
2414 Assert(!ASMIntAreEnabled());
2415 if (RT_LIKELY(iGipCpu < pGip->cCpus))
2416 {
2417 int64_t iTscDelta = pGip->aCPUs[iGipCpu].i64TSCDelta;
2418 if (iTscDelta != INT64_MAX)
2419 u64TSC -= iTscDelta;
2420 }
2421 }
2422
2423 supdrvGipUpdate(pDevExt, u64NanoTS, u64TSC, NIL_RTCPUID, iTick);
2424
2425 ASMSetFlags(fOldFlags);
2426}
2427
2428
2429/**
2430 * Timer callback function for async GIP mode.
2431 * @param pTimer The timer.
2432 * @param pvUser Opaque pointer to the device extension.
2433 * @param iTick The timer tick.
2434 */
2435static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
2436{
2437 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
2438 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
2439 RTCPUID idCpu = RTMpCpuId();
2440 uint64_t u64TSC = ASMReadTSC();
2441 uint64_t NanoTS = RTTimeSystemNanoTS();
2442
2443 /** @todo reset the transaction number and whatnot when iTick == 1. */
2444 if (pDevExt->idGipMaster == idCpu)
2445 supdrvGipUpdate(pDevExt, NanoTS, u64TSC, idCpu, iTick);
2446 else
2447 supdrvGipUpdatePerCpu(pDevExt, NanoTS, u64TSC, idCpu, ASMGetApicId(), iTick);
2448
2449 ASMSetFlags(fOldFlags);
2450}
2451
2452
2453
2454
2455/*
2456 *
2457 *
2458 * TSC Delta Measurements And Related Code
2459 * TSC Delta Measurements And Related Code
2460 * TSC Delta Measurements And Related Code
2461 *
2462 *
2463 */
2464
2465
2466/*
2467 * Select TSC delta measurement algorithm.
2468 */
2469#if 0
2470# define GIP_TSC_DELTA_METHOD_1
2471#else
2472# define GIP_TSC_DELTA_METHOD_2
2473#endif
2474
2475/** For padding variables to keep them away from other cache lines. Better too
2476 * large than too small!
2477 * @remarks Current AMD64 and x86 CPUs seems to use 64 bytes. There are claims
2478 * that NetBurst had 128 byte cache lines while the 486 thru Pentium
2479 * III had 32 bytes cache lines. */
2480#define GIP_TSC_DELTA_CACHE_LINE_SIZE 128
2481
2482
2483/**
2484 * TSC delta measurment algorithm \#2 result entry.
2485 */
2486typedef struct SUPDRVTSCDELTAMETHOD2ENTRY
2487{
2488 uint32_t iSeqMine;
2489 uint32_t iSeqOther;
2490 uint64_t uTsc;
2491} SUPDRVTSCDELTAMETHOD2ENTRY;
2492
2493/**
2494 * TSC delta measurment algorithm \#2 Data.
2495 */
2496typedef struct SUPDRVTSCDELTAMETHOD2
2497{
2498 /** Padding to make sure the iCurSeqNo is in its own cache line. */
2499 uint64_t au64CacheLinePaddingBefore[GIP_TSC_DELTA_CACHE_LINE_SIZE / sizeof(uint64_t)];
2500 /** The current sequence number of this worker. */
2501 uint32_t volatile iCurSeqNo;
2502 /** Padding to make sure the iCurSeqNo is in its own cache line. */
2503 uint32_t au64CacheLinePaddingAfter[GIP_TSC_DELTA_CACHE_LINE_SIZE / sizeof(uint32_t) - 1];
2504 /** Result table. */
2505 SUPDRVTSCDELTAMETHOD2ENTRY aResults[64];
2506} SUPDRVTSCDELTAMETHOD2;
2507/** Pointer to the data for TSC delta mesurment algorithm \#2 .*/
2508typedef SUPDRVTSCDELTAMETHOD2 *PSUPDRVTSCDELTAMETHOD2;
2509
2510
2511/**
2512 * The TSC delta synchronization struct, version 2.
2513 *
2514 * The syncrhonization variable is completely isolated in its own cache line
2515 * (provided our max cache line size estimate is correct).
2516 */
2517typedef struct SUPTSCDELTASYNC2
2518{
2519 /** Padding to make sure the uVar1 is in its own cache line. */
2520 uint64_t au64CacheLinePaddingBefore[GIP_TSC_DELTA_CACHE_LINE_SIZE / sizeof(uint64_t)];
2521
2522 /** The synchronization variable, holds values GIP_TSC_DELTA_SYNC_*. */
2523 volatile uint32_t uSyncVar;
2524 /** Sequence synchronizing variable used for post 'GO' synchronization. */
2525 volatile uint32_t uSyncSeq;
2526
2527 /** Padding to make sure the uVar1 is in its own cache line. */
2528 uint64_t au64CacheLinePaddingAfter[GIP_TSC_DELTA_CACHE_LINE_SIZE / sizeof(uint64_t) - 2];
2529
2530 /** Start RDTSC value. Put here mainly to save stack space. */
2531 uint64_t uTscStart;
2532 /** Copy of SUPDRVGIPTSCDELTARGS::cMaxTscTicks. */
2533 uint64_t cMaxTscTicks;
2534} SUPTSCDELTASYNC2;
2535AssertCompileSize(SUPTSCDELTASYNC2, GIP_TSC_DELTA_CACHE_LINE_SIZE * 2 + sizeof(uint64_t));
2536typedef SUPTSCDELTASYNC2 *PSUPTSCDELTASYNC2;
2537
2538/** Prestart wait. */
2539#define GIP_TSC_DELTA_SYNC2_PRESTART_WAIT UINT32_C(0x0ffe)
2540/** Prestart aborted. */
2541#define GIP_TSC_DELTA_SYNC2_PRESTART_ABORT UINT32_C(0x0fff)
2542/** Ready (on your mark). */
2543#define GIP_TSC_DELTA_SYNC2_READY UINT32_C(0x1000)
2544/** Steady (get set). */
2545#define GIP_TSC_DELTA_SYNC2_STEADY UINT32_C(0x1001)
2546/** Go! */
2547#define GIP_TSC_DELTA_SYNC2_GO UINT32_C(0x1002)
2548/** Used by the verfication test. */
2549#define GIP_TSC_DELTA_SYNC2_GO_GO UINT32_C(0x1003)
2550
2551/** We reached the time limit. */
2552#define GIP_TSC_DELTA_SYNC2_TIMEOUT UINT32_C(0x1ffe)
2553/** The other party won't touch the sync struct ever again. */
2554#define GIP_TSC_DELTA_SYNC2_FINAL UINT32_C(0x1fff)
2555
2556
2557/**
2558 * Argument package/state passed by supdrvMeasureTscDeltaOne() to the RTMpOn
2559 * callback worker.
2560 * @todo add
2561 */
2562typedef struct SUPDRVGIPTSCDELTARGS
2563{
2564 /** The device extension. */
2565 PSUPDRVDEVEXT pDevExt;
2566 /** Pointer to the GIP CPU array entry for the worker. */
2567 PSUPGIPCPU pWorker;
2568 /** Pointer to the GIP CPU array entry for the master. */
2569 PSUPGIPCPU pMaster;
2570 /** The maximum number of ticks to spend in supdrvMeasureTscDeltaCallback.
2571 * (This is what we need a rough TSC frequency for.) */
2572 uint64_t cMaxTscTicks;
2573 /** Used to abort synchronization setup. */
2574 bool volatile fAbortSetup;
2575
2576 /** Padding to make sure the master variables live in its own cache lines. */
2577 uint64_t au64CacheLinePaddingBefore[GIP_TSC_DELTA_CACHE_LINE_SIZE / sizeof(uint64_t)];
2578
2579 /** @name Master
2580 * @{ */
2581 /** The time the master spent in the MP worker. */
2582 uint64_t cElapsedMasterTscTicks;
2583 /** The iTry value when stopped at. */
2584 uint32_t iTry;
2585 /** Set if the run timed out. */
2586 bool volatile fTimedOut;
2587 /** Pointer to the master's synchronization struct (on stack). */
2588 PSUPTSCDELTASYNC2 volatile pSyncMaster;
2589 /** Master data union. */
2590 union
2591 {
2592 /** Data (master) for delta verification. */
2593 struct
2594 {
2595 /** Verification test TSC values for the master. */
2596 uint64_t volatile auTscs[32];
2597 } Verify;
2598 /** Data (master) for measurement method \#2. */
2599 struct
2600 {
2601 /** Data and sequence number. */
2602 SUPDRVTSCDELTAMETHOD2 Data;
2603 /** The lag setting for the next run. */
2604 bool fLag;
2605 /** Number of hits. */
2606 uint32_t cHits;
2607 } M2;
2608 } uMaster;
2609 /** The verifier verdict, VINF_SUCCESS if ok, VERR_OUT_OF_RANGE if not,
2610 * VERR_TRY_AGAIN on timeout. */
2611 int32_t rcVerify;
2612#ifdef TSCDELTA_VERIFY_WITH_STATS
2613 /** The maximum difference between TSC read during delta verification. */
2614 int64_t cMaxVerifyTscTicks;
2615 /** The minimum difference between two TSC reads during verification. */
2616 int64_t cMinVerifyTscTicks;
2617 /** The bad TSC diff, worker relative to master (= worker - master).
2618 * Negative value means the worker is behind the master. */
2619 int64_t iVerifyBadTscDiff;
2620#endif
2621 /** @} */
2622
2623 /** Padding to make sure the worker variables live is in its own cache line. */
2624 uint64_t au64CacheLinePaddingBetween[GIP_TSC_DELTA_CACHE_LINE_SIZE / sizeof(uint64_t)];
2625
2626 /** @name Proletarian
2627 * @{ */
2628 /** Pointer to the worker's synchronization struct (on stack). */
2629 PSUPTSCDELTASYNC2 volatile pSyncWorker;
2630 /** The time the worker spent in the MP worker. */
2631 uint64_t cElapsedWorkerTscTicks;
2632 /** Worker data union. */
2633 union
2634 {
2635 /** Data (worker) for delta verification. */
2636 struct
2637 {
2638 /** Verification test TSC values for the worker. */
2639 uint64_t volatile auTscs[32];
2640 } Verify;
2641 /** Data (worker) for measurement method \#2. */
2642 struct
2643 {
2644 /** Data and sequence number. */
2645 SUPDRVTSCDELTAMETHOD2 Data;
2646 /** The lag setting for the next run (set by master). */
2647 bool fLag;
2648 } M2;
2649 } uWorker;
2650 /** @} */
2651
2652 /** Padding to make sure the above is in its own cache line. */
2653 uint64_t au64CacheLinePaddingAfter[GIP_TSC_DELTA_CACHE_LINE_SIZE / sizeof(uint64_t)];
2654} SUPDRVGIPTSCDELTARGS;
2655typedef SUPDRVGIPTSCDELTARGS *PSUPDRVGIPTSCDELTARGS;
2656
2657
2658/** @name Macros that implements the basic synchronization steps common to
2659 * the algorithms.
2660 *
2661 * Must be used from loop as the timeouts are implemented via 'break' statements
2662 * at the moment.
2663 *
2664 * @{
2665 */
2666#if defined(DEBUG_bird) /* || defined(VBOX_STRICT) */
2667# define TSCDELTA_DBG_VARS() uint32_t iDbgCounter
2668# define TSCDELTA_DBG_START_LOOP() do { iDbgCounter = 0; } while (0)
2669# define TSCDELTA_DBG_CHECK_LOOP() \
2670 do { iDbgCounter++; if ((iDbgCounter & UINT32_C(0x01ffffff)) == 0) RT_BREAKPOINT(); } while (0)
2671#else
2672# define TSCDELTA_DBG_VARS() ((void)0)
2673# define TSCDELTA_DBG_START_LOOP() ((void)0)
2674# define TSCDELTA_DBG_CHECK_LOOP() ((void)0)
2675#endif
2676#if 0
2677# define TSCDELTA_DBG_SYNC_MSG(a_Args) SUPR0Printf a_Args
2678#else
2679# define TSCDELTA_DBG_SYNC_MSG(a_Args) ((void)0)
2680#endif
2681#if 0
2682# define TSCDELTA_DBG_SYNC_MSG2(a_Args) SUPR0Printf a_Args
2683#else
2684# define TSCDELTA_DBG_SYNC_MSG2(a_Args) ((void)0)
2685#endif
2686#if 0
2687# define TSCDELTA_DBG_SYNC_MSG9(a_Args) SUPR0Printf a_Args
2688#else
2689# define TSCDELTA_DBG_SYNC_MSG9(a_Args) ((void)0)
2690#endif
2691
2692
/**
 * Performs the pre-measurement synchronization between the master and the
 * worker CPU (both run this function, with opposite @a fIsMaster values).
 *
 * Drives the READY -> STEADY -> GO handshake over the two on-stack
 * SUPTSCDELTASYNC2 structures and finishes with a sequence number ping-pong
 * that tries to get both CPUs executing in near lockstep.  Interrupts are
 * disabled once we leave the READY wait and remain disabled on success; the
 * caller restores them via TSCDELTA_MASTER_SYNC_AFTER /
 * TSCDELTA_OTHER_SYNC_AFTER using *pfEFlags.
 *
 * @returns true on success (interrupts disabled), false on timeout or abort
 *          (interrupts restored before returning).
 * @param   pMySync     Our own synchronization structure.
 * @param   pOtherSync  The other party's synchronization structure.
 * @param   fIsMaster   Set if we're the master, clear if the worker.
 * @param   pfEFlags    Where to return the saved interrupt flag state.
 * @param   pArgs       The argument package; fTimedOut is set on timeout.
 */
static bool supdrvTscDeltaSync2_Before(PSUPTSCDELTASYNC2 pMySync, PSUPTSCDELTASYNC2 pOtherSync,
                                       bool fIsMaster, PRTCCUINTREG pfEFlags, PSUPDRVGIPTSCDELTARGS pArgs)
{
    /* Master counts its lockstep sequence from 0, the worker from 256, so the
       two sides' sequence numbers can never be confused with each other. */
    uint32_t        iMySeq  = fIsMaster ? 0 : 256;
    uint32_t const  iMaxSeq = iMySeq + 16;  /* For the last loop, darn linux/freebsd C-ishness. */
    uint32_t        u32Tmp;
    uint32_t        iSync2Loops = 0;
    RTCCUINTREG     fEFlags;
    TSCDELTA_DBG_VARS();

    *pfEFlags = X86_EFL_IF | X86_EFL_1; /* should shut up most nagging compilers. */

    /*
     * The master tells the worker to get on it's mark.
     */
    if (fIsMaster)
    {
        if (RT_LIKELY(ASMAtomicCmpXchgU32(&pOtherSync->uSyncVar, GIP_TSC_DELTA_SYNC2_STEADY, GIP_TSC_DELTA_SYNC2_READY)))
        { /* likely*/ }
        else
        {
            TSCDELTA_DBG_SYNC_MSG(("sync/before/%s: #1 uSyncVar=%#x\n", fIsMaster ? "master" : "worker", pOtherSync->uSyncVar));
            return false;
        }
    }

    /*
     * Wait for the on your mark signal (ack in the master case). We process timeouts here.
     * Interrupts are disabled before each check so they are guaranteed off when
     * we break out on STEADY.
     */
    ASMAtomicWriteU32(&(pMySync)->uSyncSeq, 0);
    for (;;)
    {
        fEFlags = ASMIntDisableFlags();
        u32Tmp = ASMAtomicReadU32(&pMySync->uSyncVar);
        if (u32Tmp == GIP_TSC_DELTA_SYNC2_STEADY)
            break;
        ASMSetFlags(fEFlags);
        ASMNopPause();

        /* Abort? */
        if (u32Tmp != GIP_TSC_DELTA_SYNC2_READY)
        {
            TSCDELTA_DBG_SYNC_MSG(("sync/before/%s: #2 u32Tmp=%#x\n", fIsMaster ? "master" : "worker", u32Tmp));
            return false;
        }

        /* Check for timeouts every so often (not every loop in case RDTSC is
           trapping or something).  Must check the first time around. */
#if 0 /* For debugging the timeout paths. */
        static uint32_t volatile xxx;
#endif
        if (   (   (iSync2Loops & 0x3ff) == 0
                && ASMReadTSC() - pMySync->uTscStart > pMySync->cMaxTscTicks)
#if 0 /* This is crazy, I know, but enable this code and the results are markedly better when enabled on the 1.4GHz AMD (debug). */
            || (!fIsMaster && (++xxx & 0xf) == 0)
#endif
           )
        {
            /* Try switch our own state into timeout mode so the master cannot tell us to 'GO',
               ignore the timeout if we've got the go ahead already (simpler). */
            if (ASMAtomicCmpXchgU32(&pMySync->uSyncVar, GIP_TSC_DELTA_SYNC2_TIMEOUT, GIP_TSC_DELTA_SYNC2_READY))
            {
                TSCDELTA_DBG_SYNC_MSG(("sync/before/%s: timeout\n", fIsMaster ? "master" : "worker"));
                ASMAtomicCmpXchgU32(&pOtherSync->uSyncVar, GIP_TSC_DELTA_SYNC2_TIMEOUT, GIP_TSC_DELTA_SYNC2_STEADY);
                ASMAtomicWriteBool(&pArgs->fTimedOut, true);
                return false;
            }
        }
        iSync2Loops++;
    }

    /*
     * Interrupts are now disabled and will remain disabled until we do
     * TSCDELTA_MASTER_SYNC_AFTER / TSCDELTA_OTHER_SYNC_AFTER.
     */
    *pfEFlags = fEFlags;

    /*
     * The worker tells the master that it is on its mark and that the master
     * need to get into position as well.
     */
    if (!fIsMaster)
    {
        if (RT_LIKELY(ASMAtomicCmpXchgU32(&pOtherSync->uSyncVar, GIP_TSC_DELTA_SYNC2_STEADY, GIP_TSC_DELTA_SYNC2_READY)))
        { /* likely */ }
        else
        {
            ASMSetFlags(fEFlags);
            TSCDELTA_DBG_SYNC_MSG(("sync/before/%s: #3 uSyncVar=%#x\n", fIsMaster ? "master" : "worker", pOtherSync->uSyncVar));
            return false;
        }
    }

    /*
     * The master sends the 'go' to the worker and wait for ACK.
     */
    if (fIsMaster)
    {
        if (RT_LIKELY(ASMAtomicCmpXchgU32(&pOtherSync->uSyncVar, GIP_TSC_DELTA_SYNC2_GO, GIP_TSC_DELTA_SYNC2_STEADY)))
        { /* likely */ }
        else
        {
            ASMSetFlags(fEFlags);
            TSCDELTA_DBG_SYNC_MSG(("sync/before/%s: #4 uSyncVar=%#x\n", fIsMaster ? "master" : "worker", pOtherSync->uSyncVar));
            return false;
        }
    }

    /*
     * Wait for the 'go' signal (ack in the master case).
     */
    TSCDELTA_DBG_START_LOOP();
    for (;;)
    {
        u32Tmp = ASMAtomicReadU32(&pMySync->uSyncVar);
        if (u32Tmp == GIP_TSC_DELTA_SYNC2_GO)
            break;
        if (RT_LIKELY(u32Tmp == GIP_TSC_DELTA_SYNC2_STEADY))
        { /* likely */ }
        else
        {
            ASMSetFlags(fEFlags);
            TSCDELTA_DBG_SYNC_MSG(("sync/before/%s: #5 u32Tmp=%#x\n", fIsMaster ? "master" : "worker", u32Tmp));
            return false;
        }

        TSCDELTA_DBG_CHECK_LOOP();
        ASMNopPause();
    }

    /*
     * The worker acks the 'go' (shouldn't fail).
     */
    if (!fIsMaster)
    {
        if (RT_LIKELY(ASMAtomicCmpXchgU32(&pOtherSync->uSyncVar, GIP_TSC_DELTA_SYNC2_GO, GIP_TSC_DELTA_SYNC2_STEADY)))
        { /* likely */ }
        else
        {
            ASMSetFlags(fEFlags);
            TSCDELTA_DBG_SYNC_MSG(("sync/before/%s: #6 uSyncVar=%#x\n", fIsMaster ? "master" : "worker", pOtherSync->uSyncVar));
            return false;
        }
    }

    /*
     * Try enter mostly lockstep execution with it: publish our sequence
     * number, swap it into the other side, and check whether the other side
     * has published ours back - equality means both CPUs are in step.
     */
    for (;;)
    {
        uint32_t iOtherSeq1, iOtherSeq2;
        ASMCompilerBarrier();
        ASMSerializeInstruction();

        ASMAtomicWriteU32(&pMySync->uSyncSeq, iMySeq);
        ASMNopPause();
        iOtherSeq1 = ASMAtomicXchgU32(&pOtherSync->uSyncSeq, iMySeq);
        ASMNopPause();
        iOtherSeq2 = ASMAtomicReadU32(&pMySync->uSyncSeq);

        ASMCompilerBarrier();
        if (iOtherSeq1 == iOtherSeq2)
            return true;

        /* Did the other guy give up? Should we give up? UINT32_MAX is the
           'gave up' marker; we still return success and proceed best effort. */
        if (   iOtherSeq1 == UINT32_MAX
            || iOtherSeq2 == UINT32_MAX)
            return true;
        if (++iMySeq >= iMaxSeq)
        {
            ASMAtomicWriteU32(&pMySync->uSyncSeq, UINT32_MAX);
            return true;
        }
        ASMNopPause();
    }
}
2869
/** Does the master side of the pre-measurement synchronization dance.
 *
 * Wraps supdrvTscDeltaSync2_Before() with fIsMaster=true.  On synchronization
 * failure it logs and expands to a 'break' statement, so this macro may ONLY
 * be used directly inside the measurement loop it is supposed to abort.  The
 * dangling 'else do {} while (0)' tail makes the construct parse as a single
 * statement in all contexts. */
#define TSCDELTA_MASTER_SYNC_BEFORE(a_pMySync, a_pOtherSync, a_pfEFlags, a_pArgs) \
    if (RT_LIKELY(supdrvTscDeltaSync2_Before(a_pMySync, a_pOtherSync, true /*fIsMaster*/, a_pfEFlags, a_pArgs))) \
    { /*likely*/ } \
    else if (true) \
    { \
        TSCDELTA_DBG_SYNC_MSG9(("sync/before/master: #89\n")); \
        break; \
    } else do {} while (0)
/** The worker side counterpart of TSCDELTA_MASTER_SYNC_BEFORE.  Same caveat:
 *  expands to 'break' on failure, so loop context only. */
#define TSCDELTA_OTHER_SYNC_BEFORE(a_pMySync, a_pOtherSync, a_pfEFlags, a_pArgs) \
    if (RT_LIKELY(supdrvTscDeltaSync2_Before(a_pMySync, a_pOtherSync, false /*fIsMaster*/, a_pfEFlags, a_pArgs))) \
    { /*likely*/ } \
    else if (true) \
    { \
        TSCDELTA_DBG_SYNC_MSG9(("sync/before/other: #89\n")); \
        break; \
    } else do {} while (0)
2886
2887
/**
 * Completes a measurement sub-round by waiting for the partner CPU to signal
 * 'ready' (counterpart of supdrvTscDeltaSync2_Before).
 *
 * Note that interrupts are re-enabled (via @a fEFlags) *before* spinning, so
 * the busy-wait here does not keep the CPU with interrupts disabled.
 *
 * @returns true when the partner reached GIP_TSC_DELTA_SYNC2_READY (or, for
 *          the worker only, STEADY - see the race note below), false if the
 *          sync variable held an unexpected value (protocol broken).
 * @param   pMySync     My synchronization structure (the one being polled).
 * @param   pOtherSync  My partner's synchronization structure (not used here).
 * @param   fIsMaster   Set if master, clear if worker.
 * @param   fEFlags     The interrupt flags to restore before waiting.
 */
static bool supdrvTscDeltaSync2_After(PSUPTSCDELTASYNC2 pMySync, PSUPTSCDELTASYNC2 pOtherSync,
                                      bool fIsMaster, RTCCUINTREG fEFlags)
{
    TSCDELTA_DBG_VARS();

    /*
     * Wait for the 'ready' signal.  In the master's case, this means the
     * worker has completed its data collection, while in the worker's case it
     * means the master is done processing the data and it's time for the next
     * loop iteration (or whatever).
     */
    ASMSetFlags(fEFlags);
    TSCDELTA_DBG_START_LOOP();
    for (;;)
    {
        uint32_t u32Tmp = ASMAtomicReadU32(&pMySync->uSyncVar);
        if (   u32Tmp == GIP_TSC_DELTA_SYNC2_READY
            || (u32Tmp == GIP_TSC_DELTA_SYNC2_STEADY && !fIsMaster) /* kicked twice => race */ )
            return true;
        ASMNopPause();
        /* The only other legitimate state while waiting is 'go'; anything
           else means the protocol got out of step and we must bail. */
        if (RT_LIKELY(u32Tmp == GIP_TSC_DELTA_SYNC2_GO))
        { /* likely */}
        else
        {
            TSCDELTA_DBG_SYNC_MSG(("sync/after/other: #1 u32Tmp=%#x\n", u32Tmp));
            return false; /* shouldn't ever happen! */
        }
        TSCDELTA_DBG_CHECK_LOOP();
        ASMNopPause();
    }
}
2919
/** Master side: waits for the worker to finish its data collection.
 *  Expands to 'break' on failure, so it may only be used directly inside the
 *  measurement loop it is meant to abort. */
#define TSCDELTA_MASTER_SYNC_AFTER(a_pMySync, a_pOtherSync, a_fEFlags) \
    if (RT_LIKELY(supdrvTscDeltaSync2_After(a_pMySync, a_pOtherSync, true /*fIsMaster*/, a_fEFlags))) \
    { /* likely */ } \
    else if (true) \
    { \
        TSCDELTA_DBG_SYNC_MSG9(("sync/after/master: #97\n")); \
        break; \
    } else do {} while (0)

/** Master side: flips the worker's sync variable from GO to READY, releasing
 *  it from supdrvTscDeltaSync2_After().  Expands to 'break' on failure;
 *  loop context only. */
#define TSCDELTA_MASTER_KICK_OTHER_OUT_OF_AFTER(a_pMySync, a_pOtherSync) \
    /* \
     * Tell the worker that we're done processing the data and ready for the next round. \
     */ \
    if (RT_LIKELY(ASMAtomicCmpXchgU32(&(a_pOtherSync)->uSyncVar, GIP_TSC_DELTA_SYNC2_READY, GIP_TSC_DELTA_SYNC2_GO))) \
    { /* likely */ } \
    else if (true)\
    { \
        TSCDELTA_DBG_SYNC_MSG(("sync/after/master: #99 uSyncVar=%#x\n", (a_pOtherSync)->uSyncVar)); \
        break; \
    } else do {} while (0)

/** Worker side: acknowledges its data collection to the master and then waits
 *  for the master to finish processing.  Expands to 'break' on failure;
 *  loop context only. */
#define TSCDELTA_OTHER_SYNC_AFTER(a_pMySync, a_pOtherSync, a_fEFlags) \
    if (true) { \
        /* \
         * Tell the master that we're done collecting data and wait for the next round to start. \
         */ \
        if (RT_LIKELY(ASMAtomicCmpXchgU32(&(a_pOtherSync)->uSyncVar, GIP_TSC_DELTA_SYNC2_READY, GIP_TSC_DELTA_SYNC2_GO))) \
        { /* likely */ } \
        else \
        { \
            ASMSetFlags(a_fEFlags); \
            TSCDELTA_DBG_SYNC_MSG(("sync/after/other: #0 uSyncVar=%#x\n", (a_pOtherSync)->uSyncVar)); \
            break; \
        } \
        if (RT_LIKELY(supdrvTscDeltaSync2_After(a_pMySync, a_pOtherSync, false /*fIsMaster*/, a_fEFlags))) \
        { /* likely */ } \
        else \
        { \
            TSCDELTA_DBG_SYNC_MSG9(("sync/after/other: #98\n")); \
            break; \
        } \
    } else do {} while (0)
/** @} */
2963
2964
2965#ifdef GIP_TSC_DELTA_METHOD_1
2966/**
2967 * TSC delta measurment algorithm \#1 (GIP_TSC_DELTA_METHOD_1).
2968 *
2969 *
2970 * We ignore the first few runs of the loop in order to prime the
2971 * cache. Also, we need to be careful about using 'pause' instruction
2972 * in critical busy-wait loops in this code - it can cause undesired
2973 * behaviour with hyperthreading.
2974 *
2975 * We try to minimize the measurement error by computing the minimum
2976 * read time of the compare statement in the worker by taking TSC
2977 * measurements across it.
2978 *
2979 * It must be noted that the computed minimum read time is mostly to
2980 * eliminate huge deltas when the worker is too early and doesn't by
2981 * itself help produce more accurate deltas. We allow two times the
2982 * computed minimum as an arbibtrary acceptable threshold. Therefore,
2983 * it is still possible to get negative deltas where there are none
2984 * when the worker is earlier. As long as these occasional negative
2985 * deltas are lower than the time it takes to exit guest-context and
2986 * the OS to reschedule EMT on a different CPU we won't expose a TSC
2987 * that jumped backwards. It is because of the existence of the
2988 * negative deltas we don't recompute the delta with the master and
2989 * worker interchanged to eliminate the remaining measurement error.
2990 *
2991 *
2992 * @param pArgs The argument/state data.
2993 * @param pMySync My synchronization structure.
2994 * @param pOtherSync My partner's synchronization structure.
2995 * @param fIsMaster Set if master, clear if worker.
2996 * @param iTry The attempt number.
2997 */
2998static void supdrvTscDeltaMethod1Loop(PSUPDRVGIPTSCDELTARGS pArgs, PSUPTSCDELTASYNC2 pMySync, PSUPTSCDELTASYNC2 pOtherSync,
2999 bool fIsMaster, uint32_t iTry)
3000{
3001 PSUPGIPCPU pGipCpuWorker = pArgs->pWorker;
3002 PSUPGIPCPU pGipCpuMaster = pArgs->pMaster;
3003 uint64_t uMinCmpReadTime = UINT64_MAX;
3004 unsigned iLoop;
3005 NOREF(iTry);
3006
3007 for (iLoop = 0; iLoop < GIP_TSC_DELTA_LOOPS; iLoop++)
3008 {
3009 RTCCUINTREG fEFlags;
3010 if (fIsMaster)
3011 {
3012 /*
3013 * The master.
3014 */
3015 AssertMsg(pGipCpuMaster->u64TSCSample == GIP_TSC_DELTA_RSVD,
3016 ("%#llx idMaster=%#x idWorker=%#x (idGipMaster=%#x)\n",
3017 pGipCpuMaster->u64TSCSample, pGipCpuMaster->idCpu, pGipCpuWorker->idCpu, pArgs->pDevExt->idGipMaster));
3018 TSCDELTA_MASTER_SYNC_BEFORE(pMySync, pOtherSync, &fEFlags, pArgs);
3019
3020 do
3021 {
3022 ASMSerializeInstruction();
3023 ASMAtomicWriteU64(&pGipCpuMaster->u64TSCSample, ASMReadTSC());
3024 } while (pGipCpuMaster->u64TSCSample == GIP_TSC_DELTA_RSVD);
3025
3026 TSCDELTA_MASTER_SYNC_AFTER(pMySync, pOtherSync, fEFlags);
3027
3028 /* Process the data. */
3029 if (iLoop > GIP_TSC_DELTA_PRIMER_LOOPS + GIP_TSC_DELTA_READ_TIME_LOOPS)
3030 {
3031 if (pGipCpuWorker->u64TSCSample != GIP_TSC_DELTA_RSVD)
3032 {
3033 int64_t iDelta = pGipCpuWorker->u64TSCSample
3034 - (pGipCpuMaster->u64TSCSample - pGipCpuMaster->i64TSCDelta);
3035 if ( iDelta >= GIP_TSC_DELTA_INITIAL_MASTER_VALUE
3036 ? iDelta < pGipCpuWorker->i64TSCDelta
3037 : iDelta > pGipCpuWorker->i64TSCDelta || pGipCpuWorker->i64TSCDelta == INT64_MAX)
3038 pGipCpuWorker->i64TSCDelta = iDelta;
3039 }
3040 }
3041
3042 /* Reset our TSC sample and tell the worker to move on. */
3043 ASMAtomicWriteU64(&pGipCpuMaster->u64TSCSample, GIP_TSC_DELTA_RSVD);
3044 TSCDELTA_MASTER_KICK_OTHER_OUT_OF_AFTER(pMySync, pOtherSync);
3045 }
3046 else
3047 {
3048 /*
3049 * The worker.
3050 */
3051 uint64_t uTscWorker;
3052 uint64_t uTscWorkerFlushed;
3053 uint64_t uCmpReadTime;
3054
3055 ASMAtomicReadU64(&pGipCpuMaster->u64TSCSample); /* Warm the cache line. */
3056 TSCDELTA_OTHER_SYNC_BEFORE(pMySync, pOtherSync, &fEFlags, pArgs);
3057
3058 /*
3059 * Keep reading the TSC until we notice that the master has read his. Reading
3060 * the TSC -after- the master has updated the memory is way too late. We thus
3061 * compensate by trying to measure how long it took for the worker to notice
3062 * the memory flushed from the master.
3063 */
3064 do
3065 {
3066 ASMSerializeInstruction();
3067 uTscWorker = ASMReadTSC();
3068 } while (pGipCpuMaster->u64TSCSample == GIP_TSC_DELTA_RSVD);
3069 ASMSerializeInstruction();
3070 uTscWorkerFlushed = ASMReadTSC();
3071
3072 uCmpReadTime = uTscWorkerFlushed - uTscWorker;
3073 if (iLoop > GIP_TSC_DELTA_PRIMER_LOOPS + GIP_TSC_DELTA_READ_TIME_LOOPS)
3074 {
3075 /* This is totally arbitrary a.k.a I don't like it but I have no better ideas for now. */
3076 if (uCmpReadTime < (uMinCmpReadTime << 1))
3077 {
3078 ASMAtomicWriteU64(&pGipCpuWorker->u64TSCSample, uTscWorker);
3079 if (uCmpReadTime < uMinCmpReadTime)
3080 uMinCmpReadTime = uCmpReadTime;
3081 }
3082 else
3083 ASMAtomicWriteU64(&pGipCpuWorker->u64TSCSample, GIP_TSC_DELTA_RSVD);
3084 }
3085 else if (iLoop > GIP_TSC_DELTA_PRIMER_LOOPS)
3086 {
3087 if (uCmpReadTime < uMinCmpReadTime)
3088 uMinCmpReadTime = uCmpReadTime;
3089 }
3090
3091 TSCDELTA_OTHER_SYNC_AFTER(pMySync, pOtherSync, fEFlags);
3092 }
3093 }
3094
3095 TSCDELTA_DBG_SYNC_MSG9(("sync/method1loop/%s: #92 iLoop=%u MyState=%#x\n", fIsMaster ? "master" : "worker", iLoop,
3096 pMySync->uSyncVar));
3097
3098 /*
3099 * We must reset the worker TSC sample value in case it gets picked as a
3100 * GIP master later on (it's trashed above, naturally).
3101 */
3102 if (!fIsMaster)
3103 ASMAtomicWriteU64(&pGipCpuWorker->u64TSCSample, GIP_TSC_DELTA_RSVD);
3104}
3105#endif /* GIP_TSC_DELTA_METHOD_1 */
3106
3107
3108#ifdef GIP_TSC_DELTA_METHOD_2
3109/*
3110 * TSC delta measurement algorithm \#2 configuration and code - Experimental!!
3111 */
3112
3113# define GIP_TSC_DELTA_M2_LOOPS (7 + GIP_TSC_DELTA_M2_PRIMER_LOOPS)
3114# define GIP_TSC_DELTA_M2_PRIMER_LOOPS 0
3115
3116
3117static void supdrvTscDeltaMethod2ProcessDataOnMaster(PSUPDRVGIPTSCDELTARGS pArgs, uint32_t iLoop)
3118{
3119 int64_t iMasterTscDelta = pArgs->pMaster->i64TSCDelta;
3120 int64_t iBestDelta = pArgs->pWorker->i64TSCDelta;
3121 uint32_t idxResult;
3122 uint32_t cHits = 0;
3123
3124 /*
3125 * Look for matching entries in the master and worker tables.
3126 */
3127 for (idxResult = 0; idxResult < RT_ELEMENTS(pArgs->uMaster.M2.Data.aResults); idxResult++)
3128 {
3129 uint32_t idxOther = pArgs->uMaster.M2.Data.aResults[idxResult].iSeqOther;
3130 if (idxOther & 1)
3131 {
3132 idxOther >>= 1;
3133 if (idxOther < RT_ELEMENTS(pArgs->uWorker.M2.Data.aResults))
3134 {
3135 if (pArgs->uWorker.M2.Data.aResults[idxOther].iSeqOther == pArgs->uMaster.M2.Data.aResults[idxResult].iSeqMine)
3136 {
3137 int64_t iDelta;
3138 iDelta = pArgs->uWorker.M2.Data.aResults[idxOther].uTsc
3139 - (pArgs->uMaster.M2.Data.aResults[idxResult].uTsc - iMasterTscDelta);
3140 if ( iDelta >= GIP_TSC_DELTA_INITIAL_MASTER_VALUE
3141 ? iDelta < iBestDelta
3142 : iDelta > iBestDelta || iBestDelta == INT64_MAX)
3143 iBestDelta = iDelta;
3144 cHits++;
3145 }
3146 }
3147 }
3148 }
3149
3150 /*
3151 * Save the results.
3152 */
3153 if (cHits > 2)
3154 pArgs->pWorker->i64TSCDelta = iBestDelta;
3155 pArgs->uMaster.M2.cHits += cHits;
3156}
3157
3158
3159/**
3160 * The core function of the 2nd TSC delta mesurment algorithm.
3161 *
3162 * The idea here is that we have the two CPUs execute the exact same code
3163 * collecting a largish set of TSC samples. The code has one data dependency on
3164 * the other CPU which intention it is to synchronize the execution as well as
3165 * help cross references the two sets of TSC samples (the sequence numbers).
3166 *
3167 * The @a fLag parameter is used to modify the execution a tiny bit on one or
3168 * both of the CPUs. When @a fLag differs between the CPUs, it is thought that
3169 * it will help with making the CPUs enter lock step execution occationally.
3170 *
3171 */
3172static void supdrvTscDeltaMethod2CollectData(PSUPDRVTSCDELTAMETHOD2 pMyData, uint32_t volatile *piOtherSeqNo, bool fLag)
3173{
3174 SUPDRVTSCDELTAMETHOD2ENTRY *pEntry = &pMyData->aResults[0];
3175 uint32_t cLeft = RT_ELEMENTS(pMyData->aResults);
3176
3177 ASMAtomicWriteU32(&pMyData->iCurSeqNo, 0);
3178 ASMSerializeInstruction();
3179 while (cLeft-- > 0)
3180 {
3181 uint64_t uTsc;
3182 uint32_t iSeqMine = ASMAtomicIncU32(&pMyData->iCurSeqNo);
3183 uint32_t iSeqOther = ASMAtomicReadU32(piOtherSeqNo);
3184 ASMCompilerBarrier();
3185 ASMSerializeInstruction(); /* Way better result than with ASMMemoryFenceSSE2() in this position! */
3186 uTsc = ASMReadTSC();
3187 ASMAtomicIncU32(&pMyData->iCurSeqNo);
3188 ASMCompilerBarrier();
3189 ASMSerializeInstruction();
3190 pEntry->iSeqMine = iSeqMine;
3191 pEntry->iSeqOther = iSeqOther;
3192 pEntry->uTsc = uTsc;
3193 pEntry++;
3194 ASMSerializeInstruction();
3195 if (fLag)
3196 ASMNopPause();
3197 }
3198}
3199
3200
3201/**
3202 * TSC delta measurment algorithm \#2 (GIP_TSC_DELTA_METHOD_2).
3203 *
3204 * See supdrvTscDeltaMethod2CollectData for algorithm details.
3205 *
3206 * @param pArgs The argument/state data.
3207 * @param pMySync My synchronization structure.
3208 * @param pOtherSync My partner's synchronization structure.
3209 * @param fIsMaster Set if master, clear if worker.
3210 * @param iTry The attempt number.
3211 */
3212static void supdrvTscDeltaMethod2Loop(PSUPDRVGIPTSCDELTARGS pArgs, PSUPTSCDELTASYNC2 pMySync, PSUPTSCDELTASYNC2 pOtherSync,
3213 bool fIsMaster, uint32_t iTry)
3214{
3215 unsigned iLoop;
3216
3217 for (iLoop = 0; iLoop < GIP_TSC_DELTA_M2_LOOPS; iLoop++)
3218 {
3219 RTCCUINTREG fEFlags;
3220 if (fIsMaster)
3221 {
3222 /*
3223 * Adjust the loop lag fudge.
3224 */
3225# if GIP_TSC_DELTA_M2_PRIMER_LOOPS > 0
3226 if (iLoop < GIP_TSC_DELTA_M2_PRIMER_LOOPS)
3227 {
3228 /* Lag during the priming to be nice to everyone.. */
3229 pArgs->uMaster.M2.fLag = true;
3230 pArgs->uWorker.M2.fLag = true;
3231 }
3232 else
3233# endif
3234 if (iLoop < (GIP_TSC_DELTA_M2_LOOPS - GIP_TSC_DELTA_M2_PRIMER_LOOPS) / 4)
3235 {
3236 /* 25 % of the body without lagging. */
3237 pArgs->uMaster.M2.fLag = false;
3238 pArgs->uWorker.M2.fLag = false;
3239 }
3240 else if (iLoop < (GIP_TSC_DELTA_M2_LOOPS - GIP_TSC_DELTA_M2_PRIMER_LOOPS) / 4 * 2)
3241 {
3242 /* 25 % of the body with both lagging. */
3243 pArgs->uMaster.M2.fLag = true;
3244 pArgs->uWorker.M2.fLag = true;
3245 }
3246 else
3247 {
3248 /* 50% of the body with alternating lag. */
3249 pArgs->uMaster.M2.fLag = (iLoop & 1) == 0;
3250 pArgs->uWorker.M2.fLag= (iLoop & 1) == 1;
3251 }
3252
3253 /*
3254 * Sync up with the worker and collect data.
3255 */
3256 TSCDELTA_MASTER_SYNC_BEFORE(pMySync, pOtherSync, &fEFlags, pArgs);
3257 supdrvTscDeltaMethod2CollectData(&pArgs->uMaster.M2.Data, &pArgs->uWorker.M2.Data.iCurSeqNo, pArgs->uMaster.M2.fLag);
3258 TSCDELTA_MASTER_SYNC_AFTER(pMySync, pOtherSync, fEFlags);
3259
3260 /*
3261 * Process the data.
3262 */
3263# if GIP_TSC_DELTA_M2_PRIMER_LOOPS > 0
3264 if (iLoop >= GIP_TSC_DELTA_M2_PRIMER_LOOPS)
3265# endif
3266 supdrvTscDeltaMethod2ProcessDataOnMaster(pArgs, iLoop);
3267
3268 TSCDELTA_MASTER_KICK_OTHER_OUT_OF_AFTER(pMySync, pOtherSync);
3269 }
3270 else
3271 {
3272 /*
3273 * The worker.
3274 */
3275 TSCDELTA_OTHER_SYNC_BEFORE(pMySync, pOtherSync, &fEFlags, pArgs);
3276 supdrvTscDeltaMethod2CollectData(&pArgs->uWorker.M2.Data, &pArgs->uMaster.M2.Data.iCurSeqNo, pArgs->uWorker.M2.fLag);
3277 TSCDELTA_OTHER_SYNC_AFTER(pMySync, pOtherSync, fEFlags);
3278 }
3279 }
3280}
3281
3282#endif /* GIP_TSC_DELTA_METHOD_2 */
3283
3284
3285
/**
 * Verifies that applying @a iWorkerTscDelta to the worker's TSC never makes
 * the interleaved master/worker TSC sequence go backwards.
 *
 * The two CPUs take turns reading their TSC in strict ping-pong order, driven
 * by the GO / GO_GO states of the sync variables.  Afterwards the master
 * adjusts both sample sets by the respective deltas and checks that every
 * consecutive pair of (adjusted) readings is non-decreasing.
 *
 * Note! The TSCDELTA_*_SYNC_* macros expand to 'break' statements on
 *       synchronization failure; that is what exits the for(;;) below and
 *       reaches the timeout path at the bottom.
 *
 * @returns pArgs->rcVerify (VINF_SUCCESS or VERR_OUT_OF_RANGE) on a completed
 *          pass; VERR_TIMEOUT if the synchronization broke down, in which
 *          case pArgs->rcVerify is set to VERR_TRY_AGAIN.
 * @param   pArgs               The argument/state data.
 * @param   pMySync             My synchronization structure.
 * @param   pOtherSync          My partner's synchronization structure.
 * @param   fIsMaster           Set if master, clear if worker.
 * @param   iWorkerTscDelta     The candidate TSC delta for the worker CPU.
 */
static int supdrvTscDeltaVerify(PSUPDRVGIPTSCDELTARGS pArgs, PSUPTSCDELTASYNC2 pMySync,
                                PSUPTSCDELTASYNC2 pOtherSync, bool fIsMaster, int64_t iWorkerTscDelta)
{
    /*PSUPGIPCPU pGipCpuWorker = pArgs->pWorker; - unused */
    PSUPGIPCPU pGipCpuMaster = pArgs->pMaster;
    uint32_t i;
    TSCDELTA_DBG_VARS();

    for (;;)
    {
        RTCCUINTREG fEFlags;
        AssertCompile((RT_ELEMENTS(pArgs->uMaster.Verify.auTscs) & 1) == 0);
        AssertCompile(RT_ELEMENTS(pArgs->uMaster.Verify.auTscs) == RT_ELEMENTS(pArgs->uWorker.Verify.auTscs));

        if (fIsMaster)
        {
            uint64_t uTscWorker;
            TSCDELTA_MASTER_SYNC_BEFORE(pMySync, pOtherSync, &fEFlags, pArgs);

            /*
             * Collect TSC, master goes first.
             */
            for (i = 0; i < RT_ELEMENTS(pArgs->uMaster.Verify.auTscs); i += 2)
            {
                /* Read, kick & wait #1. */
                uint64_t register uTsc = ASMReadTSC();
                ASMAtomicWriteU32(&pOtherSync->uSyncVar, GIP_TSC_DELTA_SYNC2_GO_GO);
                ASMSerializeInstruction();
                pArgs->uMaster.Verify.auTscs[i] = uTsc;
                TSCDELTA_DBG_START_LOOP();
                while (ASMAtomicReadU32(&pMySync->uSyncVar) == GIP_TSC_DELTA_SYNC2_GO)
                {
                    TSCDELTA_DBG_CHECK_LOOP();
                    ASMNopPause();
                }

                /* Read, kick & wait #2. */
                uTsc = ASMReadTSC();
                ASMAtomicWriteU32(&pOtherSync->uSyncVar, GIP_TSC_DELTA_SYNC2_GO);
                ASMSerializeInstruction();
                pArgs->uMaster.Verify.auTscs[i + 1] = uTsc;
                TSCDELTA_DBG_START_LOOP();
                while (ASMAtomicReadU32(&pMySync->uSyncVar) == GIP_TSC_DELTA_SYNC2_GO_GO)
                {
                    TSCDELTA_DBG_CHECK_LOOP();
                    ASMNopPause();
                }
            }

            TSCDELTA_MASTER_SYNC_AFTER(pMySync, pOtherSync, fEFlags);

            /*
             * Process the data.  The interleaved (delta adjusted) sequence is
             * master[0], worker[0], master[1], worker[1], ... and each step
             * must be non-negative for the candidate delta to be acceptable.
             */
#ifdef TSCDELTA_VERIFY_WITH_STATS
            pArgs->cMaxVerifyTscTicks = INT64_MIN;
            pArgs->cMinVerifyTscTicks = INT64_MAX;
            pArgs->iVerifyBadTscDiff = 0;
#endif
            ASMAtomicWriteS32(&pArgs->rcVerify, VINF_SUCCESS);
            uTscWorker = 0;
            for (i = 0; i < RT_ELEMENTS(pArgs->uMaster.Verify.auTscs); i++)
            {
                /* Master vs previous worker entry. */
                uint64_t uTscMaster = pArgs->uMaster.Verify.auTscs[i] - pGipCpuMaster->i64TSCDelta;
                int64_t iDiff;
                if (i > 0)
                {
                    iDiff = uTscMaster - uTscWorker;
#ifdef TSCDELTA_VERIFY_WITH_STATS
                    if (iDiff > pArgs->cMaxVerifyTscTicks)
                        pArgs->cMaxVerifyTscTicks = iDiff;
                    if (iDiff < pArgs->cMinVerifyTscTicks)
                        pArgs->cMinVerifyTscTicks = iDiff;
#endif
                    if (iDiff < 0)
                    {
#ifdef TSCDELTA_VERIFY_WITH_STATS
                        pArgs->iVerifyBadTscDiff = -iDiff;
#endif
                        ASMAtomicWriteS32(&pArgs->rcVerify, VERR_OUT_OF_RANGE);
                        break;
                    }
                }

                /* Worker vs master. */
                uTscWorker = pArgs->uWorker.Verify.auTscs[i] - iWorkerTscDelta;
                iDiff = uTscWorker - uTscMaster;
#ifdef TSCDELTA_VERIFY_WITH_STATS
                if (iDiff > pArgs->cMaxVerifyTscTicks)
                    pArgs->cMaxVerifyTscTicks = iDiff;
                if (iDiff < pArgs->cMinVerifyTscTicks)
                    pArgs->cMinVerifyTscTicks = iDiff;
#endif
                if (iDiff < 0)
                {
#ifdef TSCDELTA_VERIFY_WITH_STATS
                    pArgs->iVerifyBadTscDiff = iDiff;
#endif
                    ASMAtomicWriteS32(&pArgs->rcVerify, VERR_OUT_OF_RANGE);
                    break;
                }
            }

            /* Done. */
            TSCDELTA_MASTER_KICK_OTHER_OUT_OF_AFTER(pMySync, pOtherSync);
        }
        else
        {
            /*
             * The worker, master leads.
             */
            TSCDELTA_OTHER_SYNC_BEFORE(pMySync, pOtherSync, &fEFlags, pArgs);

            for (i = 0; i < RT_ELEMENTS(pArgs->uWorker.Verify.auTscs); i += 2)
            {
                uint64_t register uTsc;

                /* Wait, Read and Kick #1. */
                TSCDELTA_DBG_START_LOOP();
                while (ASMAtomicReadU32(&pMySync->uSyncVar) == GIP_TSC_DELTA_SYNC2_GO)
                {
                    TSCDELTA_DBG_CHECK_LOOP();
                    ASMNopPause();
                }
                uTsc = ASMReadTSC();
                ASMAtomicWriteU32(&pOtherSync->uSyncVar, GIP_TSC_DELTA_SYNC2_GO_GO);
                ASMSerializeInstruction();
                pArgs->uWorker.Verify.auTscs[i] = uTsc;

                /* Wait, Read and Kick #2. */
                TSCDELTA_DBG_START_LOOP();
                while (ASMAtomicReadU32(&pMySync->uSyncVar) == GIP_TSC_DELTA_SYNC2_GO_GO)
                {
                    TSCDELTA_DBG_CHECK_LOOP();
                    ASMNopPause();
                }
                uTsc = ASMReadTSC();
                ASMAtomicWriteU32(&pOtherSync->uSyncVar, GIP_TSC_DELTA_SYNC2_GO);
                ASMSerializeInstruction();
                pArgs->uWorker.Verify.auTscs[i + 1] = uTsc;
            }

            TSCDELTA_OTHER_SYNC_AFTER(pMySync, pOtherSync, fEFlags);
        }
        return pArgs->rcVerify;
    }

    /*
     * Timed out, please retry.
     */
    ASMAtomicWriteS32(&pArgs->rcVerify, VERR_TRY_AGAIN);
    return VERR_TIMEOUT;
}
3440
3441
3442
3443/**
3444 * Handles the special abort procedure during synchronization setup in
3445 * supdrvMeasureTscDeltaCallbackUnwrapped().
3446 *
3447 * @returns 0 (dummy, ignored)
3448 * @param pArgs Pointer to argument/state data.
3449 * @param pMySync Pointer to my sync structure.
3450 * @param fIsMaster Set if we're the master, clear if worker.
3451 * @param fTimeout Set if it's a timeout.
3452 */
3453DECL_NO_INLINE(static, int)
3454supdrvMeasureTscDeltaCallbackAbortSyncSetup(PSUPDRVGIPTSCDELTARGS pArgs, PSUPTSCDELTASYNC2 pMySync, bool fIsMaster, bool fTimeout)
3455{
3456 PSUPTSCDELTASYNC2 volatile *ppMySync = fIsMaster ? &pArgs->pSyncMaster : &pArgs->pSyncWorker;
3457 PSUPTSCDELTASYNC2 volatile *ppOtherSync = fIsMaster ? &pArgs->pSyncWorker : &pArgs->pSyncMaster;
3458 TSCDELTA_DBG_VARS();
3459
3460 /*
3461 * Clear our sync pointer and make sure the abort flag is set.
3462 */
3463 ASMAtomicWriteNullPtr(ppMySync);
3464 ASMAtomicWriteBool(&pArgs->fAbortSetup, true);
3465 if (fTimeout)
3466 ASMAtomicWriteBool(&pArgs->fTimedOut, true);
3467
3468 /*
3469 * Make sure the other party is out of there and won't be touching our
3470 * sync state again (would cause stack corruption).
3471 */
3472 TSCDELTA_DBG_START_LOOP();
3473 while (ASMAtomicReadPtrT(ppOtherSync, PSUPTSCDELTASYNC2) != NULL)
3474 {
3475 ASMNopPause();
3476 ASMNopPause();
3477 ASMNopPause();
3478 TSCDELTA_DBG_CHECK_LOOP();
3479 }
3480
3481 return 0;
3482}
3483
3484
3485/**
3486 * This is used by supdrvMeasureInitialTscDeltas() to read the TSC on two CPUs
3487 * and compute the delta between them.
3488 *
3489 * To reduce code size a good when timeout handling was added, a dummy return
3490 * value had to be added (saves 1-3 lines per timeout case), thus this
3491 * 'Unwrapped' function and the dummy 0 return value.
3492 *
3493 * @returns 0 (dummy, ignored)
3494 * @param idCpu The CPU we are current scheduled on.
3495 * @param pArgs Pointer to a parameter package.
3496 *
3497 * @remarks Measuring TSC deltas between the CPUs is tricky because we need to
3498 * read the TSC at exactly the same time on both the master and the
3499 * worker CPUs. Due to DMA, bus arbitration, cache locality,
3500 * contention, SMI, pipelining etc. there is no guaranteed way of
3501 * doing this on x86 CPUs.
3502 */
3503static int supdrvMeasureTscDeltaCallbackUnwrapped(RTCPUID idCpu, PSUPDRVGIPTSCDELTARGS pArgs)
3504{
3505 PSUPDRVDEVEXT pDevExt = pArgs->pDevExt;
3506 PSUPGIPCPU pGipCpuWorker = pArgs->pWorker;
3507 PSUPGIPCPU pGipCpuMaster = pArgs->pMaster;
3508 bool const fIsMaster = idCpu == pGipCpuMaster->idCpu;
3509 uint32_t iTry;
3510 PSUPTSCDELTASYNC2 volatile *ppMySync = fIsMaster ? &pArgs->pSyncMaster : &pArgs->pSyncWorker;
3511 PSUPTSCDELTASYNC2 volatile *ppOtherSync = fIsMaster ? &pArgs->pSyncWorker : &pArgs->pSyncMaster;
3512 SUPTSCDELTASYNC2 MySync;
3513 PSUPTSCDELTASYNC2 pOtherSync;
3514 int rc;
3515 TSCDELTA_DBG_VARS();
3516
3517 /* A bit of paranoia first. */
3518 if (!pGipCpuMaster || !pGipCpuWorker)
3519 return 0;
3520
3521 /*
3522 * If the CPU isn't part of the measurement, return immediately.
3523 */
3524 if ( !fIsMaster
3525 && idCpu != pGipCpuWorker->idCpu)
3526 return 0;
3527
3528 /*
3529 * Set up my synchronization stuff and wait for the other party to show up.
3530 *
3531 * We don't wait forever since the other party may be off fishing (offline,
3532 * spinning with ints disables, whatever), we must play nice to the rest of
3533 * the system as this context generally isn't one in which we will get
3534 * preempted and we may hold up a number of lower priority interrupts.
3535 */
3536 ASMAtomicWriteU32(&MySync.uSyncVar, GIP_TSC_DELTA_SYNC2_PRESTART_WAIT);
3537 ASMAtomicWritePtr(ppMySync, &MySync);
3538 MySync.uTscStart = ASMReadTSC();
3539 MySync.cMaxTscTicks = pArgs->cMaxTscTicks;
3540
3541 /* Look for the partner, might not be here yet... Special abort considerations. */
3542 iTry = 0;
3543 TSCDELTA_DBG_START_LOOP();
3544 while ((pOtherSync = ASMAtomicReadPtrT(ppOtherSync, PSUPTSCDELTASYNC2)) == NULL)
3545 {
3546 ASMNopPause();
3547 if ( ASMAtomicReadBool(&pArgs->fAbortSetup)
3548 || !RTMpIsCpuOnline(fIsMaster ? pGipCpuWorker->idCpu : pGipCpuWorker->idCpu) )
3549 return supdrvMeasureTscDeltaCallbackAbortSyncSetup(pArgs, &MySync, fIsMaster, false /*fTimeout*/);
3550 if ( (iTry++ & 0xff) == 0
3551 && ASMReadTSC() - MySync.uTscStart > pArgs->cMaxTscTicks)
3552 return supdrvMeasureTscDeltaCallbackAbortSyncSetup(pArgs, &MySync, fIsMaster, true /*fTimeout*/);
3553 TSCDELTA_DBG_CHECK_LOOP();
3554 ASMNopPause();
3555 }
3556
3557 /* I found my partner, waiting to be found... Special abort considerations. */
3558 if (fIsMaster)
3559 if (!ASMAtomicCmpXchgU32(&pOtherSync->uSyncVar, GIP_TSC_DELTA_SYNC2_READY, GIP_TSC_DELTA_SYNC2_PRESTART_WAIT)) /* parnaoia */
3560 return supdrvMeasureTscDeltaCallbackAbortSyncSetup(pArgs, &MySync, fIsMaster, false /*fTimeout*/);
3561
3562 iTry = 0;
3563 TSCDELTA_DBG_START_LOOP();
3564 while (ASMAtomicReadU32(&MySync.uSyncVar) == GIP_TSC_DELTA_SYNC2_PRESTART_WAIT)
3565 {
3566 ASMNopPause();
3567 if (ASMAtomicReadBool(&pArgs->fAbortSetup))
3568 return supdrvMeasureTscDeltaCallbackAbortSyncSetup(pArgs, &MySync, fIsMaster, false /*fTimeout*/);
3569 if ( (iTry++ & 0xff) == 0
3570 && ASMReadTSC() - MySync.uTscStart > pArgs->cMaxTscTicks)
3571 {
3572 if ( fIsMaster
3573 && !ASMAtomicCmpXchgU32(&MySync.uSyncVar, GIP_TSC_DELTA_SYNC2_PRESTART_ABORT, GIP_TSC_DELTA_SYNC2_PRESTART_WAIT))
3574 break; /* race #1: slave has moved on, handle timeout in loop instead. */
3575 return supdrvMeasureTscDeltaCallbackAbortSyncSetup(pArgs, &MySync, fIsMaster, true /*fTimeout*/);
3576 }
3577 TSCDELTA_DBG_CHECK_LOOP();
3578 }
3579
3580 if (!fIsMaster)
3581 if (!ASMAtomicCmpXchgU32(&pOtherSync->uSyncVar, GIP_TSC_DELTA_SYNC2_READY, GIP_TSC_DELTA_SYNC2_PRESTART_WAIT)) /* race #1 */
3582 return supdrvMeasureTscDeltaCallbackAbortSyncSetup(pArgs, &MySync, fIsMaster, false /*fTimeout*/);
3583
3584/** @todo Add a resumable state to pArgs so we don't waste time if we time
3585 * out or something. Timeouts are legit, any of the two CPUs may get
3586 * interrupted. */
3587
3588 /*
3589 * Start by seeing if we have a zero delta between the two CPUs.
3590 * This should normally be the case.
3591 */
3592 rc = supdrvTscDeltaVerify(pArgs, &MySync, pOtherSync, fIsMaster, GIP_TSC_DELTA_INITIAL_MASTER_VALUE);
3593 if (RT_SUCCESS(rc))
3594 {
3595 if (fIsMaster)
3596 {
3597 ASMAtomicWriteS64(&pGipCpuWorker->i64TSCDelta, GIP_TSC_DELTA_INITIAL_MASTER_VALUE);
3598 RTCpuSetDelByIndex(&pDevExt->TscDeltaCpuSet, pGipCpuWorker->iCpuSet);
3599 RTCpuSetAddByIndex(&pDevExt->TscDeltaObtainedCpuSet, pGipCpuWorker->iCpuSet);
3600 }
3601 }
3602 /*
3603 * If the verification didn't time out, do regular delta measurements.
3604 * We retry this until we get a reasonable value.
3605 */
3606 else if (rc != VERR_TIMEOUT)
3607 {
3608 Assert(pGipCpuWorker->i64TSCDelta == INT64_MAX);
3609 for (iTry = 0; iTry < 12; iTry++)
3610 {
3611 /*
3612 * Check the state before we start.
3613 */
3614 uint32_t u32Tmp = ASMAtomicReadU32(&MySync.uSyncVar);
3615 if ( u32Tmp != GIP_TSC_DELTA_SYNC2_READY
3616 && (fIsMaster || u32Tmp != GIP_TSC_DELTA_SYNC2_STEADY) /* worker may be late prepping for the next round */ )
3617 {
3618 TSCDELTA_DBG_SYNC_MSG(("sync/loop/%s: #0 iTry=%u MyState=%#x\n", fIsMaster ? "master" : "worker", iTry, u32Tmp));
3619 break;
3620 }
3621
3622 /*
3623 * Do the measurements.
3624 */
3625#ifdef GIP_TSC_DELTA_METHOD_1
3626 supdrvTscDeltaMethod1Loop(pArgs, &MySync, pOtherSync, fIsMaster, iTry);
3627#elif defined(GIP_TSC_DELTA_METHOD_2)
3628 supdrvTscDeltaMethod2Loop(pArgs, &MySync, pOtherSync, fIsMaster, iTry);
3629#else
3630# error "huh??"
3631#endif
3632
3633 /*
3634 * Check the state.
3635 */
3636 u32Tmp = ASMAtomicReadU32(&MySync.uSyncVar);
3637 if ( u32Tmp != GIP_TSC_DELTA_SYNC2_READY
3638 && (fIsMaster || u32Tmp != GIP_TSC_DELTA_SYNC2_STEADY) /* worker may be late prepping for the next round */ )
3639 {
3640 if (fIsMaster)
3641 TSCDELTA_DBG_SYNC_MSG(("sync/loop/master: #1 iTry=%u MyState=%#x\n", iTry, u32Tmp));
3642 else
3643 TSCDELTA_DBG_SYNC_MSG2(("sync/loop/worker: #1 iTry=%u MyState=%#x\n", iTry, u32Tmp));
3644 break;
3645 }
3646
3647 /*
3648 * Success? If so, stop trying. Master decides.
3649 */
3650 if (fIsMaster)
3651 {
3652 if (pGipCpuWorker->i64TSCDelta != INT64_MAX)
3653 {
3654 RTCpuSetDelByIndex(&pDevExt->TscDeltaCpuSet, pGipCpuWorker->iCpuSet);
3655 RTCpuSetAddByIndex(&pDevExt->TscDeltaObtainedCpuSet, pGipCpuWorker->iCpuSet);
3656 TSCDELTA_DBG_SYNC_MSG2(("sync/loop/master: #9 iTry=%u MyState=%#x\n", iTry, MySync.uSyncVar));
3657 break;
3658 }
3659 }
3660 }
3661 if (fIsMaster)
3662 pArgs->iTry = iTry;
3663 }
3664
3665 /*
3666 * End the synchroniziation dance. We tell the other that we're done,
3667 * then wait for the same kind of reply.
3668 */
3669 ASMAtomicWriteU32(&pOtherSync->uSyncVar, GIP_TSC_DELTA_SYNC2_FINAL);
3670 ASMAtomicWriteNullPtr(ppMySync);
3671 iTry = 0;
3672 TSCDELTA_DBG_START_LOOP();
3673 while (ASMAtomicReadU32(&MySync.uSyncVar) != GIP_TSC_DELTA_SYNC2_FINAL)
3674 {
3675 iTry++;
3676 if ( iTry == 0
3677 && !RTMpIsCpuOnline(fIsMaster ? pGipCpuWorker->idCpu : pGipCpuWorker->idCpu))
3678 break; /* this really shouldn't happen. */
3679 TSCDELTA_DBG_CHECK_LOOP();
3680 ASMNopPause();
3681 }
3682
3683 /*
3684 * Collect some runtime stats.
3685 */
3686 if (fIsMaster)
3687 pArgs->cElapsedMasterTscTicks = ASMReadTSC() - MySync.uTscStart;
3688 else
3689 pArgs->cElapsedWorkerTscTicks = ASMReadTSC() - MySync.uTscStart;
3690 return 0;
3691}
3692
3693/**
3694 * Callback used by supdrvMeasureInitialTscDeltas() to read the TSC on two CPUs
3695 * and compute the delta between them.
3696 *
3697 * @param idCpu The CPU we are current scheduled on.
3698 * @param pvUser1 Pointer to a parameter package (SUPDRVGIPTSCDELTARGS).
3699 * @param pvUser2 Unused.
3700 */
3701static DECLCALLBACK(void) supdrvMeasureTscDeltaCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
3702{
3703 supdrvMeasureTscDeltaCallbackUnwrapped(idCpu, (PSUPDRVGIPTSCDELTARGS)pvUser1);
3704}
3705
3706
3707/**
3708 * Measures the TSC delta between the master GIP CPU and one specified worker
3709 * CPU.
3710 *
3711 * @returns VBox status code.
3712 * @retval VERR_SUPDRV_TSC_DELTA_MEASUREMENT_FAILED on pure measurement
3713 * failure.
3714 * @param pDevExt Pointer to the device instance data.
3715 * @param idxWorker The index of the worker CPU from the GIP's array of
3716 * CPUs.
3717 *
3718 * @remarks This must be called with preemption enabled!
3719 */
static int supdrvMeasureTscDeltaOne(PSUPDRVDEVEXT pDevExt, uint32_t idxWorker)
{
    int                rc;
    int                rc2;
    PSUPGLOBALINFOPAGE pGip          = pDevExt->pGip;
    RTCPUID            idMaster      = pDevExt->idGipMaster;
    PSUPGIPCPU         pGipCpuWorker = &pGip->aCPUs[idxWorker];
    PSUPGIPCPU         pGipCpuMaster;
    uint32_t           iGipCpuMaster;

    /* Validate input a bit. */
    AssertReturn(pGip, VERR_INVALID_PARAMETER);
    Assert(pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED);
    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD)); /* See @remarks: must be called with preemption enabled. */

    /*
     * Don't attempt measuring the delta for the GIP master.
     */
    if (pGipCpuWorker->idCpu == idMaster)
    {
        if (pGipCpuWorker->i64TSCDelta == INT64_MAX) /* This shouldn't happen, but just in case. */
            ASMAtomicWriteS64(&pGipCpuWorker->i64TSCDelta, GIP_TSC_DELTA_INITIAL_MASTER_VALUE);
        return VINF_SUCCESS;
    }

    /*
     * One measurement at a time, at least for now.  We might be using
     * broadcast IPIs, so be nice to the rest of the system.
     */
#ifdef SUPDRV_USE_MUTEX_FOR_GIP
    rc = RTSemMutexRequest(pDevExt->mtxTscDelta, RT_INDEFINITE_WAIT);
#else
    rc = RTSemFastMutexRequest(pDevExt->mtxTscDelta);
#endif
    if (RT_FAILURE(rc))
        return rc;

    /*
     * If the CPU has hyper-threading and the APIC IDs of the master and worker are adjacent,
     * try pick a different master.  (This fudge only works with multi core systems.)
     * ASSUMES related threads have adjacent APIC IDs.  ASSUMES two threads per core.
     *
     * We skip this on AMDs for now as their HTT is different from intel's and
     * it doesn't seem to have any favorable effect on the results.
     *
     * If the master is offline, we need a new master too, so share the code.
     */
    iGipCpuMaster = supdrvGipFindCpuIndexForCpuId(pGip, idMaster);
    AssertReturn(iGipCpuMaster < pGip->cCpus, VERR_INVALID_CPU_ID);
    pGipCpuMaster = &pGip->aCPUs[iGipCpuMaster];
    if (   (   (pGipCpuMaster->idApic & ~1) == (pGipCpuWorker->idApic & ~1) /* same core, different thread (ASSUMPTION above) */
            && ASMHasCpuId()
            && ASMIsValidStdRange(ASMCpuId_EAX(0))
            && (ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_HTT)
            && !ASMIsAmdCpu()
            && pGip->cOnlineCpus > 2)
        || !RTMpIsCpuOnline(idMaster) )
    {
        /* Look for another online CPU with a valid delta that isn't the worker,
           isn't the current master, and doesn't share an APIC ID with either. */
        uint32_t i;
        for (i = 0; i < pGip->cCpus; i++)
            if (   i != iGipCpuMaster
                && i != idxWorker
                && pGip->aCPUs[i].enmState == SUPGIPCPUSTATE_ONLINE
                && pGip->aCPUs[i].i64TSCDelta != INT64_MAX
                && pGip->aCPUs[i].idCpu != NIL_RTCPUID
                && pGip->aCPUs[i].idCpu != idMaster /* paranoia starts here... */
                && pGip->aCPUs[i].idCpu != pGipCpuWorker->idCpu
                && pGip->aCPUs[i].idApic != pGipCpuWorker->idApic
                && pGip->aCPUs[i].idApic != pGipCpuMaster->idApic
                && RTMpIsCpuOnline(pGip->aCPUs[i].idCpu))
            {
                iGipCpuMaster = i;
                pGipCpuMaster = &pGip->aCPUs[i];
                idMaster = pGipCpuMaster->idCpu;
                break;
            }
    }

    if (RTCpuSetIsMemberByIndex(&pGip->OnlineCpuSet, pGipCpuWorker->iCpuSet))
    {
        /*
         * Initialize data package for the RTMpOnPair callback.
         */
        PSUPDRVGIPTSCDELTARGS pArgs = (PSUPDRVGIPTSCDELTARGS)RTMemAllocZ(sizeof(*pArgs));
        if (pArgs)
        {
            pArgs->pWorker      = pGipCpuWorker;
            pArgs->pMaster      = pGipCpuMaster;
            pArgs->pDevExt      = pDevExt;
            pArgs->pSyncMaster  = NULL;
            pArgs->pSyncWorker  = NULL;
            pArgs->cMaxTscTicks = ASMAtomicReadU64(&pGip->u64CpuHz) / 512; /* 1953 us */

            /*
             * Do the RTMpOnPair call.  We reset i64TSCDelta first so we
             * and supdrvMeasureTscDeltaCallback can use it as a success check.
             */
            /** @todo Store the i64TSCDelta result in pArgs first? Perhaps deals with
             *        that when doing the restart loop reorg. */
            ASMAtomicWriteS64(&pGipCpuWorker->i64TSCDelta, INT64_MAX);
            rc = RTMpOnPair(pGipCpuMaster->idCpu, pGipCpuWorker->idCpu, RTMPON_F_CONCURRENT_EXEC,
                            supdrvMeasureTscDeltaCallback, pArgs, NULL);
            if (RT_SUCCESS(rc))
            {
#if 0
                SUPR0Printf("mponpair ticks: %9llu %9llu max: %9llu iTry: %u%s\n", pArgs->cElapsedMasterTscTicks,
                            pArgs->cElapsedWorkerTscTicks, pArgs->cMaxTscTicks, pArgs->iTry,
                            pArgs->fTimedOut ? " timed out" :"");
#endif
#if 0
                SUPR0Printf("rcVerify=%d iVerifyBadTscDiff=%lld cMinVerifyTscTicks=%lld cMaxVerifyTscTicks=%lld\n",
                            pArgs->rcVerify, pArgs->iVerifyBadTscDiff, pArgs->cMinVerifyTscTicks, pArgs->cMaxVerifyTscTicks);
#endif
                if (RT_LIKELY(pGipCpuWorker->i64TSCDelta != INT64_MAX))
                {
                    /*
                     * Work the TSC delta applicability rating.  It starts
                     * optimistic in supdrvGipInit, we downgrade it here.
                     */
                    SUPGIPUSETSCDELTA enmRating;
                    if (   pGipCpuWorker->i64TSCDelta >  GIP_TSC_DELTA_THRESHOLD_ROUGHLY_ZERO
                        || pGipCpuWorker->i64TSCDelta < -GIP_TSC_DELTA_THRESHOLD_ROUGHLY_ZERO)
                        enmRating = SUPGIPUSETSCDELTA_NOT_ZERO;
                    else if (   pGipCpuWorker->i64TSCDelta >  GIP_TSC_DELTA_THRESHOLD_PRACTICALLY_ZERO
                             || pGipCpuWorker->i64TSCDelta < -GIP_TSC_DELTA_THRESHOLD_PRACTICALLY_ZERO)
                        enmRating = SUPGIPUSETSCDELTA_ROUGHLY_ZERO;
                    else
                        enmRating = SUPGIPUSETSCDELTA_PRACTICALLY_ZERO;
                    if (pGip->enmUseTscDelta < enmRating)
                    {
                        /* Ratings only ever get worse (numerically larger); racing writers are benign. */
                        AssertCompile(sizeof(pGip->enmUseTscDelta) == sizeof(uint32_t));
                        ASMAtomicWriteU32((uint32_t volatile *)&pGip->enmUseTscDelta, enmRating);
                    }
                }
                else
                    rc = VERR_SUPDRV_TSC_DELTA_MEASUREMENT_FAILED;
            }
            /** @todo return try-again if we get an offline CPU error. */

            RTMemFree(pArgs);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    else
        rc = VERR_CPU_OFFLINE;

    /*
     * We're done now.
     */
#ifdef SUPDRV_USE_MUTEX_FOR_GIP
    rc2 = RTSemMutexRelease(pDevExt->mtxTscDelta); AssertRC(rc2);
#else
    rc2 = RTSemFastMutexRelease(pDevExt->mtxTscDelta); AssertRC(rc2);
#endif
    return rc;
}
3877
3878
3879/**
3880 * Resets the TSC-delta related TSC samples and optionally the deltas
3881 * themselves.
3882 *
3883 * @param pDevExt Pointer to the device instance data.
3884 * @param fResetTscDeltas Whether the TSC-deltas are also to be reset.
3885 *
3886 * @remarks This might be called while holding a spinlock!
3887 */
3888static void supdrvTscResetSamples(PSUPDRVDEVEXT pDevExt, bool fResetTscDeltas)
3889{
3890 unsigned iCpu;
3891 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
3892 for (iCpu = 0; iCpu < pGip->cCpus; iCpu++)
3893 {
3894 PSUPGIPCPU pGipCpu = &pGip->aCPUs[iCpu];
3895 ASMAtomicWriteU64(&pGipCpu->u64TSCSample, GIP_TSC_DELTA_RSVD);
3896 if (fResetTscDeltas)
3897 {
3898 RTCpuSetDelByIndex(&pDevExt->TscDeltaObtainedCpuSet, pGipCpu->iCpuSet);
3899 ASMAtomicWriteS64(&pGipCpu->i64TSCDelta, INT64_MAX);
3900 }
3901 }
3902}
3903
3904
3905/**
3906 * Picks an online CPU as the master TSC for TSC-delta computations.
3907 *
3908 * @returns VBox status code.
3909 * @param pDevExt Pointer to the device instance data.
3910 * @param pidxMaster Where to store the CPU array index of the chosen
3911 * master. Optional, can be NULL.
3912 */
3913static int supdrvTscPickMaster(PSUPDRVDEVEXT pDevExt, uint32_t *pidxMaster)
3914{
3915 /*
3916 * Pick the first CPU online as the master TSC and make it the new GIP master based
3917 * on the APIC ID.
3918 *
3919 * Technically we can simply use "idGipMaster" but doing this gives us master as CPU 0
3920 * in most cases making it nicer/easier for comparisons. It is safe to update the GIP
3921 * master as this point since the sync/async timer isn't created yet.
3922 */
3923 unsigned iCpu;
3924 uint32_t idxMaster = UINT32_MAX;
3925 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
3926 for (iCpu = 0; iCpu < RT_ELEMENTS(pGip->aiCpuFromApicId); iCpu++)
3927 {
3928 uint16_t idxCpu = pGip->aiCpuFromApicId[iCpu];
3929 if (idxCpu != UINT16_MAX)
3930 {
3931 PSUPGIPCPU pGipCpu = &pGip->aCPUs[idxCpu];
3932 if (RTCpuSetIsMemberByIndex(&pGip->OnlineCpuSet, pGipCpu->iCpuSet))
3933 {
3934 idxMaster = idxCpu;
3935 pGipCpu->i64TSCDelta = GIP_TSC_DELTA_INITIAL_MASTER_VALUE;
3936 ASMAtomicWriteSize(&pDevExt->idGipMaster, pGipCpu->idCpu);
3937 if (pidxMaster)
3938 *pidxMaster = idxMaster;
3939 return VINF_SUCCESS;
3940 }
3941 }
3942 }
3943 return VERR_CPU_OFFLINE;
3944}
3945
3946
3947/**
3948 * Performs the initial measurements of the TSC deltas between CPUs.
3949 *
3950 * This is called by supdrvGipCreate(), supdrvGipPowerNotificationCallback() or
3951 * triggered by it if threaded.
3952 *
3953 * @returns VBox status code.
3954 * @param pDevExt Pointer to the device instance data.
3955 *
3956 * @remarks Must be called only after supdrvGipInitOnCpu() as this function uses
3957 * idCpu, GIP's online CPU set which are populated in
3958 * supdrvGipInitOnCpu().
3959 */
3960static int supdrvMeasureInitialTscDeltas(PSUPDRVDEVEXT pDevExt)
3961{
3962 PSUPGIPCPU pGipCpuMaster;
3963 unsigned iCpu;
3964 unsigned iOddEven;
3965 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
3966 uint32_t idxMaster = UINT32_MAX;
3967 uint32_t cMpOnOffEvents = ASMAtomicReadU32(&pDevExt->cMpOnOffEvents);
3968
3969 Assert(pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED);
3970 supdrvTscResetSamples(pDevExt, true /* fClearDeltas */);
3971 int rc = supdrvTscPickMaster(pDevExt, &idxMaster);
3972 if (RT_FAILURE(rc))
3973 {
3974 SUPR0Printf("Failed to pick a CPU master for TSC-delta measurements rc=%Rrc\n", rc);
3975 return rc;
3976 }
3977 AssertReturn(idxMaster < pGip->cCpus, VERR_INVALID_CPU_INDEX);
3978 pGipCpuMaster = &pGip->aCPUs[idxMaster];
3979 Assert(pDevExt->idGipMaster == pGipCpuMaster->idCpu);
3980
3981 /*
3982 * If there is only a single CPU online we have nothing to do.
3983 */
3984 if (pGip->cOnlineCpus <= 1)
3985 {
3986 AssertReturn(pGip->cOnlineCpus > 0, VERR_INTERNAL_ERROR_5);
3987 return VINF_SUCCESS;
3988 }
3989
3990 /*
3991 * Loop thru the GIP CPU array and get deltas for each CPU (except the
3992 * master). We do the CPUs with the even numbered APIC IDs first so that
3993 * we've got alternative master CPUs to pick from on hyper-threaded systems.
3994 */
3995 for (iOddEven = 0; iOddEven < 2; iOddEven++)
3996 {
3997 for (iCpu = 0; iCpu < pGip->cCpus; iCpu++)
3998 {
3999 PSUPGIPCPU pGipCpuWorker = &pGip->aCPUs[iCpu];
4000 if ( iCpu != idxMaster
4001 && (iOddEven > 0 || (pGipCpuWorker->idApic & 1) == 0)
4002 && RTCpuSetIsMemberByIndex(&pDevExt->TscDeltaCpuSet, pGipCpuWorker->iCpuSet))
4003 {
4004 rc = supdrvMeasureTscDeltaOne(pDevExt, iCpu);
4005 if (RT_FAILURE(rc))
4006 {
4007 SUPR0Printf("supdrvMeasureTscDeltaOne failed. rc=%d CPU[%u].idCpu=%u Master[%u].idCpu=%u\n", rc, iCpu,
4008 pGipCpuWorker->idCpu, idxMaster, pDevExt->idGipMaster, pGipCpuMaster->idCpu);
4009 break;
4010 }
4011
4012 if (ASMAtomicReadU32(&pDevExt->cMpOnOffEvents) != cMpOnOffEvents)
4013 {
4014 SUPR0Printf("One or more CPUs transitioned between online & offline states. I'm confused, retry...\n");
4015 rc = VERR_TRY_AGAIN;
4016 break;
4017 }
4018 }
4019 }
4020 }
4021
4022 return rc;
4023}
4024
4025
4026#ifdef SUPDRV_USE_TSC_DELTA_THREAD
4027
4028/**
4029 * Switches the TSC-delta measurement thread into the butchered state.
4030 *
4031 * @returns VBox status code.
4032 * @param pDevExt Pointer to the device instance data.
4033 * @param fSpinlockHeld Whether the TSC-delta spinlock is held or not.
4034 * @param pszFailed An error message to log.
4035 * @param rcFailed The error code to exit the thread with.
4036 */
4037static int supdrvTscDeltaThreadButchered(PSUPDRVDEVEXT pDevExt, bool fSpinlockHeld, const char *pszFailed, int rcFailed)
4038{
4039 if (!fSpinlockHeld)
4040 RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock);
4041
4042 pDevExt->enmTscDeltaThreadState = kTscDeltaThreadState_Butchered;
4043 RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
4044 OSDBGPRINT(("supdrvTscDeltaThreadButchered: %s. rc=%Rrc\n", rcFailed));
4045 return rcFailed;
4046}
4047
4048
/**
 * The TSC-delta measurement thread.
 *
 * Drives a small state machine (Creating -> Listening <-> WaitAndMeasure ->
 * Measuring -> Listening, with Terminating/Butchered as exits) protected by
 * pDevExt->hTscDeltaSpinlock.  Note the deliberate case fall-throughs below.
 *
 * @returns VBox status code.
 * @param   hThread     The thread handle.
 * @param   pvUser      Opaque pointer to the device instance data.
 */
static DECLCALLBACK(int) supdrvTscDeltaThread(RTTHREAD hThread, void *pvUser)
{
    PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
    uint32_t cConsecutiveTimeouts = 0;
    int rc = VERR_INTERNAL_ERROR_2;
    for (;;)
    {
        /*
         * Switch on the current state.  The spinlock is taken here and each
         * case is responsible for releasing it (directly or via the butchered
         * helper).
         */
        SUPDRVTSCDELTATHREADSTATE enmState;
        RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock);
        enmState = pDevExt->enmTscDeltaThreadState;
        switch (enmState)
        {
            case kTscDeltaThreadState_Creating:
            {
                /* Ack creation to supdrvTscDeltaThreadInit() and start listening. */
                pDevExt->enmTscDeltaThreadState = kTscDeltaThreadState_Listening;
                rc = RTSemEventSignal(pDevExt->hTscDeltaEvent);
                if (RT_FAILURE(rc))
                    return supdrvTscDeltaThreadButchered(pDevExt, true /* fSpinlockHeld */, "RTSemEventSignal", rc);
                /* fall thru */
            }

            case kTscDeltaThreadState_Listening:
            {
                RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);

                /* Simple adaptive timeout: after 10 consecutive timeouts back
                   off 1 -> 10 -> 100 -> 500 ms. */
                if (cConsecutiveTimeouts++ == 10)
                {
                    if (pDevExt->cMsTscDeltaTimeout == 1)           /* 1 -> 10 ms */
                        pDevExt->cMsTscDeltaTimeout = 10;
                    else if (pDevExt->cMsTscDeltaTimeout == 10)     /* 10 -> 100 ms */
                        pDevExt->cMsTscDeltaTimeout = 100;
                    else if (pDevExt->cMsTscDeltaTimeout == 100)    /* 100 -> 500 ms */
                        pDevExt->cMsTscDeltaTimeout = 500;
                    cConsecutiveTimeouts = 0;
                }
                rc = RTThreadUserWait(pDevExt->hTscDeltaThread, pDevExt->cMsTscDeltaTimeout);
                if (   RT_FAILURE(rc)
                    && rc != VERR_TIMEOUT)
                    return supdrvTscDeltaThreadButchered(pDevExt, false /* fSpinlockHeld */, "RTThreadUserWait", rc);
                RTThreadUserReset(pDevExt->hTscDeltaThread);
                break;
            }

            case kTscDeltaThreadState_WaitAndMeasure:
            {
                pDevExt->enmTscDeltaThreadState = kTscDeltaThreadState_Measuring;
                rc = RTSemEventSignal(pDevExt->hTscDeltaEvent); /* (Safe on windows as long as spinlock isn't IRQ safe.) */
                if (RT_FAILURE(rc))
                    return supdrvTscDeltaThreadButchered(pDevExt, true /* fSpinlockHeld */, "RTSemEventSignal", rc);
                RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
                pDevExt->cMsTscDeltaTimeout = 1;    /* Reset the adaptive timeout to its minimum. */
                RTThreadSleep(1);                   /* Give the requester a moment to proceed. */
                /* fall thru */
            }

            case kTscDeltaThreadState_Measuring:
            {
                /* NOTE(review): this case is normally reached via fall-thru from
                   WaitAndMeasure, i.e. with the spinlock already released.  If it
                   were entered directly from the switch the spinlock would still
                   be held across the measurements -- verify that cannot happen. */
                cConsecutiveTimeouts = 0;
                if (pDevExt->fTscThreadRecomputeAllDeltas)
                {
                    /* Full recompute: retry a few times around transient
                       hot-plug/offline conditions. */
                    int cTries = 8;
                    int cMsWaitPerTry = 10;
                    PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
                    Assert(pGip);
                    do
                    {
                        RTCpuSetCopy(&pDevExt->TscDeltaCpuSet, &pGip->OnlineCpuSet);
                        rc = supdrvMeasureInitialTscDeltas(pDevExt);
                        if (   RT_SUCCESS(rc)
                            || (   RT_FAILURE(rc)
                                && rc != VERR_TRY_AGAIN
                                && rc != VERR_CPU_OFFLINE))
                        {
                            break;
                        }
                        RTThreadSleep(cMsWaitPerTry);
                    } while (cTries-- > 0);
                    pDevExt->fTscThreadRecomputeAllDeltas = false;
                }
                else
                {
                    PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
                    unsigned iCpu;

                    /* Measure TSC-deltas only for the CPUs that are in the set. */
                    rc = VINF_SUCCESS;
                    for (iCpu = 0; iCpu < pGip->cCpus; iCpu++)
                    {
                        PSUPGIPCPU pGipCpuWorker = &pGip->aCPUs[iCpu];
                        if (RTCpuSetIsMemberByIndex(&pDevExt->TscDeltaCpuSet, pGipCpuWorker->iCpuSet))
                        {
                            if (pGipCpuWorker->i64TSCDelta == INT64_MAX)
                            {
                                /* Keep the first failure, but keep going for the other CPUs. */
                                int rc2 = supdrvMeasureTscDeltaOne(pDevExt, iCpu);
                                if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                                    rc = rc2;
                            }
                            else
                            {
                                /*
                                 * The thread/someone must've called SUPR0TscDeltaMeasureBySetIndex(),
                                 * mark the delta as fine to get the timer thread off our back.
                                 */
                                RTCpuSetDelByIndex(&pDevExt->TscDeltaCpuSet, pGipCpuWorker->iCpuSet);
                                RTCpuSetAddByIndex(&pDevExt->TscDeltaObtainedCpuSet, pGipCpuWorker->iCpuSet);
                            }
                        }
                    }
                }
                RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock);
                if (pDevExt->enmTscDeltaThreadState == kTscDeltaThreadState_Measuring)
                    pDevExt->enmTscDeltaThreadState = kTscDeltaThreadState_Listening;
                RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
                Assert(rc != VERR_NOT_AVAILABLE); /* VERR_NOT_AVAILABLE is used as init value, see supdrvTscDeltaThreadInit(). */
                ASMAtomicWriteS32(&pDevExt->rcTscDelta, rc);
                break;
            }

            case kTscDeltaThreadState_Terminating:
                pDevExt->enmTscDeltaThreadState = kTscDeltaThreadState_Destroyed;
                RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
                return VINF_SUCCESS;

            case kTscDeltaThreadState_Butchered:
            default:
                return supdrvTscDeltaThreadButchered(pDevExt, true /* fSpinlockHeld */, "Invalid state", VERR_INVALID_STATE);
        }
    }

    return rc; /* not reached -- the loop only exits via the returns above. */
}
4191
4192
/**
 * Waits for the TSC-delta measurement thread to respond to a state change.
 *
 * @returns VINF_SUCCESS on success, VERR_TIMEOUT if it doesn't respond in time,
 *          other error code on internal error.
 *
 * @param   pDevExt         Pointer to the device instance data.
 * @param   enmCurState     The current state.
 * @param   enmNewState     The new state we're waiting for it to enter.
 */
static int supdrvTscDeltaThreadWait(PSUPDRVDEVEXT pDevExt, SUPDRVTSCDELTATHREADSTATE enmCurState,
                                    SUPDRVTSCDELTATHREADSTATE enmNewState)
{
    /*
     * Wait a short while for the expected state transition.
     */
    int rc;
    RTSemEventWait(pDevExt->hTscDeltaEvent, RT_MS_1SEC); /* Result deliberately ignored; state is checked below. */
    RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock);
    if (pDevExt->enmTscDeltaThreadState == enmNewState)
    {
        RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
        rc = VINF_SUCCESS;
    }
    else if (pDevExt->enmTscDeltaThreadState == enmCurState)
    {
        /*
         * Wait longer if the state has not yet transitioned to the one we want.
         */
        RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
        rc = RTSemEventWait(pDevExt->hTscDeltaEvent, 50 * RT_MS_1SEC);
        if (   RT_SUCCESS(rc)
            || rc == VERR_TIMEOUT)
        {
            /*
             * Check the state whether we've succeeded.
             */
            SUPDRVTSCDELTATHREADSTATE enmState;
            RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock);
            enmState = pDevExt->enmTscDeltaThreadState;
            RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
            if (enmState == enmNewState)
                rc = VINF_SUCCESS;
            else if (enmState == enmCurState)
            {
                /* Still stuck in the old state: report a timeout. */
                rc = VERR_TIMEOUT;
                OSDBGPRINT(("supdrvTscDeltaThreadWait: timed out state transition. enmState=%d enmNewState=%d\n", enmState,
                            enmNewState));
            }
            else
            {
                /* Ended up in a third, unexpected state. */
                rc = VERR_INTERNAL_ERROR;
                OSDBGPRINT(("supdrvTscDeltaThreadWait: invalid state transition from %d to %d, expected %d\n", enmCurState,
                            enmState, enmNewState));
            }
        }
        else
            OSDBGPRINT(("supdrvTscDeltaThreadWait: RTSemEventWait failed. rc=%Rrc\n", rc));
    }
    else
    {
        /* The thread was in neither the old nor the new state. */
        RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
        OSDBGPRINT(("supdrvTscDeltaThreadWait: invalid state transition from %d to %d\n", enmCurState, enmNewState));
        rc = VERR_INTERNAL_ERROR;
    }

    return rc;
}
4261
4262
4263/**
4264 * Signals the TSC-delta thread to start measuring TSC-deltas.
4265 *
4266 * @param pDevExt Pointer to the device instance data.
4267 * @param fForceAll Force re-calculating TSC-deltas on all CPUs.
4268 */
4269static void supdrvTscDeltaThreadStartMeasurement(PSUPDRVDEVEXT pDevExt, bool fForceAll)
4270{
4271 if (pDevExt->hTscDeltaThread != NIL_RTTHREAD)
4272 {
4273 RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock);
4274 if ( pDevExt->enmTscDeltaThreadState == kTscDeltaThreadState_Listening
4275 || pDevExt->enmTscDeltaThreadState == kTscDeltaThreadState_Measuring)
4276 {
4277 pDevExt->enmTscDeltaThreadState = kTscDeltaThreadState_WaitAndMeasure;
4278 if (fForceAll)
4279 pDevExt->fTscThreadRecomputeAllDeltas = true;
4280 }
4281 else if ( pDevExt->enmTscDeltaThreadState == kTscDeltaThreadState_WaitAndMeasure
4282 && fForceAll)
4283 pDevExt->fTscThreadRecomputeAllDeltas = true;
4284 RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
4285 RTThreadUserSignal(pDevExt->hTscDeltaThread);
4286 }
4287}
4288
4289
4290/**
4291 * Terminates the actual thread running supdrvTscDeltaThread().
4292 *
4293 * This is an internal worker function for supdrvTscDeltaThreadInit() and
4294 * supdrvTscDeltaTerm().
4295 *
4296 * @param pDevExt Pointer to the device instance data.
4297 */
4298static void supdrvTscDeltaThreadTerminate(PSUPDRVDEVEXT pDevExt)
4299{
4300 int rc;
4301 RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock);
4302 pDevExt->enmTscDeltaThreadState = kTscDeltaThreadState_Terminating;
4303 RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
4304 RTThreadUserSignal(pDevExt->hTscDeltaThread);
4305 rc = RTThreadWait(pDevExt->hTscDeltaThread, 50 * RT_MS_1SEC, NULL /* prc */);
4306 if (RT_FAILURE(rc))
4307 {
4308 /* Signal a few more times before giving up. */
4309 int cTriesLeft = 5;
4310 while (--cTriesLeft > 0)
4311 {
4312 RTThreadUserSignal(pDevExt->hTscDeltaThread);
4313 rc = RTThreadWait(pDevExt->hTscDeltaThread, 2 * RT_MS_1SEC, NULL /* prc */);
4314 if (rc != VERR_TIMEOUT)
4315 break;
4316 }
4317 }
4318}
4319
4320
/**
 * Initializes and spawns the TSC-delta measurement thread.
 *
 * A thread is required for servicing re-measurement requests from events like
 * CPUs coming online, suspend/resume etc. as it cannot be done synchronously
 * under all contexts on all OSs.
 *
 * On failure every resource created here is torn down again before returning
 * (thread, event semaphore, spinlock -- in reverse creation order).
 *
 * @returns VBox status code.
 * @param   pDevExt           Pointer to the device instance data.
 *
 * @remarks Must only be called -after- initializing GIP and setting up MP
 *          notifications!
 */
static int supdrvTscDeltaThreadInit(PSUPDRVDEVEXT pDevExt)
{
    int rc;
    Assert(pDevExt->pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED);
    rc = RTSpinlockCreate(&pDevExt->hTscDeltaSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "VBoxTscSpnLck");
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventCreate(&pDevExt->hTscDeltaEvent);
        if (RT_SUCCESS(rc))
        {
            pDevExt->enmTscDeltaThreadState = kTscDeltaThreadState_Creating;
            pDevExt->cMsTscDeltaTimeout = 1;    /* Initial adaptive poll interval, see supdrvTscDeltaThread. */
            rc = RTThreadCreate(&pDevExt->hTscDeltaThread, supdrvTscDeltaThread, pDevExt, 0 /* cbStack */,
                                RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "VBoxTscThread");
            if (RT_SUCCESS(rc))
            {
                /* Wait for the thread to acknowledge creation by entering the listening state. */
                rc = supdrvTscDeltaThreadWait(pDevExt, kTscDeltaThreadState_Creating, kTscDeltaThreadState_Listening);
                if (RT_SUCCESS(rc))
                {
                    /* VERR_NOT_AVAILABLE = "no measurement result yet", consumed by the thread. */
                    ASMAtomicWriteS32(&pDevExt->rcTscDelta, VERR_NOT_AVAILABLE);
                    return rc;
                }

                OSDBGPRINT(("supdrvTscDeltaInit: supdrvTscDeltaThreadWait failed. rc=%Rrc\n", rc));
                supdrvTscDeltaThreadTerminate(pDevExt);
            }
            else
                OSDBGPRINT(("supdrvTscDeltaInit: RTThreadCreate failed. rc=%Rrc\n", rc));
            RTSemEventDestroy(pDevExt->hTscDeltaEvent);
            pDevExt->hTscDeltaEvent = NIL_RTSEMEVENT;
        }
        else
            OSDBGPRINT(("supdrvTscDeltaInit: RTSemEventCreate failed. rc=%Rrc\n", rc));
        RTSpinlockDestroy(pDevExt->hTscDeltaSpinlock);
        pDevExt->hTscDeltaSpinlock = NIL_RTSPINLOCK;
    }
    else
        OSDBGPRINT(("supdrvTscDeltaInit: RTSpinlockCreate failed. rc=%Rrc\n", rc));

    return rc;
}
4375
4376
4377/**
4378 * Terminates the TSC-delta measurement thread and cleanup.
4379 *
4380 * @param pDevExt Pointer to the device instance data.
4381 */
4382static void supdrvTscDeltaTerm(PSUPDRVDEVEXT pDevExt)
4383{
4384 if ( pDevExt->hTscDeltaSpinlock != NIL_RTSPINLOCK
4385 && pDevExt->hTscDeltaEvent != NIL_RTSEMEVENT)
4386 {
4387 supdrvTscDeltaThreadTerminate(pDevExt);
4388 }
4389
4390 if (pDevExt->hTscDeltaSpinlock != NIL_RTSPINLOCK)
4391 {
4392 RTSpinlockDestroy(pDevExt->hTscDeltaSpinlock);
4393 pDevExt->hTscDeltaSpinlock = NIL_RTSPINLOCK;
4394 }
4395
4396 if (pDevExt->hTscDeltaEvent != NIL_RTSEMEVENT)
4397 {
4398 RTSemEventDestroy(pDevExt->hTscDeltaEvent);
4399 pDevExt->hTscDeltaEvent = NIL_RTSEMEVENT;
4400 }
4401
4402 ASMAtomicWriteS32(&pDevExt->rcTscDelta, VERR_NOT_AVAILABLE);
4403}
4404
4405#endif /* SUPDRV_USE_TSC_DELTA_THREAD */
4406
4407/**
4408 * Measure the TSC delta for the CPU given by its CPU set index.
4409 *
4410 * @returns VBox status code.
4411 * @retval VERR_INTERRUPTED if interrupted while waiting.
4412 * @retval VERR_SUPDRV_TSC_DELTA_MEASUREMENT_FAILED if we were unable to get a
4413 * measurment.
4414 * @retval VERR_CPU_OFFLINE if the specified CPU is offline.
4415 *
4416 * @param pSession The caller's session. GIP must've been mapped.
4417 * @param iCpuSet The CPU set index of the CPU to measure.
4418 * @param fFlags Flags, SUP_TSCDELTA_MEASURE_F_XXX.
4419 * @param cMsWaitRetry Number of milliseconds to wait between each retry.
4420 * @param cMsWaitThread Number of milliseconds to wait for the thread to get
4421 * ready.
4422 * @param cTries Number of times to try, pass 0 for the default.
4423 */
4424SUPR0DECL(int) SUPR0TscDeltaMeasureBySetIndex(PSUPDRVSESSION pSession, uint32_t iCpuSet, uint32_t fFlags,
4425 RTMSINTERVAL cMsWaitRetry, RTMSINTERVAL cMsWaitThread, uint32_t cTries)
4426{
4427 PSUPDRVDEVEXT pDevExt;
4428 PSUPGLOBALINFOPAGE pGip;
4429 uint16_t iGipCpu;
4430 int rc;
4431#ifdef SUPDRV_USE_TSC_DELTA_THREAD
4432 uint64_t msTsStartWait;
4433 uint32_t iWaitLoop;
4434#endif
4435
4436 /*
4437 * Validate and adjust the input.
4438 */
4439 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4440 if (!pSession->fGipReferenced)
4441 return VERR_WRONG_ORDER;
4442
4443 pDevExt = pSession->pDevExt;
4444 AssertReturn(SUP_IS_DEVEXT_VALID(pDevExt), VERR_INVALID_PARAMETER);
4445
4446 pGip = pDevExt->pGip;
4447 AssertPtrReturn(pGip, VERR_INTERNAL_ERROR_2);
4448
4449 AssertReturn(iCpuSet < RTCPUSET_MAX_CPUS, VERR_INVALID_CPU_INDEX);
4450 AssertReturn(iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx), VERR_INVALID_CPU_INDEX);
4451 iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
4452 AssertReturn(iGipCpu < pGip->cCpus, VERR_INVALID_CPU_INDEX);
4453
4454 if (fFlags & ~SUP_TSCDELTA_MEASURE_F_VALID_MASK)
4455 return VERR_INVALID_FLAGS;
4456
4457 /*
4458 * The request is a noop if the TSC delta isn't being used.
4459 */
4460 if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ZERO_CLAIMED)
4461 return VINF_SUCCESS;
4462
4463 if (cTries == 0)
4464 cTries = 12;
4465 else if (cTries > 256)
4466 cTries = 256;
4467
4468 if (cMsWaitRetry == 0)
4469 cMsWaitRetry = 2;
4470 else if (cMsWaitRetry > 1000)
4471 cMsWaitRetry = 1000;
4472
4473#ifdef SUPDRV_USE_TSC_DELTA_THREAD
4474 /*
4475 * Has the TSC already been measured and we're not forced to redo it?
4476 */
4477 if ( pGip->aCPUs[iGipCpu].i64TSCDelta != INT64_MAX
4478 && !(fFlags & SUP_TSCDELTA_MEASURE_F_FORCE))
4479 return VINF_SUCCESS;
4480
4481 /*
4482 * Asynchronous request? Forward it to the thread, no waiting.
4483 */
4484 if (fFlags & SUP_TSCDELTA_MEASURE_F_ASYNC)
4485 {
4486 /** @todo Async. doesn't implement options like retries, waiting. We'll need
4487 * to pass those options to the thread somehow and implement it in the
4488 * thread. Check if anyone uses/needs fAsync before implementing this. */
4489 RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock);
4490 RTCpuSetAddByIndex(&pDevExt->TscDeltaCpuSet, iCpuSet);
4491 if ( pDevExt->enmTscDeltaThreadState == kTscDeltaThreadState_Listening
4492 || pDevExt->enmTscDeltaThreadState == kTscDeltaThreadState_Measuring)
4493 {
4494 pDevExt->enmTscDeltaThreadState = kTscDeltaThreadState_WaitAndMeasure;
4495 rc = VINF_SUCCESS;
4496 }
4497 else if (pDevExt->enmTscDeltaThreadState != kTscDeltaThreadState_WaitAndMeasure)
4498 rc = VERR_THREAD_IS_DEAD;
4499 RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
4500 RTThreadUserSignal(pDevExt->hTscDeltaThread);
4501 return VINF_SUCCESS;
4502 }
4503
4504 /*
4505 * If a TSC-delta measurement request is already being serviced by the thread,
4506 * wait 'cTries' times if a retry-timeout is provided, otherwise bail as busy.
4507 */
4508 msTsStartWait = RTTimeSystemMilliTS();
4509 for (iWaitLoop = 0;; iWaitLoop++)
4510 {
4511 uint64_t cMsElapsed;
4512 SUPDRVTSCDELTATHREADSTATE enmState;
4513 RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock);
4514 enmState = pDevExt->enmTscDeltaThreadState;
4515 RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
4516
4517 if (enmState == kTscDeltaThreadState_Measuring)
4518 { /* Must wait, the thread is busy. */ }
4519 else if (enmState == kTscDeltaThreadState_WaitAndMeasure)
4520 { /* Must wait, this state only says what will happen next. */ }
4521 else if (enmState == kTscDeltaThreadState_Terminating)
4522 { /* Must wait, this state only says what should happen next. */ }
4523 else
4524 break; /* All other states, the thread is either idly listening or dead. */
4525
4526 /* Wait or fail. */
4527 if (cMsWaitThread == 0)
4528 return VERR_SUPDRV_TSC_DELTA_MEASUREMENT_BUSY;
4529 cMsElapsed = RTTimeSystemMilliTS() - msTsStartWait;
4530 if (cMsElapsed >= cMsWaitThread)
4531 return VERR_SUPDRV_TSC_DELTA_MEASUREMENT_BUSY;
4532
4533 rc = RTThreadSleep(RT_MIN((RTMSINTERVAL)(cMsWaitThread - cMsElapsed), RT_MIN(iWaitLoop + 1, 10)));
4534 if (rc == VERR_INTERRUPTED)
4535 return rc;
4536 }
4537#endif /* SUPDRV_USE_TSC_DELTA_THREAD */
4538
4539 /*
4540 * Try measure the TSC delta the given number of times.
4541 */
4542 for (;;)
4543 {
4544 /* Unless we're forced to measure the delta, check whether it's done already. */
4545 if ( !(fFlags & SUP_TSCDELTA_MEASURE_F_FORCE)
4546 && pGip->aCPUs[iGipCpu].i64TSCDelta != INT64_MAX)
4547 {
4548 rc = VINF_SUCCESS;
4549 break;
4550 }
4551
4552 /* Measure it. */
4553 rc = supdrvMeasureTscDeltaOne(pDevExt, iGipCpu);
4554 if (rc != VERR_SUPDRV_TSC_DELTA_MEASUREMENT_FAILED)
4555 {
4556 Assert(pGip->aCPUs[iGipCpu].i64TSCDelta != INT64_MAX || RT_FAILURE_NP(rc));
4557 break;
4558 }
4559
4560 /* Retry? */
4561 if (cTries <= 1)
4562 break;
4563 cTries--;
4564
4565 /* Always delay between retries (be nice to the rest of the system
4566 and avoid the BSOD hounds). */
4567 rc = RTThreadSleep(cMsWaitRetry);
4568 if (rc == VERR_INTERRUPTED)
4569 break;
4570 }
4571
4572 return rc;
4573}
4574
4575
4576/**
4577 * Service a TSC-delta measurement request.
4578 *
4579 * @returns VBox status code.
4580 * @param pDevExt Pointer to the device instance data.
4581 * @param pSession The support driver session.
4582 * @param pReq Pointer to the TSC-delta measurement request.
4583 */
4584int VBOXCALL supdrvIOCtl_TscDeltaMeasure(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPTSCDELTAMEASURE pReq)
4585{
4586 uint32_t cTries;
4587 uint32_t iCpuSet;
4588 uint32_t fFlags;
4589 RTMSINTERVAL cMsWaitRetry;
4590
4591 /*
4592 * Validate and adjust/resolve the input so they can be passed onto SUPR0TscDeltaMeasureBySetIndex.
4593 */
4594 AssertPtr(pDevExt); AssertPtr(pSession); AssertPtr(pReq); /* paranoia^2 */
4595
4596 if (pReq->u.In.idCpu == NIL_RTCPUID)
4597 return VERR_INVALID_CPU_ID;
4598 iCpuSet = RTMpCpuIdToSetIndex(pReq->u.In.idCpu);
4599 if (iCpuSet >= RTCPUSET_MAX_CPUS)
4600 return VERR_INVALID_CPU_ID;
4601
4602 cTries = pReq->u.In.cRetries == 0 ? 0 : (uint32_t)pReq->u.In.cRetries + 1;
4603
4604 cMsWaitRetry = RT_MAX(pReq->u.In.cMsWaitRetry, 5);
4605
4606 fFlags = 0;
4607 if (pReq->u.In.fAsync)
4608 fFlags |= SUP_TSCDELTA_MEASURE_F_ASYNC;
4609 if (pReq->u.In.fForce)
4610 fFlags |= SUP_TSCDELTA_MEASURE_F_FORCE;
4611
4612 return SUPR0TscDeltaMeasureBySetIndex(pSession, iCpuSet, fFlags, cMsWaitRetry,
4613 cTries == 0 ? 5 * RT_MS_1SEC : cMsWaitRetry * cTries /*cMsWaitThread*/,
4614 cTries);
4615}
4616
4617
4618/**
4619 * Reads TSC with delta applied.
4620 *
4621 * Will try to resolve delta value INT64_MAX before applying it. This is the
4622 * main purpose of this function, to handle the case where the delta needs to be
4623 * determined.
4624 *
4625 * @returns VBox status code.
4626 * @param pDevExt Pointer to the device instance data.
4627 * @param pSession The support driver session.
4628 * @param pReq Pointer to the TSC-read request.
4629 */
4630int VBOXCALL supdrvIOCtl_TscRead(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPTSCREAD pReq)
4631{
4632 PSUPGLOBALINFOPAGE pGip;
4633 int rc;
4634
4635 /*
4636 * Validate. We require the client to have mapped GIP (no asserting on
4637 * ring-3 preconditions).
4638 */
4639 AssertPtr(pDevExt); AssertPtr(pReq); AssertPtr(pSession); /* paranoia^2 */
4640 if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
4641 return VERR_WRONG_ORDER;
4642 pGip = pDevExt->pGip;
4643 AssertReturn(pGip, VERR_INTERNAL_ERROR_2);
4644
4645 /*
4646 * We're usually here because we need to apply delta, but we shouldn't be
4647 * upset if the GIP is some different mode.
4648 */
4649 if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED)
4650 {
4651 uint32_t cTries = 0;
4652 for (;;)
4653 {
4654 /*
4655 * Start by gathering the data, using CLI for disabling preemption
4656 * while we do that.
4657 */
4658 RTCCUINTREG fEFlags = ASMIntDisableFlags();
4659 int iCpuSet = RTMpCpuIdToSetIndex(RTMpCpuId());
4660 int iGipCpu;
4661 if (RT_LIKELY( (unsigned)iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)
4662 && (iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet]) < pGip->cCpus ))
4663 {
4664 int64_t i64Delta = pGip->aCPUs[iGipCpu].i64TSCDelta;
4665 pReq->u.Out.idApic = pGip->aCPUs[iGipCpu].idApic;
4666 pReq->u.Out.u64AdjustedTsc = ASMReadTSC();
4667 ASMSetFlags(fEFlags);
4668
4669 /*
4670 * If we're lucky we've got a delta, but no predicitions here
4671 * as this I/O control is normally only used when the TSC delta
4672 * is set to INT64_MAX.
4673 */
4674 if (i64Delta != INT64_MAX)
4675 {
4676 pReq->u.Out.u64AdjustedTsc -= i64Delta;
4677 rc = VINF_SUCCESS;
4678 break;
4679 }
4680
4681 /* Give up after a few times. */
4682 if (cTries >= 4)
4683 {
4684 rc = VWRN_SUPDRV_TSC_DELTA_MEASUREMENT_FAILED;
4685 break;
4686 }
4687
4688 /* Need to measure the delta an try again. */
4689 rc = supdrvMeasureTscDeltaOne(pDevExt, iGipCpu);
4690 Assert(pGip->aCPUs[iGipCpu].i64TSCDelta != INT64_MAX || RT_FAILURE_NP(rc));
4691 /** @todo should probably delay on failure... dpc watchdogs */
4692 }
4693 else
4694 {
4695 /* This really shouldn't happen. */
4696 AssertMsgFailed(("idCpu=%#x iCpuSet=%#x (%d)\n", RTMpCpuId(), iCpuSet, iCpuSet));
4697 pReq->u.Out.idApic = ASMGetApicId();
4698 pReq->u.Out.u64AdjustedTsc = ASMReadTSC();
4699 ASMSetFlags(fEFlags);
4700 rc = VERR_INTERNAL_ERROR_5; /** @todo change to warning. */
4701 break;
4702 }
4703 }
4704 }
4705 else
4706 {
4707 /*
4708 * No delta to apply. Easy. Deal with preemption the lazy way.
4709 */
4710 RTCCUINTREG fEFlags = ASMIntDisableFlags();
4711 int iCpuSet = RTMpCpuIdToSetIndex(RTMpCpuId());
4712 int iGipCpu;
4713 if (RT_LIKELY( (unsigned)iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)
4714 && (iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet]) < pGip->cCpus ))
4715 pReq->u.Out.idApic = pGip->aCPUs[iGipCpu].idApic;
4716 else
4717 pReq->u.Out.idApic = ASMGetApicId();
4718 pReq->u.Out.u64AdjustedTsc = ASMReadTSC();
4719 ASMSetFlags(fEFlags);
4720 rc = VINF_SUCCESS;
4721 }
4722
4723 return rc;
4724}
4725
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette