VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@ 69222

Last change on this file since 69222 was 69111, checked in by vboxsync, 7 years ago

Copyright (C) year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 160.0 KB
Line 
1/* $Id: lockvalidator.cpp 69111 2017-10-17 14:26:02Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include <iprt/lockvalidator.h>
32#include "internal/iprt.h"
33
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/env.h>
37#include <iprt/err.h>
38#include <iprt/mem.h>
39#include <iprt/once.h>
40#include <iprt/semaphore.h>
41#include <iprt/string.h>
42#include <iprt/thread.h>
43
44#include "internal/lockvalidator.h"
45#include "internal/magics.h"
46#include "internal/strhash.h"
47#include "internal/thread.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** Macro that asserts that a pointer is aligned correctly.
54 * Only used when fighting bugs. */
55#if 1
56# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
57 AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p)));
58#else
59# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
60#endif
61
62/** Hashes the class handle (pointer) into an apPriorLocksHash index. */
63#define RTLOCKVALCLASS_HASH(hClass) \
64 ( ((uintptr_t)(hClass) >> 6 ) \
65 % ( RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
66 / sizeof(PRTLOCKVALCLASSREF)) )
67
68/** The max value for RTLOCKVALCLASSINT::cRefs. */
69#define RTLOCKVALCLASS_MAX_REFS UINT32_C(0xffff0000)
70/** The max value for RTLOCKVALCLASSREF::cLookups. */
71#define RTLOCKVALCLASSREF_MAX_LOOKUPS UINT32_C(0xfffe0000)
72/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
73 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
74#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX UINT32_C(0xffff0000)
75
76
77/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
78 * Enable recursion records. */
79#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
80# define RTLOCKVAL_WITH_RECURSION_RECORDS 1
81#endif
82
83/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
84 * Enables some extra verbosity in the lock dumping. */
85#if defined(DOXYGEN_RUNNING)
86# define RTLOCKVAL_WITH_VERBOSE_DUMPS
87#endif
88
89/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
90 * Enables collection prior class hash lookup statistics, dumping them when
91 * complaining about the class. */
92#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
93# define RTLOCKVAL_WITH_CLASS_HASH_STATS
94#endif
95
96
97/*********************************************************************************************************************************
98* Structures and Typedefs *
99*********************************************************************************************************************************/
100/**
101 * Deadlock detection stack entry.
102 */
103typedef struct RTLOCKVALDDENTRY
104{
105 /** The current record. */
106 PRTLOCKVALRECUNION pRec;
107 /** The current entry number if pRec is a shared one. */
108 uint32_t iEntry;
109 /** The thread state of the thread we followed to get to pFirstSibling.
110 * This is only used for validating a deadlock stack. */
111 RTTHREADSTATE enmState;
112 /** The thread we followed to get to pFirstSibling.
113 * This is only used for validating a deadlock stack. */
114 PRTTHREADINT pThread;
115 /** What pThread is waiting on, i.e. where we entered the circular list of
116 * siblings. This is used for validating a deadlock stack as well as
117 * terminating the sibling walk. */
118 PRTLOCKVALRECUNION pFirstSibling;
119} RTLOCKVALDDENTRY;
120
121
122/**
123 * Deadlock detection stack.
124 */
125typedef struct RTLOCKVALDDSTACK
126{
127 /** The number stack entries. */
128 uint32_t c;
129 /** The stack entries. */
130 RTLOCKVALDDENTRY a[32];
131} RTLOCKVALDDSTACK;
132/** Pointer to a deadlock detection stack. */
133typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
134
135
136/**
137 * Reference to another class.
138 */
139typedef struct RTLOCKVALCLASSREF
140{
141 /** The class. */
142 RTLOCKVALCLASS hClass;
143 /** The number of lookups of this class. */
144 uint32_t volatile cLookups;
145 /** Indicates whether the entry was added automatically during order checking
146 * (true) or manually via the API (false). */
147 bool fAutodidacticism;
148 /** Reserved / explicit alignment padding. */
149 bool afReserved[3];
150} RTLOCKVALCLASSREF;
151/** Pointer to a class reference. */
152typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;
153
154
155/** Pointer to a chunk of class references. */
156typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
157/**
158 * Chunk of class references.
159 */
160typedef struct RTLOCKVALCLASSREFCHUNK
161{
162 /** Array of refs. */
163#if 0 /** @todo for testing allocation of new chunks. */
164 RTLOCKVALCLASSREF aRefs[ARCH_BITS == 32 ? 10 : 8];
165#else
166 RTLOCKVALCLASSREF aRefs[2];
167#endif
168 /** Pointer to the next chunk. */
169 PRTLOCKVALCLASSREFCHUNK volatile pNext;
170} RTLOCKVALCLASSREFCHUNK;
171
172
173/**
174 * Lock class.
175 */
176typedef struct RTLOCKVALCLASSINT
177{
178 /** AVL node core. */
179 AVLLU32NODECORE Core;
180 /** Magic value (RTLOCKVALCLASS_MAGIC). */
181 uint32_t volatile u32Magic;
182 /** Reference counter. See RTLOCKVALCLASS_MAX_REFS. */
183 uint32_t volatile cRefs;
184 /** Whether the class is allowed to teach it self new locking order rules. */
185 bool fAutodidact;
186 /** Whether to allow recursion. */
187 bool fRecursionOk;
188 /** Strict release order. */
189 bool fStrictReleaseOrder;
190 /** Whether this class is in the tree. */
191 bool fInTree;
192 /** Donate a reference to the next retainer. This is a hack to make
193 * RTLockValidatorClassCreateUnique work. */
194 bool volatile fDonateRefToNextRetainer;
195 /** Reserved future use / explicit alignment. */
196 bool afReserved[3];
197 /** The minimum wait interval for which we do deadlock detection
198 * (milliseconds). */
199 RTMSINTERVAL cMsMinDeadlock;
200 /** The minimum wait interval for which we do order checks (milliseconds). */
201 RTMSINTERVAL cMsMinOrder;
202 /** More padding. */
203 uint32_t au32Reserved[ARCH_BITS == 32 ? 5 : 2];
204 /** Classes that may be taken prior to this one.
205 * This is a linked list where each node contains a chunk of locks so that we
206 * reduce the number of allocations as well as localize the data. */
207 RTLOCKVALCLASSREFCHUNK PriorLocks;
208 /** Hash table containing frequently encountered prior locks. */
209 PRTLOCKVALCLASSREF apPriorLocksHash[17];
210 /** Class name. (Allocated after the end of the block as usual.) */
211 char const *pszName;
212 /** Where this class was created.
213 * This is mainly used for finding automatically created lock classes.
214 * @remarks The strings are stored after this structure so we won't crash
215 * if the class lives longer than the module (dll/so/dylib) that
216 * spawned it. */
217 RTLOCKVALSRCPOS CreatePos;
218#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
219 /** Hash hits. */
220 uint32_t volatile cHashHits;
221 /** Hash misses. */
222 uint32_t volatile cHashMisses;
223#endif
224} RTLOCKVALCLASSINT;
225AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
226AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);
227
228
229/*********************************************************************************************************************************
230* Global Variables *
231*********************************************************************************************************************************/
232/** Serializing object destruction and deadlock detection.
233 *
234 * This makes sure that none of the memory examined by the deadlock detection
235 * code will become invalid (reused for other purposes or made not present)
236 * while the detection is in progress.
237 *
238 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALDRECSHRD::papOwners destruction.
239 * EW: Deadlock detection and some related activities.
240 */
241static RTSEMXROADS g_hLockValidatorXRoads = NIL_RTSEMXROADS;
242/** Serializing class tree insert and lookups. */
243static RTSEMRW g_hLockValClassTreeRWLock= NIL_RTSEMRW;
244/** Class tree. */
245static PAVLLU32NODECORE g_LockValClassTree = NULL;
246/** Critical section serializing the teaching new rules to the classes. */
247static RTCRITSECT g_LockValClassTeachCS;
248
249/** Whether the lock validator is enabled or disabled.
250 * Only applies to new locks. */
251static bool volatile g_fLockValidatorEnabled = true;
252/** Set if the lock validator is quiet. */
253#ifdef RT_STRICT
254static bool volatile g_fLockValidatorQuiet = false;
255#else
256static bool volatile g_fLockValidatorQuiet = true;
257#endif
258/** Set if the lock validator may panic. */
259#ifdef RT_STRICT
260static bool volatile g_fLockValidatorMayPanic = true;
261#else
262static bool volatile g_fLockValidatorMayPanic = false;
263#endif
264/** Whether to return an error status on wrong locking order. */
265static bool volatile g_fLockValSoftWrongOrder = false;
266
267
268/*********************************************************************************************************************************
269* Internal Functions *
270*********************************************************************************************************************************/
271static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
272static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);
273
274
/**
 * Lazy initialization of the lock validator globals.
 *
 * Creates the teaching critical section, the class tree RW lock and the
 * destruction/detection crossroads semaphore, then (ring-3 only) applies
 * configuration overrides from the environment.
 *
 * @note    Not fully race proof: a caller arriving while another thread is
 *          still inside the init block returns immediately without waiting
 *          for the initialization to complete.
 */
static void rtLockValidatorLazyInit(void)
{
    static uint32_t volatile s_fInitializing = false;
    if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
    {
        /*
         * The locks.
         */
        /* All validator-internal locks are created with *_FLAGS_NO_LOCK_VAL so
           the validator never tries to validate its own locks (recursion). */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
                             RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");

        if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
        {
            RTSEMRW hSemRW;
            int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
        }

        if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
        {
            RTSEMXROADS hXRoads;
            int rc = RTSemXRoadsCreate(&hXRoads);
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
        }

#ifdef IN_RING3
        /*
         * Check the environment for our config variables.
         */
        /* Mere existence of a variable toggles the setting.  The "negative"
           variable is checked after the "positive" one, so it wins when both
           are present. */
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
#endif

        /*
         * Register cleanup
         */
        /** @todo register some cleanup callback if we care. */

        ASMAtomicWriteU32(&s_fInitializing, false);
    }
}
339
340
341
342/** Wrapper around ASMAtomicReadPtr. */
343DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
344{
345 PRTLOCKVALRECUNION p = ASMAtomicReadPtrT(ppRec, PRTLOCKVALRECUNION);
346 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
347 return p;
348}
349
350
/** Wrapper around ASMAtomicWritePtr.
 * Asserts the alignment of the new value before publishing it. */
DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
    ASMAtomicWritePtr(ppRec, pRecNew);
}
357
358
359/** Wrapper around ASMAtomicReadPtr. */
360DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
361{
362 PRTTHREADINT p = ASMAtomicReadPtrT(phThread, PRTTHREADINT);
363 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
364 return p;
365}
366
367
368/** Wrapper around ASMAtomicUoReadPtr. */
369DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
370{
371 PRTLOCKVALRECSHRDOWN p = ASMAtomicUoReadPtrT(ppOwner, PRTLOCKVALRECSHRDOWN);
372 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
373 return p;
374}
375
376
377/**
378 * Reads a volatile thread handle field and returns the thread name.
379 *
380 * @returns Thread name (read only).
381 * @param phThread The thread handle field.
382 */
383static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
384{
385 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
386 if (!pThread)
387 return "<NIL>";
388 if (!VALID_PTR(pThread))
389 return "<INVALID>";
390 if (pThread->u32Magic != RTTHREADINT_MAGIC)
391 return "<BAD-THREAD-MAGIC>";
392 return pThread->szName;
393}
394
395
/**
 * Launch a simple assertion like complaint w/ panic.
 *
 * Does nothing when the validator is in quiet mode.
 *
 * @param   SRC_POS     The source position where call is being made from
 *                      (RT_SRC_POS_DECL expands to pszFile, iLine and
 *                      pszFunction used below).
 * @param   pszWhat     What we're complaining about.
 * @param   ...         Format arguments.
 */
static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
        va_list va;
        va_start(va, pszWhat);
        RTAssertMsg2WeakV(pszWhat, va);
        va_end(va);
    }
    /* NOTE(review): the quiet flag is deliberately (re-)read here rather
       than cached above — presumably so a concurrent toggle while the
       message is being formatted is honoured; confirm before changing. */
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        RTAssertPanic();
}
416
417
/**
 * Describes the class.
 *
 * Appends a description of @a pClass, and optionally its prior-lock
 * relations, to the current assertion message.  Quiet mode suppresses all
 * output; bad class pointers and magics are reported instead of being
 * dereferenced further.
 *
 * @param   pszPrefix   Message prefix.
 * @param   pClass      The class to complain about.
 * @param   uSubClass   My sub-class.
 * @param   fVerbose    Verbose description including relations to other
 *                      classes.
 */
static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
{
    if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        return;

    /* Stringify the sub-class. */
    const char *pszSubClass;
    char szSubClass[32];
    if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
        switch (uSubClass)
        {
            case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
            case RTLOCKVAL_SUB_CLASS_ANY:  pszSubClass = "any"; break;
            default:
                RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
                pszSubClass = szSubClass;
                break;
        }
    else
    {
        RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
        pszSubClass = szSubClass;
    }

    /* Validate the class pointer. */
    if (!VALID_PTR(pClass))
    {
        RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
        return;
    }
    if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
    {
        RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
        return;
    }

    /* OK, dump the class info. */
    RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
                        pClass,
                        pClass->pszName,
                        pClass->CreatePos.pszFile,
                        pClass->CreatePos.uLine,
                        pClass->CreatePos.pszFunction,
                        pClass->CreatePos.uId,
                        pszSubClass);
    if (fVerbose)
    {
        /* One line per prior-lock reference, walking the whole chunk list;
           i counts array slots (incl. empty ones), cPrinted actual output. */
        uint32_t i = 0;
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
                                        cPrinted == 0
                                        ? "Prior:"
                                        : " ",
                                        i,
                                        pCurClass->pszName,
                                        pChunk->aRefs[j].fAutodidacticism
                                        ? "autodidactic"
                                        : "manually ",
                                        pChunk->aRefs[j].cLookups,
                                        pChunk->aRefs[j].cLookups != 1 ? "s" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
#endif
    }
    else
    {
        /* Compact form: up to ten prior class names per line; '*' marks
           entries the class taught itself (autodidactic). */
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    if ((cPrinted % 10) == 0)
                        RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else if ((cPrinted % 10) != 9)
                        RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else
                        RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
        else if ((cPrinted % 10) != 0)
            RTAssertMsg2AddWeak("\n");
    }
}
529
530
531/**
532 * Helper for getting the class name.
533 * @returns Class name string.
534 * @param pClass The class.
535 */
536static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
537{
538 if (!pClass)
539 return "<nil-class>";
540 if (!VALID_PTR(pClass))
541 return "<bad-class-ptr>";
542 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
543 return "<bad-class-magic>";
544 if (!pClass->pszName)
545 return "<no-class-name>";
546 return pClass->pszName;
547}
548
549/**
550 * Formats the sub-class.
551 *
552 * @returns Stringified sub-class.
553 * @param uSubClass The name.
554 * @param pszBuf Buffer that is big enough.
555 */
556static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
557{
558 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
559 switch (uSubClass)
560 {
561 case RTLOCKVAL_SUB_CLASS_NONE: return "none";
562 case RTLOCKVAL_SUB_CLASS_ANY: return "any";
563 default:
564 RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
565 break;
566 }
567 else
568 RTStrPrintf(pszBuf, 32, "%x", uSubClass);
569 return pszBuf;
570}
571
572
573/**
574 * Helper for rtLockValComplainAboutLock.
575 */
576DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
577 uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
578 const char *pszFrameType)
579{
580 char szBuf[32];
581 switch (u32Magic)
582 {
583 case RTLOCKVALRECEXCL_MAGIC:
584#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
585 RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
586 pRec->Excl.hLock, pRec->Excl.szName, pRec,
587 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
588 rtLockValComplainGetClassName(pRec->Excl.hClass),
589 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
590 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
591 pszFrameType, pszSuffix);
592#else
593 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
594 pRec->Excl.hLock, pRec->Excl.szName,
595 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
596 rtLockValComplainGetClassName(pRec->Excl.hClass),
597 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
598 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
599 pszFrameType, pszSuffix);
600#endif
601 break;
602
603 case RTLOCKVALRECSHRD_MAGIC:
604 RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
605 pRec->Shared.hLock, pRec->Shared.szName, pRec,
606 rtLockValComplainGetClassName(pRec->Shared.hClass),
607 rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
608 pszFrameType, pszSuffix);
609 break;
610
611 case RTLOCKVALRECSHRDOWN_MAGIC:
612 {
613 PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
614 if ( VALID_PTR(pShared)
615 && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
616#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
617 RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
618 pShared->hLock, pShared->szName, pShared,
619 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
620 rtLockValComplainGetClassName(pShared->hClass),
621 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
622 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
623 pszSuffix, pszSuffix);
624#else
625 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
626 pShared->hLock, pShared->szName,
627 rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
628 rtLockValComplainGetClassName(pShared->hClass),
629 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
630 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
631 pszFrameType, pszSuffix);
632#endif
633 else
634 RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
635 pShared,
636 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
637 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
638 pszFrameType, pszSuffix);
639 break;
640 }
641
642 default:
643 AssertMsgFailed(("%#x\n", u32Magic));
644 }
645}
646
647
/**
 * Describes the lock.
 *
 * Dispatches on the record magic and lets rtLockValComplainAboutLockHlp do
 * the actual formatting.  Nesting records are resolved to the record they
 * refer to; invalid pointers and unknown magics are reported rather than
 * followed.  Quiet mode suppresses all output.
 *
 * @param   pszPrefix   Message prefix.
 * @param   pRec        The lock record we're working on.
 * @param   pszSuffix   Message suffix.
 */
static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
{
    /* With recursion records enabled the per-record counter is displayed as 1
       (recursions presumably live in separate nest records — see
       RTLOCKVAL_WITH_RECURSION_RECORDS); otherwise show the real counter. */
#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
# define FIX_REC(r) 1
#else
# define FIX_REC(r) (r)
#endif
    if (   VALID_PTR(pRec)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
                                              &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
                                              &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
                break;

            case RTLOCKVALRECNEST_MAGIC:
            {
                /* Follow the nesting record to the real one, but only when it
                   points at a record with a recognized magic. */
                PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
                uint32_t u32Magic;
                if (   VALID_PTR(pRealRec)
                    && (   (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
                        || u32Magic == RTLOCKVALRECSHRD_MAGIC
                        || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
                   )
                    rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
                                                  &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
                else
                    RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pRealRec, pRec, pRec->Nest.cRecursion,
                                        pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
                                        pszSuffix);
                break;
            }

            default:
                RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
                break;
        }
    }
#undef FIX_REC
}
707
708
/**
 * Dump the lock stack.
 *
 * Walks the thread's lock stack from the top, printing one line per entry.
 * Nothing is printed in quiet mode, for invalid/magic-mismatched thread
 * pointers, or when the stack holds fewer than @a cMinFrames entries.
 *
 * @param   pThread         The thread which lock stack we're gonna dump.
 * @param   cchIndent       The indentation in chars.
 * @param   cMinFrames      The minimum number of frames to consider
 *                          dumping.
 * @param   pHighightRec    Record that should be marked specially in the
 *                          dump.
 */
static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
                                            PRTLOCKVALRECUNION pHighightRec)
{
    if (   VALID_PTR(pThread)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
        && pThread->u32Magic == RTTHREADINT_MAGIC
       )
    {
        uint32_t cEntries = rtLockValidatorStackDepth(pThread);
        if (cEntries >= cMinFrames)
        {
            RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
                                pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
            PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
            for (uint32_t i = 0; VALID_PTR(pCur); i++)
            {
                char szPrefix[80];
                RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
                /* The highlighted record gets a ' (*)' marker. */
                rtLockValComplainAboutLock(szPrefix, pCur, pHighightRec != pCur ? "\n" : " (*)\n");
                /* Advance down the stack; the pDown member's location depends
                   on the record type.  Bail out on unknown magics. */
                switch (pCur->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:    pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown); break;
                    case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
                    case RTLOCKVALRECNEST_MAGIC:    pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown); break;
                    default:
                        RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
                        pCur = NULL;
                        break;
                }
            }
            RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
        }
    }
}
753
754
/**
 * Launch the initial complaint.
 *
 * Prints the assertion header, the message, the main lock record and
 * (optionally) the calling thread's lock stack.  Quiet mode suppresses
 * everything.
 *
 * @param   pszWhat         What we're complaining about.
 * @param   pSrcPos         Where we are complaining from, as it were.
 *                          Optional (NULL yields zero/NULL position info).
 * @param   pThreadSelf     The calling thread.
 * @param   pRec            The main lock involved. Can be NULL.
 * @param   fDumpStack      Whether to dump the lock stack (true) or not
 *                          (false).
 */
static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
                                   PRTLOCKVALRECUNION pRec, bool fDumpStack)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        ASMCompilerBarrier(); /* paranoia */
        RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
        if (pSrcPos && pSrcPos->uId)
            RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        else
            RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        rtLockValComplainAboutLock("Lock: ", pRec, "\n");
        if (fDumpStack)
            rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
    }
}
781
782
783/**
784 * Continue bitching.
785 *
786 * @param pszFormat Format string.
787 * @param ... Format arguments.
788 */
789static void rtLockValComplainMore(const char *pszFormat, ...)
790{
791 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
792 {
793 va_list va;
794 va_start(va, pszFormat);
795 RTAssertMsg2AddWeakV(pszFormat, va);
796 va_end(va);
797 }
798}
799
800
801/**
802 * Raise a panic if enabled.
803 */
804static void rtLockValComplainPanic(void)
805{
806 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
807 RTAssertPanic();
808}
809
810
/**
 * Copy a source position record.
 *
 * Each field is written with an unordered atomic store, so a concurrent
 * reader never sees a torn individual field — though it may observe a mix
 * of old and new fields while the copy is in progress.
 *
 * @param   pDst    The destination.
 * @param   pSrc    The source.  Can be NULL, in which case the destination
 *                  fields are zeroed/NULLed.
 */
DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
{
    if (pSrc)
    {
        ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
        ASMAtomicUoWritePtr(&pDst->pszFile, pSrc->pszFile);
        ASMAtomicUoWritePtr(&pDst->pszFunction, pSrc->pszFunction);
        /* uId is an integer field written through a pointer-sized store. */
        ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
    }
    else
    {
        ASMAtomicUoWriteU32(&pDst->uLine, 0);
        ASMAtomicUoWriteNullPtr(&pDst->pszFile);
        ASMAtomicUoWriteNullPtr(&pDst->pszFunction);
        ASMAtomicUoWritePtr(&pDst->uId, (RTHCUINTPTR)0);
    }
}
834
835
836/**
837 * Init a source position record.
838 *
839 * @param pSrcPos The source position record.
840 */
841DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
842{
843 pSrcPos->pszFile = NULL;
844 pSrcPos->pszFunction = NULL;
845 pSrcPos->uId = 0;
846 pSrcPos->uLine = 0;
847#if HC_ARCH_BITS == 64
848 pSrcPos->u32Padding = 0;
849#endif
850}
851
852
853/**
854 * Hashes the specified source position.
855 *
856 * @returns Hash.
857 * @param pSrcPos The source position record.
858 */
859static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
860{
861 uint32_t uHash;
862 if ( ( pSrcPos->pszFile
863 || pSrcPos->pszFunction)
864 && pSrcPos->uLine != 0)
865 {
866 uHash = 0;
867 if (pSrcPos->pszFile)
868 uHash = sdbmInc(pSrcPos->pszFile, uHash);
869 if (pSrcPos->pszFunction)
870 uHash = sdbmInc(pSrcPos->pszFunction, uHash);
871 uHash += pSrcPos->uLine;
872 }
873 else
874 {
875 Assert(pSrcPos->uId);
876 uHash = (uint32_t)pSrcPos->uId;
877 }
878
879 return uHash;
880}
881
882
883/**
884 * Compares two source positions.
885 *
886 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0 if
887 * otherwise.
888 * @param pSrcPos1 The first source position.
889 * @param pSrcPos2 The second source position.
890 */
891static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
892{
893 if (pSrcPos1->uLine != pSrcPos2->uLine)
894 return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;
895
896 int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
897 if (iDiff != 0)
898 return iDiff;
899
900 iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
901 if (iDiff != 0)
902 return iDiff;
903
904 if (pSrcPos1->uId != pSrcPos2->uId)
905 return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
906 return 0;
907}
908
909
910
911/**
912 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
913 */
914DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
915{
916 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
917 if (hXRoads != NIL_RTSEMXROADS)
918 RTSemXRoadsNSEnter(hXRoads);
919}
920
921
922/**
923 * Call after rtLockValidatorSerializeDestructEnter.
924 */
925DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
926{
927 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
928 if (hXRoads != NIL_RTSEMXROADS)
929 RTSemXRoadsNSLeave(hXRoads);
930}
931
932
933/**
934 * Serializes deadlock detection against destruction of the objects being
935 * inspected.
936 */
937DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
938{
939 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
940 if (hXRoads != NIL_RTSEMXROADS)
941 RTSemXRoadsEWEnter(hXRoads);
942}
943
944
945/**
946 * Call after rtLockValidatorSerializeDetectionEnter.
947 */
948DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
949{
950 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
951 if (hXRoads != NIL_RTSEMXROADS)
952 RTSemXRoadsEWLeave(hXRoads);
953}
954
955
956/**
957 * Initializes the per thread lock validator data.
958 *
959 * @param pPerThread The data.
960 */
961DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
962{
963 pPerThread->bmFreeShrdOwners = UINT32_MAX;
964
965 /* ASSUMES the rest has already been zeroed. */
966 Assert(pPerThread->pRec == NULL);
967 Assert(pPerThread->cWriteLocks == 0);
968 Assert(pPerThread->cReadLocks == 0);
969 Assert(pPerThread->fInValidator == false);
970 Assert(pPerThread->pStackTop == NULL);
971}
972
973
974/**
975 * Delete the per thread lock validator data.
976 *
977 * @param pPerThread The data.
978 */
979DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
980{
981 /*
982 * Check that the thread doesn't own any locks at this time.
983 */
984 if (pPerThread->pStackTop)
985 {
986 rtLockValComplainFirst("Thread terminating owning locks!", NULL,
987 RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
988 pPerThread->pStackTop, true);
989 rtLockValComplainPanic();
990 }
991
992 /*
993 * Free the recursion records.
994 */
995 PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
996 pPerThread->pFreeNestRecs = NULL;
997 while (pCur)
998 {
999 PRTLOCKVALRECNEST pNext = pCur->pNextFree;
1000 RTMemFree(pCur);
1001 pCur = pNext;
1002 }
1003}
1004
1005RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1006 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1007 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1008 const char *pszNameFmt, ...)
1009{
1010 va_list va;
1011 va_start(va, pszNameFmt);
1012 int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
1013 cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
1014 va_end(va);
1015 return rc;
1016}
1017
1018
1019RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1020 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1021 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1022 const char *pszNameFmt, va_list va)
1023{
1024 Assert(cMsMinDeadlock >= 1);
1025 Assert(cMsMinOrder >= 1);
1026 AssertPtr(pSrcPos);
1027
1028 /*
1029 * Format the name and calc its length.
1030 */
1031 size_t cbName;
1032 char szName[32];
1033 if (pszNameFmt && *pszNameFmt)
1034 cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
1035 else
1036 {
1037 static uint32_t volatile s_cAnonymous = 0;
1038 uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
1039 cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
1040 }
1041
1042 /*
1043 * Figure out the file and function name lengths and allocate memory for
1044 * it all.
1045 */
1046 size_t const cbFile = pSrcPos->pszFile ? strlen(pSrcPos->pszFile) + 1 : 0;
1047 size_t const cbFunction = pSrcPos->pszFile ? strlen(pSrcPos->pszFunction) + 1 : 0;
1048 RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAllocVar(sizeof(*pThis) + cbFile + cbFunction + cbName);
1049 if (!pThis)
1050 return VERR_NO_MEMORY;
1051
1052 /*
1053 * Initialize the class data.
1054 */
1055 pThis->Core.Key = rtLockValidatorSrcPosHash(pSrcPos);
1056 pThis->Core.uchHeight = 0;
1057 pThis->Core.pLeft = NULL;
1058 pThis->Core.pRight = NULL;
1059 pThis->Core.pList = NULL;
1060 pThis->u32Magic = RTLOCKVALCLASS_MAGIC;
1061 pThis->cRefs = 1;
1062 pThis->fAutodidact = fAutodidact;
1063 pThis->fRecursionOk = fRecursionOk;
1064 pThis->fStrictReleaseOrder = fStrictReleaseOrder;
1065 pThis->fInTree = false;
1066 pThis->fDonateRefToNextRetainer = false;
1067 pThis->afReserved[0] = false;
1068 pThis->afReserved[1] = false;
1069 pThis->afReserved[2] = false;
1070 pThis->cMsMinDeadlock = cMsMinDeadlock;
1071 pThis->cMsMinOrder = cMsMinOrder;
1072 for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
1073 pThis->au32Reserved[i] = 0;
1074 for (unsigned i = 0; i < RT_ELEMENTS(pThis->PriorLocks.aRefs); i++)
1075 {
1076 pThis->PriorLocks.aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1077 pThis->PriorLocks.aRefs[i].cLookups = 0;
1078 pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
1079 pThis->PriorLocks.aRefs[i].afReserved[0] = false;
1080 pThis->PriorLocks.aRefs[i].afReserved[1] = false;
1081 pThis->PriorLocks.aRefs[i].afReserved[2] = false;
1082 }
1083 pThis->PriorLocks.pNext = NULL;
1084 for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
1085 pThis->apPriorLocksHash[i] = NULL;
1086 char *pszDst = (char *)(pThis + 1);
1087 pThis->pszName = (char *)memcpy(pszDst, szName, cbName);
1088 pszDst += cbName;
1089 rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
1090 pThis->CreatePos.pszFile = pSrcPos->pszFile ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
1091 pszDst += cbFile;
1092 pThis->CreatePos.pszFunction= pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
1093 Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
1094#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1095 pThis->cHashHits = 0;
1096 pThis->cHashMisses = 0;
1097#endif
1098
1099 *phClass = pThis;
1100 return VINF_SUCCESS;
1101}
1102
1103
1104RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1105{
1106 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1107 va_list va;
1108 va_start(va, pszNameFmt);
1109 int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
1110 fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1111 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1112 pszNameFmt, va);
1113 va_end(va);
1114 return rc;
1115}
1116
1117
1118/**
1119 * Creates a new lock validator class with a reference that is consumed by the
1120 * first call to RTLockValidatorClassRetain.
1121 *
1122 * This is tailored for use in the parameter list of a semaphore constructor.
1123 *
1124 * @returns Class handle with a reference that is automatically consumed by the
1125 * first retainer. NIL_RTLOCKVALCLASS if we run into trouble.
1126 *
1127 * @param SRC_POS The source position where call is being made from.
1128 * Use RT_SRC_POS when possible. Optional.
1129 * @param pszNameFmt Class name format string, optional (NULL). Max
1130 * length is 32 bytes.
1131 * @param ... Format string arguments.
1132 */
1133RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1134{
1135 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1136 RTLOCKVALCLASSINT *pClass;
1137 va_list va;
1138 va_start(va, pszNameFmt);
1139 int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
1140 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1141 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1142 pszNameFmt, va);
1143 va_end(va);
1144 if (RT_FAILURE(rc))
1145 return NIL_RTLOCKVALCLASS;
1146 ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
1147 return pClass;
1148}
1149
1150
1151/**
1152 * Internal class retainer.
1153 * @returns The new reference count.
1154 * @param pClass The class.
1155 */
1156DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
1157{
1158 uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
1159 if (cRefs > RTLOCKVALCLASS_MAX_REFS)
1160 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1161 else if ( cRefs == 2
1162 && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
1163 cRefs = ASMAtomicDecU32(&pClass->cRefs);
1164 return cRefs;
1165}
1166
1167
1168/**
1169 * Validates and retains a lock validator class.
1170 *
1171 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
1172 * @param hClass The class handle. NIL_RTLOCKVALCLASS is ok.
1173 */
1174DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
1175{
1176 if (hClass == NIL_RTLOCKVALCLASS)
1177 return hClass;
1178 AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
1179 AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
1180 rtLockValidatorClassRetain(hClass);
1181 return hClass;
1182}
1183
1184
1185/**
1186 * Internal class releaser.
1187 * @returns The new reference count.
1188 * @param pClass The class.
1189 */
1190DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
1191{
1192 uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
1193 if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
1194 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1195 else if (!cRefs)
1196 rtLockValidatorClassDestroy(pClass);
1197 return cRefs;
1198}
1199
1200
1201/**
1202 * Destroys a class once there are not more references to it.
1203 *
1204 * @param pClass The class.
1205 */
1206static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
1207{
1208 AssertReturnVoid(!pClass->fInTree);
1209 ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);
1210
1211 PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
1212 while (pChunk)
1213 {
1214 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1215 {
1216 RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
1217 if (pClass2 != NIL_RTLOCKVALCLASS)
1218 {
1219 pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1220 rtLockValidatorClassRelease(pClass2);
1221 }
1222 }
1223
1224 PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
1225 pChunk->pNext = NULL;
1226 if (pChunk != &pClass->PriorLocks)
1227 RTMemFree(pChunk);
1228 pChunk = pNext;
1229 }
1230
1231 RTMemFree(pClass);
1232}
1233
1234
1235RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
1236{
1237 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1238 rtLockValidatorLazyInit();
1239 int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1240
1241 uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
1242 RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
1243 while (pClass)
1244 {
1245 if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
1246 break;
1247 pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
1248 }
1249
1250 if (RT_SUCCESS(rcLock))
1251 RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
1252 return pClass;
1253}
1254
1255
1256RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1257{
1258 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1259 RTLOCKVALCLASS hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
1260 if (hClass == NIL_RTLOCKVALCLASS)
1261 {
1262 /*
1263 * Create a new class and insert it into the tree.
1264 */
1265 va_list va;
1266 va_start(va, pszNameFmt);
1267 int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
1268 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1269 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1270 pszNameFmt, va);
1271 va_end(va);
1272 if (RT_SUCCESS(rc))
1273 {
1274 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1275 rtLockValidatorLazyInit();
1276 int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1277
1278 Assert(!hClass->fInTree);
1279 hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
1280 Assert(hClass->fInTree);
1281
1282 if (RT_SUCCESS(rcLock))
1283 RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
1284 return hClass;
1285 }
1286 }
1287 return hClass;
1288}
1289
1290
1291RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
1292{
1293 RTLOCKVALCLASSINT *pClass = hClass;
1294 AssertPtrReturn(pClass, UINT32_MAX);
1295 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1296 return rtLockValidatorClassRetain(pClass);
1297}
1298
1299
1300RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
1301{
1302 RTLOCKVALCLASSINT *pClass = hClass;
1303 if (pClass == NIL_RTLOCKVALCLASS)
1304 return 0;
1305 AssertPtrReturn(pClass, UINT32_MAX);
1306 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1307 return rtLockValidatorClassRelease(pClass);
1308}
1309
1310
1311/**
1312 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
1313 * all the chunks for @a pPriorClass.
1314 *
1315 * @returns true / false.
1316 * @param pClass The class to search.
1317 * @param pPriorClass The class to search for.
1318 */
1319static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1320{
1321 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
1322 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1323 {
1324 if (pChunk->aRefs[i].hClass == pPriorClass)
1325 {
1326 uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
1327 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1328 {
1329 ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1330 cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
1331 }
1332
1333 /* update the hash table entry. */
1334 PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1335 if ( !(*ppHashEntry)
1336 || (*ppHashEntry)->cLookups + 128 < cLookups)
1337 ASMAtomicWritePtr(ppHashEntry, &pChunk->aRefs[i]);
1338
1339#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1340 ASMAtomicIncU32(&pClass->cHashMisses);
1341#endif
1342 return true;
1343 }
1344 }
1345
1346 return false;
1347}
1348
1349
1350/**
1351 * Checks if @a pPriorClass is a known prior class.
1352 *
1353 * @returns true / false.
1354 * @param pClass The class to search.
1355 * @param pPriorClass The class to search for.
1356 */
1357DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1358{
1359 /*
1360 * Hash lookup here.
1361 */
1362 PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1363 if ( pRef
1364 && pRef->hClass == pPriorClass)
1365 {
1366 uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
1367 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1368 ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1369#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1370 ASMAtomicIncU32(&pClass->cHashHits);
1371#endif
1372 return true;
1373 }
1374
1375 return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
1376}
1377
1378
1379/**
1380 * Adds a class to the prior list.
1381 *
1382 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
1383 * @param pClass The class to work on.
1384 * @param pPriorClass The class to add.
1385 * @param fAutodidacticism Whether we're teaching ourselves (true) or
1386 * somebody is teaching us via the API (false).
1387 * @param pSrcPos Where this rule was added (optional).
1388 */
1389static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
1390 bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
1391{
1392 NOREF(pSrcPos);
1393 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
1394 rtLockValidatorLazyInit();
1395 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
1396
1397 /*
1398 * Check that there are no conflict (no assert since we might race each other).
1399 */
1400 int rc = VERR_SEM_LV_INTERNAL_ERROR;
1401 if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
1402 {
1403 if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
1404 {
1405 /*
1406 * Scan the table for a free entry, allocating a new chunk if necessary.
1407 */
1408 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
1409 {
1410 bool fDone = false;
1411 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1412 {
1413 ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
1414 if (fDone)
1415 {
1416 pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
1417 rtLockValidatorClassRetain(pPriorClass);
1418 rc = VINF_SUCCESS;
1419 break;
1420 }
1421 }
1422 if (fDone)
1423 break;
1424
1425 /* If no more chunks, allocate a new one and insert the class before linking it. */
1426 if (!pChunk->pNext)
1427 {
1428 PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
1429 if (!pNew)
1430 {
1431 rc = VERR_NO_MEMORY;
1432 break;
1433 }
1434 pNew->pNext = NULL;
1435 for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
1436 {
1437 pNew->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1438 pNew->aRefs[i].cLookups = 0;
1439 pNew->aRefs[i].fAutodidacticism = false;
1440 pNew->aRefs[i].afReserved[0] = false;
1441 pNew->aRefs[i].afReserved[1] = false;
1442 pNew->aRefs[i].afReserved[2] = false;
1443 }
1444
1445 pNew->aRefs[0].hClass = pPriorClass;
1446 pNew->aRefs[0].fAutodidacticism = fAutodidacticism;
1447
1448 ASMAtomicWritePtr(&pChunk->pNext, pNew);
1449 rtLockValidatorClassRetain(pPriorClass);
1450 rc = VINF_SUCCESS;
1451 break;
1452 }
1453 } /* chunk loop */
1454 }
1455 else
1456 rc = VINF_SUCCESS;
1457 }
1458 else
1459 rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
1460
1461 if (RT_SUCCESS(rcLock))
1462 RTCritSectLeave(&g_LockValClassTeachCS);
1463 return rc;
1464}
1465
1466
1467RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
1468{
1469 RTLOCKVALCLASSINT *pClass = hClass;
1470 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1471 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1472
1473 RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
1474 AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
1475 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1476
1477 return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
1478}
1479
1480
1481RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
1482{
1483 RTLOCKVALCLASSINT *pClass = hClass;
1484 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1485 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1486
1487 ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
1488 return VINF_SUCCESS;
1489}
1490
1491
1492/**
1493 * Unlinks all siblings.
1494 *
1495 * This is used during record deletion and assumes no races.
1496 *
1497 * @param pCore One of the siblings.
1498 */
1499static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
1500{
1501 /* ASSUMES sibling destruction doesn't involve any races and that all
1502 related records are to be disposed off now. */
1503 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
1504 while (pSibling)
1505 {
1506 PRTLOCKVALRECUNION volatile *ppCoreNext;
1507 switch (pSibling->Core.u32Magic)
1508 {
1509 case RTLOCKVALRECEXCL_MAGIC:
1510 case RTLOCKVALRECEXCL_MAGIC_DEAD:
1511 ppCoreNext = &pSibling->Excl.pSibling;
1512 break;
1513
1514 case RTLOCKVALRECSHRD_MAGIC:
1515 case RTLOCKVALRECSHRD_MAGIC_DEAD:
1516 ppCoreNext = &pSibling->Shared.pSibling;
1517 break;
1518
1519 default:
1520 AssertFailed();
1521 ppCoreNext = NULL;
1522 break;
1523 }
1524 if (RT_UNLIKELY(ppCoreNext))
1525 break;
1526 pSibling = ASMAtomicXchgPtrT(ppCoreNext, NULL, PRTLOCKVALRECUNION);
1527 }
1528}
1529
1530
1531RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
1532{
1533 /*
1534 * Validate input.
1535 */
1536 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
1537 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
1538
1539 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
1540 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1541 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1542 , VERR_SEM_LV_INVALID_PARAMETER);
1543
1544 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
1545 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1546 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1547 , VERR_SEM_LV_INVALID_PARAMETER);
1548
1549 /*
1550 * Link them (circular list).
1551 */
1552 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1553 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
1554 {
1555 p1->Excl.pSibling = p2;
1556 p2->Shared.pSibling = p1;
1557 }
1558 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1559 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
1560 {
1561 p1->Shared.pSibling = p2;
1562 p2->Excl.pSibling = p1;
1563 }
1564 else
1565 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
1566
1567 return VINF_SUCCESS;
1568}
1569
1570
#if 0 /* unused */
/**
 * Gets the lock name for the given record.
 *
 * NOTE(review): compiled out (#if 0); kept for reference.  Resolves nest
 * records one level to the real record before reading the name.
 *
 * @returns Read-only lock name.
 * @param   pRec                The lock record.
 */
DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.szName;
        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.szName;
        case RTLOCKVALRECSHRDOWN_MAGIC:
            return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
        case RTLOCKVALRECNEST_MAGIC:
            /* Follow the nest record to the real lock record. */
            pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
            if (VALID_PTR(pRec))
            {
                switch (pRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRec->Excl.szName;
                    case RTLOCKVALRECSHRD_MAGIC:
                        return pRec->Shared.szName;
                    case RTLOCKVALRECSHRDOWN_MAGIC:
                        return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
                    default:
                        return "unknown-nested";
                }
            }
            return "orphaned-nested";
        default:
            return "unknown";
    }
}
#endif /* unused */
1610
1611
#if 0 /* unused */
/**
 * Gets the class for this locking record.
 *
 * NOTE(review): compiled out (#if 0); kept for reference.  Nest records are
 * resolved one level to the real record before reading the class.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec                The lock validator record.
 */
DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            /* Per-owner record: the class lives on the shared record it belongs to. */
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                return pSharedRec->hClass;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            /* Nest record: follow it to the real record. */
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                            return pSharedRec->hClass;
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            return NIL_RTLOCKVALCLASS;
    }
}
#endif /* unused */
1671
1672/**
1673 * Gets the class for this locking record and the pointer to the one below it in
1674 * the stack.
1675 *
1676 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
1677 * @param pRec The lock validator record.
1678 * @param puSubClass Where to return the sub-class.
1679 * @param ppDown Where to return the pointer to the record below.
1680 */
1681DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
1682rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
1683{
1684 switch (pRec->Core.u32Magic)
1685 {
1686 case RTLOCKVALRECEXCL_MAGIC:
1687 *ppDown = pRec->Excl.pDown;
1688 *puSubClass = pRec->Excl.uSubClass;
1689 return pRec->Excl.hClass;
1690
1691 case RTLOCKVALRECSHRD_MAGIC:
1692 *ppDown = NULL;
1693 *puSubClass = pRec->Shared.uSubClass;
1694 return pRec->Shared.hClass;
1695
1696 case RTLOCKVALRECSHRDOWN_MAGIC:
1697 {
1698 *ppDown = pRec->ShrdOwner.pDown;
1699
1700 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1701 if (RT_LIKELY( VALID_PTR(pSharedRec)
1702 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1703 {
1704 *puSubClass = pSharedRec->uSubClass;
1705 return pSharedRec->hClass;
1706 }
1707 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1708 return NIL_RTLOCKVALCLASS;
1709 }
1710
1711 case RTLOCKVALRECNEST_MAGIC:
1712 {
1713 *ppDown = pRec->Nest.pDown;
1714
1715 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1716 if (VALID_PTR(pRealRec))
1717 {
1718 switch (pRealRec->Core.u32Magic)
1719 {
1720 case RTLOCKVALRECEXCL_MAGIC:
1721 *puSubClass = pRealRec->Excl.uSubClass;
1722 return pRealRec->Excl.hClass;
1723
1724 case RTLOCKVALRECSHRDOWN_MAGIC:
1725 {
1726 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1727 if (RT_LIKELY( VALID_PTR(pSharedRec)
1728 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1729 {
1730 *puSubClass = pSharedRec->uSubClass;
1731 return pSharedRec->hClass;
1732 }
1733 break;
1734 }
1735
1736 default:
1737 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1738 break;
1739 }
1740 }
1741 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1742 return NIL_RTLOCKVALCLASS;
1743 }
1744
1745 default:
1746 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1747 *ppDown = NULL;
1748 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1749 return NIL_RTLOCKVALCLASS;
1750 }
1751}
1752
1753
1754/**
1755 * Gets the sub-class for a lock record.
1756 *
1757 * @returns the sub-class.
1758 * @param pRec The lock validator record.
1759 */
1760DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1761{
1762 switch (pRec->Core.u32Magic)
1763 {
1764 case RTLOCKVALRECEXCL_MAGIC:
1765 return pRec->Excl.uSubClass;
1766
1767 case RTLOCKVALRECSHRD_MAGIC:
1768 return pRec->Shared.uSubClass;
1769
1770 case RTLOCKVALRECSHRDOWN_MAGIC:
1771 {
1772 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1773 if (RT_LIKELY( VALID_PTR(pSharedRec)
1774 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1775 return pSharedRec->uSubClass;
1776 return RTLOCKVAL_SUB_CLASS_NONE;
1777 }
1778
1779 case RTLOCKVALRECNEST_MAGIC:
1780 {
1781 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1782 if (VALID_PTR(pRealRec))
1783 {
1784 switch (pRealRec->Core.u32Magic)
1785 {
1786 case RTLOCKVALRECEXCL_MAGIC:
1787 return pRec->Excl.uSubClass;
1788
1789 case RTLOCKVALRECSHRDOWN_MAGIC:
1790 {
1791 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1792 if (RT_LIKELY( VALID_PTR(pSharedRec)
1793 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1794 return pSharedRec->uSubClass;
1795 break;
1796 }
1797
1798 default:
1799 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1800 break;
1801 }
1802 }
1803 return RTLOCKVAL_SUB_CLASS_NONE;
1804 }
1805
1806 default:
1807 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1808 return RTLOCKVAL_SUB_CLASS_NONE;
1809 }
1810}
1811
1812
1813
1814
1815/**
1816 * Calculates the depth of a lock stack.
1817 *
1818 * @returns Number of stack frames.
1819 * @param pThread The thread.
1820 */
1821static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
1822{
1823 uint32_t cEntries = 0;
1824 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
1825 while (VALID_PTR(pCur))
1826 {
1827 switch (pCur->Core.u32Magic)
1828 {
1829 case RTLOCKVALRECEXCL_MAGIC:
1830 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
1831 break;
1832
1833 case RTLOCKVALRECSHRDOWN_MAGIC:
1834 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
1835 break;
1836
1837 case RTLOCKVALRECNEST_MAGIC:
1838 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
1839 break;
1840
1841 default:
1842 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
1843 }
1844 cEntries++;
1845 }
1846 return cEntries;
1847}
1848
1849
#ifdef RT_STRICT
/**
 * Checks if the stack contains @a pRec.
 *
 * Strict-build helper used from assertions only; walks the thread's lock
 * stack via the per-record-type down pointers.
 *
 * @returns true / false.
 * @param   pThreadSelf         The current thread.
 * @param   pRec                The lock record.
 */
static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
    while (pCur)
    {
        AssertPtrReturn(pCur, false);
        if (pCur == pRec)
            return true;
        /* Advance via the down pointer matching the record type. */
        switch (pCur->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                Assert(pCur->Excl.cRecursion >= 1);
                pCur = pCur->Excl.pDown;
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                Assert(pCur->ShrdOwner.cRecursion >= 1);
                pCur = pCur->ShrdOwner.pDown;
                break;

            case RTLOCKVALRECNEST_MAGIC:
                Assert(pCur->Nest.cRecursion > 1);
                pCur = pCur->Nest.pDown;
                break;

            default:
                AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), false);
        }
    }
    return false;
}
#endif /* RT_STRICT */
1890
1891
1892/**
1893 * Pushes a lock record onto the stack.
1894 *
1895 * @param pThreadSelf The current thread.
1896 * @param pRec The lock record.
1897 */
1898static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1899{
1900 Assert(pThreadSelf == RTThreadSelf());
1901 Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));
1902
1903 switch (pRec->Core.u32Magic)
1904 {
1905 case RTLOCKVALRECEXCL_MAGIC:
1906 Assert(pRec->Excl.cRecursion == 1);
1907 Assert(pRec->Excl.pDown == NULL);
1908 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
1909 break;
1910
1911 case RTLOCKVALRECSHRDOWN_MAGIC:
1912 Assert(pRec->ShrdOwner.cRecursion == 1);
1913 Assert(pRec->ShrdOwner.pDown == NULL);
1914 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
1915 break;
1916
1917 default:
1918 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1919 }
1920 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
1921}
1922
1923
1924/**
1925 * Pops a lock record off the stack.
1926 *
1927 * @param pThreadSelf The current thread.
1928 * @param pRec The lock.
1929 */
1930static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1931{
1932 Assert(pThreadSelf == RTThreadSelf());
1933
1934 PRTLOCKVALRECUNION pDown;
1935 switch (pRec->Core.u32Magic)
1936 {
1937 case RTLOCKVALRECEXCL_MAGIC:
1938 Assert(pRec->Excl.cRecursion == 0);
1939 pDown = pRec->Excl.pDown;
1940 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
1941 break;
1942
1943 case RTLOCKVALRECSHRDOWN_MAGIC:
1944 Assert(pRec->ShrdOwner.cRecursion == 0);
1945 pDown = pRec->ShrdOwner.pDown;
1946 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
1947 break;
1948
1949 default:
1950 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1951 }
1952 if (pThreadSelf->LockValidator.pStackTop == pRec)
1953 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
1954 else
1955 {
1956 /* Find the pointer to our record and unlink ourselves. */
1957 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1958 while (pCur)
1959 {
1960 PRTLOCKVALRECUNION volatile *ppDown;
1961 switch (pCur->Core.u32Magic)
1962 {
1963 case RTLOCKVALRECEXCL_MAGIC:
1964 Assert(pCur->Excl.cRecursion >= 1);
1965 ppDown = &pCur->Excl.pDown;
1966 break;
1967
1968 case RTLOCKVALRECSHRDOWN_MAGIC:
1969 Assert(pCur->ShrdOwner.cRecursion >= 1);
1970 ppDown = &pCur->ShrdOwner.pDown;
1971 break;
1972
1973 case RTLOCKVALRECNEST_MAGIC:
1974 Assert(pCur->Nest.cRecursion >= 1);
1975 ppDown = &pCur->Nest.pDown;
1976 break;
1977
1978 default:
1979 AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
1980 }
1981 pCur = *ppDown;
1982 if (pCur == pRec)
1983 {
1984 rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
1985 return;
1986 }
1987 }
1988 AssertMsgFailed(("%p %p\n", pRec, pThreadSelf));
1989 }
1990}
1991
1992
/**
 * Creates and pushes a lock recursion record onto the stack.
 *
 * Recursion records are pushed on top of the stack so that it mirrors the
 * exact acquisition sequence, including recursive entries.  Allocation
 * failure is silently tolerated (validation is best effort).
 *
 * @param   pThreadSelf The current thread.
 * @param   pRec        The lock record (must already be on the stack).
 * @param   pSrcPos     Where the recursion occurred.
 */
static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));

#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
    /*
     * Allocate a new recursion record, preferring the thread's free list
     * over hitting the heap.
     */
    PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
    if (pRecursionRec)
        pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
    else
    {
        pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
        if (!pRecursionRec)
            return; /* best effort - skip the recursion record on OOM */
    }

    /*
     * Initialize it.
     */
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            pRecursionRec->cRecursion = pRec->Excl.cRecursion;
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
            break;

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            /* NOTE(review): the enter/leave pair presumably synchronizes with
               concurrent stack dumpers before the record is freed - verify. */
            rtLockValidatorSerializeDestructEnter();
            rtLockValidatorSerializeDestructLeave();
            RTMemFree(pRecursionRec);
            return;
    }
    Assert(pRecursionRec->cRecursion > 1);
    pRecursionRec->pRec = pRec;
    pRecursionRec->pDown = NULL;
    pRecursionRec->pNextFree = NULL;
    rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
    /* Set the magic last so the record is fully formed once it looks valid. */
    pRecursionRec->Core.u32Magic = RTLOCKVALRECNEST_MAGIC;

    /*
     * Link it onto the top of the stack.
     */
    pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
}
2053
2054
/**
 * Pops a lock recursion record off the stack.
 *
 * The recursion record is located (normally on top, but out-of-order releases
 * are tolerated), unlinked, scrubbed and put on the thread's free list for
 * reuse by rtLockValidatorStackPushRecursion.
 *
 * @param   pThreadSelf The current thread.
 * @param   pRec        The lock record the recursion record refers to.
 */
static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));

    /* Read the lock's current recursion count for sanity checking below. */
    uint32_t cRecursion;
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC: cRecursion = pRec->Excl.cRecursion; break;
        case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
        default: AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
    }
    Assert(cRecursion >= 1);

#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
    /*
     * Pop the recursion record.
     */
    PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
    if (   pNest != NULL
        && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
        && pNest->Nest.pRec == pRec
       )
    {
        /* Common case: it's the topmost record. */
        Assert(pNest->Nest.cRecursion == cRecursion + 1);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
    }
    else
    {
        /* Find the record above ours. */
        PRTLOCKVALRECUNION volatile *ppDown = NULL;
        for (;;)
        {
            AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
            switch (pNest->Core.u32Magic)
            {
                case RTLOCKVALRECEXCL_MAGIC:
                    ppDown = &pNest->Excl.pDown;
                    pNest = *ppDown;
                    continue;
                case RTLOCKVALRECSHRDOWN_MAGIC:
                    ppDown = &pNest->ShrdOwner.pDown;
                    pNest = *ppDown;
                    continue;
                case RTLOCKVALRECNEST_MAGIC:
                    if (pNest->Nest.pRec == pRec)
                        break; /* found our recursion record */
                    ppDown = &pNest->Nest.pDown;
                    pNest = *ppDown;
                    continue;
                default:
                    AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
            }
            break; /* ugly */
        }
        Assert(pNest->Nest.cRecursion == cRecursion + 1);
        rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
    }

    /*
     * Invalidate and free the record.
     */
    /* NOTE(review): this writes the live RTLOCKVALRECNEST_MAGIC rather than a
       dead-magic value; presumably intentional since the record is recycled
       via the free list right below - verify against the magic conventions
       used elsewhere in this file. */
    ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC);
    rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
    rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
    pNest->Nest.cRecursion = 0;
    pNest->Nest.pNextFree = pThreadSelf->LockValidator.pFreeNestRecs;
    pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
}
2131
2132
2133/**
2134 * Helper for rtLockValidatorStackCheckLockingOrder that does the bitching and
2135 * returns VERR_SEM_LV_WRONG_ORDER.
2136 */
2137static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
2138 PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
2139 RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)
2140
2141
2142{
2143 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
2144 rtLockValComplainAboutLock("Other lock: ", pRec2, "\n");
2145 rtLockValComplainAboutClass("My class: ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
2146 rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
2147 rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
2148 rtLockValComplainPanic();
2149 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
2150}
2151
2152
2153/**
2154 * Checks if the sub-class order is ok or not.
2155 *
2156 * Used to deal with two locks from the same class.
2157 *
2158 * @returns true if ok, false if not.
2159 * @param uSubClass1 The sub-class of the lock that is being
2160 * considered.
2161 * @param uSubClass2 The sub-class of the lock that is already being
2162 * held.
2163 */
2164DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2165{
2166 if (uSubClass1 > uSubClass2)
2167 {
2168 /* NONE kills ANY. */
2169 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2170 return false;
2171 return true;
2172 }
2173
2174 /* ANY counters all USER values. (uSubClass1 == NONE only if they are equal) */
2175 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2176 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2177 return true;
2178 return false;
2179}
2180
2181
2182/**
2183 * Checks if the class and sub-class lock order is ok.
2184 *
2185 * @returns true if ok, false if not.
2186 * @param pClass1 The class of the lock that is being considered.
2187 * @param uSubClass1 The sub-class that goes with @a pClass1.
2188 * @param pClass2 The class of the lock that is already being
2189 * held.
2190 * @param uSubClass2 The sub-class that goes with @a pClass2.
2191 */
2192DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2193 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2194{
2195 if (pClass1 == pClass2)
2196 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2197 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2198}
2199
2200
/**
 * Checks the locking order, part two.
 *
 * Called by rtLockValidatorStackCheckLockingOrder once a conflicting entry
 * has been found on the stack.  Either reports the violation, or - for
 * autodidact classes - teaches the class the new ordering rules, provided no
 * direct contradiction exists further down the stack.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
 * @param   pClass          The lock class.
 * @param   uSubClass       The lock sub-class.
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock record.
 * @param   pSrcPos         The source position of the locking operation.
 * @param   pFirstBadClass  The first bad class.
 * @param   pFirstBadRec    The first bad lock record.
 * @param   pFirstBadDown   The next record on the lock stack.
 */
static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
                                                  PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
                                                  PCRTLOCKVALSRCPOS const pSrcPos,
                                                  RTLOCKVALCLASSINT * const pFirstBadClass,
                                                  PRTLOCKVALRECUNION const pFirstBadRec,
                                                  PRTLOCKVALRECUNION const pFirstBadDown)
{
    /*
     * Something went wrong, pCur is pointing to where.
     */
    /* A hard conflict (same class, or the reverse rule already exists) cannot
       be taught away; neither can anything involving a non-autodidact class. */
    if (   pClass == pFirstBadClass
        || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
        return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);
    if (!pClass->fAutodidact)
        return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);

    /*
     * This class is an autodidact, so we have to check out the rest of the stack
     * for direct violations.
     */
    uint32_t cNewRules = 1;
    PRTLOCKVALRECUNION pCur = pFirstBadDown;
    while (pCur)
    {
        AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);

        if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
            pCur = pCur->Nest.pDown;
        else
        {
            PRTLOCKVALRECUNION pDown;
            uint32_t uPriorSubClass;
            RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
            if (pPriorClass != NIL_RTLOCKVALCLASS)
            {
                AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
                AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
                if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                {
                    /* Unteachable conflict further down: give up. */
                    if (   pClass == pPriorClass
                        || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
                                                              pRec, pCur, pClass, pPriorClass);
                    cNewRules++;
                }
            }
            pCur = pDown;
        }
    }

    if (cNewRules == 1)
    {
        /*
         * Special case the simple operation, hoping that it will be a
         * frequent case.
         */
        int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
        if (rc == VERR_SEM_LV_WRONG_ORDER)
            return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
                                                  pRec, pFirstBadRec, pClass, pFirstBadClass);
        Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
    }
    else
    {
        /*
         * We may be adding more than one rule, so we have to take the lock
         * before starting to add the rules.  This means we have to check
         * the state after taking it since we might be racing someone adding
         * a conflicting rule.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            rtLockValidatorLazyInit();
        /* NOTE(review): on enter failure the code carries on without the
           lock (best effort); rcLock only pairs the leave calls below. */
        int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

        /* Check */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t uPriorSubClass;
                PRTLOCKVALRECUNION pDown;
                RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        if (   pClass == pPriorClass
                            || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        {
                            if (RT_SUCCESS(rcLock))
                                RTCritSectLeave(&g_LockValClassTeachCS);
                            return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
                                                                  pRec, pCur, pClass, pPriorClass);
                        }
                    }
                }
                pCur = pDown;
            }
        }

        /* Iterate the stack yet again, adding new rules this time. */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t uPriorSubClass;
                PRTLOCKVALRECUNION pDown;
                RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        Assert(   pClass != pPriorClass
                               && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
                        int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
                        if (RT_FAILURE(rc))
                        {
                            /* Out of memory: stop teaching, don't fail the caller. */
                            Assert(rc == VERR_NO_MEMORY);
                            break;
                        }
                        Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
                    }
                }
                pCur = pDown;
            }
        }

        if (RT_SUCCESS(rcLock))
            RTCritSectLeave(&g_LockValClassTeachCS);
    }

    return VINF_SUCCESS;
}
2355
2356
2357
2358/**
2359 * Checks the locking order.
2360 *
2361 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2362 * @param pClass The lock class.
2363 * @param uSubClass The lock sub-class.
2364 * @param pThreadSelf The current thread.
2365 * @param pRec The lock record.
2366 * @param pSrcPos The source position of the locking operation.
2367 */
2368static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2369 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2370 PCRTLOCKVALSRCPOS pSrcPos)
2371{
2372 /*
2373 * Some internal paranoia first.
2374 */
2375 AssertPtr(pClass);
2376 Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
2377 AssertPtr(pThreadSelf);
2378 Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
2379 AssertPtr(pRec);
2380 AssertPtrNull(pSrcPos);
2381
2382 /*
2383 * Walk the stack, delegate problems to a worker routine.
2384 */
2385 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
2386 if (!pCur)
2387 return VINF_SUCCESS;
2388
2389 for (;;)
2390 {
2391 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2392
2393 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2394 pCur = pCur->Nest.pDown;
2395 else
2396 {
2397 uint32_t uPriorSubClass;
2398 PRTLOCKVALRECUNION pDown;
2399 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2400 if (pPriorClass != NIL_RTLOCKVALCLASS)
2401 {
2402 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2403 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2404 if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
2405 return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
2406 pPriorClass, pCur, pDown);
2407 }
2408 pCur = pDown;
2409 }
2410 if (!pCur)
2411 return VINF_SUCCESS;
2412 }
2413}
2414
2415
/**
 * Check that the lock record is the topmost one on the stack, complain and fail
 * if it isn't.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
 *          VERR_SEM_LV_INVALID_PARAMETER.
 * @param   pThreadSelf The current thread.
 * @param   pRec        The record.
 */
static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    /* OK if the record itself - or a recursion record referring to it - is on top. */
    PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
    if (RT_LIKELY(   pTop == pRec
                  || (   pTop
                      && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
                      && pTop->Nest.pRec == pRec) ))
        return VINF_SUCCESS;

#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
    /* Look for a recursion record so the right frame is dumped and marked. */
    while (pTop)
    {
        if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
        {
            if (pTop->Nest.pRec == pRec)
            {
                pRec = pTop;
                break;
            }
            pTop = pTop->Nest.pDown;
        }
        else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
            pTop = pTop->Excl.pDown;
        else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
            pTop = pTop->ShrdOwner.pDown;
        else
            break;
    }
#endif

    rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
    rtLockValComplainPanic();
    /* Soft mode downgrades the failure to a success status. */
    return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
}
2463
2464
/**
 * Checks if all owners are blocked - shared record operated in signaller mode.
 *
 * Scans the owner table and returns true only when every live owner entry
 * refers to a thread that is either sleeping or is the calling thread.
 *
 * @returns true / false accordingly.
 * @param   pRec        The shared lock record.
 * @param   pThreadSelf The current thread.
 */
DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
{
    /* Snapshot the table dimensions; no owners means nothing is blocked. */
    PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
    uint32_t cAllocated = pRec->cAllocated;
    uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
    if (cEntries == 0)
        return false;

    for (uint32_t i = 0; i < cAllocated; i++)
    {
        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
        if (   pEntry
            && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
        {
            /* Any owner whose thread is gone, invalid or running (and isn't
               us) disproves the claim. */
            PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
            if (!pCurThread)
                return false;
            if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
                return false;
            if (   !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
                && pCurThread != pThreadSelf)
                return false;
            /* Stop early once all live entries have been accounted for. */
            if (--cEntries == 0)
                break;
        }
        else
            Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
    }

    return true;
}
2503
2504
/**
 * Verifies the deadlock stack before calling it a deadlock.
 *
 * The stack was collected without any global lock, so the involved threads
 * may have moved on.  Re-validate every entry three times, yielding between
 * passes, before concluding that the cycle is real.
 *
 * @retval  VERR_SEM_LV_DEADLOCK if it's a deadlock.
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
 * @retval  VERR_TRY_AGAIN if something changed.
 *
 * @param   pStack      The deadlock detection stack.
 * @param   pThreadSelf The current thread.
 */
static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
{
    uint32_t const c = pStack->c;
    for (uint32_t iPass = 0; iPass < 3; iPass++)
    {
        for (uint32_t i = 1; i < c; i++)
        {
            PRTTHREADINT pThread = pStack->a[i].pThread;
            if (pThread->u32Magic != RTTHREADINT_MAGIC)
                return VERR_TRY_AGAIN;      /* thread structure no longer valid */
            if (rtThreadGetState(pThread) != pStack->a[i].enmState)
                return VERR_TRY_AGAIN;      /* thread state changed */
            if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
                return VERR_TRY_AGAIN;      /* thread waits on something else now */
            /* ASSUMES the signaller records won't have siblings! */
            PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
            if (   pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                && pRec->Shared.fSignaller
                && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
                return VERR_TRY_AGAIN;
        }
        RTThreadYield();
    }

    /* A one-entry cycle means the thread blocks on a lock it already owns. */
    if (c == 1)
        return VERR_SEM_LV_ILLEGAL_UPGRADE;
    return VERR_SEM_LV_DEADLOCK;
}
2543
2544
2545/**
2546 * Checks for stack cycles caused by another deadlock before returning.
2547 *
2548 * @retval VINF_SUCCESS if the stack is simply too small.
2549 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2550 *
2551 * @param pStack The deadlock detection stack.
2552 */
2553static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2554{
2555 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2556 {
2557 PRTTHREADINT pThread = pStack->a[i].pThread;
2558 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2559 if (pStack->a[j].pThread == pThread)
2560 return VERR_SEM_LV_EXISTING_DEADLOCK;
2561 }
2562 static bool volatile s_fComplained = false;
2563 if (!s_fComplained)
2564 {
2565 s_fComplained = true;
2566 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2567 }
2568 return VINF_SUCCESS;
2569}
2570
2571
2572/**
2573 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
2574 * detection.
2575 *
2576 * @retval VINF_SUCCESS
2577 * @retval VERR_SEM_LV_DEADLOCK
2578 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2579 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2580 * @retval VERR_TRY_AGAIN
2581 *
2582 * @param pStack The stack to use.
2583 * @param pOriginalRec The original record.
2584 * @param pThreadSelf The calling thread.
2585 */
2586static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
2587 PRTTHREADINT const pThreadSelf)
2588{
2589 pStack->c = 0;
2590
2591 /* We could use a single RTLOCKVALDDENTRY variable here, but the
2592 compiler may make a better job of it when using individual variables. */
2593 PRTLOCKVALRECUNION pRec = pOriginalRec;
2594 PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
2595 uint32_t iEntry = UINT32_MAX;
2596 PRTTHREADINT pThread = NIL_RTTHREAD;
2597 RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
2598 for (uint32_t iLoop = 0; ; iLoop++)
2599 {
2600 /*
2601 * Process the current record.
2602 */
2603 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2604
2605 /* Find the next relevant owner thread and record. */
2606 PRTLOCKVALRECUNION pNextRec = NULL;
2607 RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
2608 PRTTHREADINT pNextThread = NIL_RTTHREAD;
2609 switch (pRec->Core.u32Magic)
2610 {
2611 case RTLOCKVALRECEXCL_MAGIC:
2612 Assert(iEntry == UINT32_MAX);
2613 for (;;)
2614 {
2615 pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
2616 if ( !pNextThread
2617 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2618 break;
2619 enmNextState = rtThreadGetState(pNextThread);
2620 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2621 && pNextThread != pThreadSelf)
2622 break;
2623 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2624 if (RT_LIKELY( !pNextRec
2625 || enmNextState == rtThreadGetState(pNextThread)))
2626 break;
2627 pNextRec = NULL;
2628 }
2629 if (!pNextRec)
2630 {
2631 pRec = pRec->Excl.pSibling;
2632 if ( pRec
2633 && pRec != pFirstSibling)
2634 continue;
2635 pNextThread = NIL_RTTHREAD;
2636 }
2637 break;
2638
2639 case RTLOCKVALRECSHRD_MAGIC:
2640 if (!pRec->Shared.fSignaller)
2641 {
2642 /* Skip to the next sibling if same side. ASSUMES reader priority. */
2643 /** @todo The read side of a read-write lock is problematic if
2644 * the implementation prioritizes writers over readers because
2645 * that means we should could deadlock against current readers
2646 * if a writer showed up. If the RW sem implementation is
2647 * wrapping some native API, it's not so easy to detect when we
2648 * should do this and when we shouldn't. Checking when we
2649 * shouldn't is subject to wakeup scheduling and cannot easily
2650 * be made reliable.
2651 *
2652 * At the moment we circumvent all this mess by declaring that
2653 * readers has priority. This is TRUE on linux, but probably
2654 * isn't on Solaris and FreeBSD. */
2655 if ( pRec == pFirstSibling
2656 && pRec->Shared.pSibling != NULL
2657 && pRec->Shared.pSibling != pFirstSibling)
2658 {
2659 pRec = pRec->Shared.pSibling;
2660 Assert(iEntry == UINT32_MAX);
2661 continue;
2662 }
2663 }
2664
2665 /* Scan the owner table for blocked owners. */
2666 if ( ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
2667 && ( !pRec->Shared.fSignaller
2668 || iEntry != UINT32_MAX
2669 || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
2670 )
2671 )
2672 {
2673 uint32_t cAllocated = pRec->Shared.cAllocated;
2674 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
2675 while (++iEntry < cAllocated)
2676 {
2677 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
2678 if (pEntry)
2679 {
2680 for (;;)
2681 {
2682 if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
2683 break;
2684 pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2685 if ( !pNextThread
2686 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2687 break;
2688 enmNextState = rtThreadGetState(pNextThread);
2689 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2690 && pNextThread != pThreadSelf)
2691 break;
2692 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2693 if (RT_LIKELY( !pNextRec
2694 || enmNextState == rtThreadGetState(pNextThread)))
2695 break;
2696 pNextRec = NULL;
2697 }
2698 if (pNextRec)
2699 break;
2700 }
2701 else
2702 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2703 }
2704 if (pNextRec)
2705 break;
2706 pNextThread = NIL_RTTHREAD;
2707 }
2708
2709 /* Advance to the next sibling, if any. */
2710 pRec = pRec->Shared.pSibling;
2711 if ( pRec != NULL
2712 && pRec != pFirstSibling)
2713 {
2714 iEntry = UINT32_MAX;
2715 continue;
2716 }
2717 break;
2718
2719 case RTLOCKVALRECEXCL_MAGIC_DEAD:
2720 case RTLOCKVALRECSHRD_MAGIC_DEAD:
2721 break;
2722
2723 case RTLOCKVALRECSHRDOWN_MAGIC:
2724 case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
2725 default:
2726 AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core));
2727 break;
2728 }
2729
2730 if (pNextRec)
2731 {
2732 /*
2733 * Recurse and check for deadlock.
2734 */
2735 uint32_t i = pStack->c;
2736 if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
2737 return rtLockValidatorDdHandleStackOverflow(pStack);
2738
2739 pStack->c++;
2740 pStack->a[i].pRec = pRec;
2741 pStack->a[i].iEntry = iEntry;
2742 pStack->a[i].enmState = enmState;
2743 pStack->a[i].pThread = pThread;
2744 pStack->a[i].pFirstSibling = pFirstSibling;
2745
2746 if (RT_UNLIKELY( pNextThread == pThreadSelf
2747 && ( i != 0
2748 || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
2749 || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
2750 )
2751 )
2752 return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);
2753
2754 pRec = pNextRec;
2755 pFirstSibling = pNextRec;
2756 iEntry = UINT32_MAX;
2757 enmState = enmNextState;
2758 pThread = pNextThread;
2759 }
2760 else
2761 {
2762 /*
2763 * No deadlock here, unwind the stack and deal with any unfinished
2764 * business there.
2765 */
2766 uint32_t i = pStack->c;
2767 for (;;)
2768 {
2769 /* pop */
2770 if (i == 0)
2771 return VINF_SUCCESS;
2772 i--;
2773 pRec = pStack->a[i].pRec;
2774 iEntry = pStack->a[i].iEntry;
2775
2776 /* Examine it. */
2777 uint32_t u32Magic = pRec->Core.u32Magic;
2778 if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
2779 pRec = pRec->Excl.pSibling;
2780 else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
2781 {
2782 if (iEntry + 1 < pRec->Shared.cAllocated)
2783 break; /* continue processing this record. */
2784 pRec = pRec->Shared.pSibling;
2785 }
2786 else
2787 {
2788 Assert( u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
2789 || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
2790 continue;
2791 }
2792
2793 /* Any next record to advance to? */
2794 if ( !pRec
2795 || pRec == pStack->a[i].pFirstSibling)
2796 continue;
2797 iEntry = UINT32_MAX;
2798 break;
2799 }
2800
2801 /* Restore the rest of the state and update the stack. */
2802 pFirstSibling = pStack->a[i].pFirstSibling;
2803 enmState = pStack->a[i].enmState;
2804 pThread = pStack->a[i].pThread;
2805 pStack->c = i;
2806 }
2807
2808 Assert(iLoop != 1000000);
2809 }
2810}
2811
2812
2813/**
2814 * Check for the simple no-deadlock case.
2815 *
2816 * @returns true if no deadlock, false if further investigation is required.
2817 *
2818 * @param pOriginalRec The original record.
2819 */
2820DECLINLINE(int) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2821{
2822 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2823 && !pOriginalRec->Excl.pSibling)
2824 {
2825 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2826 if ( !pThread
2827 || pThread->u32Magic != RTTHREADINT_MAGIC)
2828 return true;
2829 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2830 if (!RTTHREAD_IS_SLEEPING(enmState))
2831 return true;
2832 }
2833 return false;
2834}
2835
2836
/**
 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
 *
 * Dumps the whole deadlock chain (unless the validator is in quiet mode) and
 * then invokes the panic handler.
 *
 * @param   pStack      The chain of locks causing the deadlock.
 * @param   pRec        The record relating to the current thread's lock
 *                      operation.
 * @param   pThreadSelf This thread.
 * @param   pSrcPos     Where we are going to deadlock.
 * @param   rc          The return code.
 */
static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
                                                 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        /* Pick a headline matching the failure mode. */
        const char *pszWhat;
        switch (rc)
        {
            case VERR_SEM_LV_DEADLOCK: pszWhat = "Detected deadlock!"; break;
            case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
            case VERR_SEM_LV_ILLEGAL_UPGRADE: pszWhat = "Illegal lock upgrade!"; break;
            default: AssertFailed(); pszWhat = "!unexpected rc!"; break;
        }
        /* Avoid dumping pRec twice when it's also the first chain entry. */
        rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
        rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
        for (uint32_t i = 0; i < pStack->c; i++)
        {
            char szPrefix[24];
            RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
            /* For shared records, show the specific owner entry in the cycle
               (if still valid) rather than the shared record itself. */
            PRTLOCKVALRECUNION pShrdOwner = NULL;
            if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
                pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
            if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
            {
                rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
                rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
            }
            else
            {
                rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
                if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
                    rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
            }
        }
        rtLockValComplainMore("---- end of deadlock chain ----\n");
    }

    rtLockValComplainPanic();
}
2886
2887
2888/**
2889 * Perform deadlock detection.
2890 *
2891 * @retval VINF_SUCCESS
2892 * @retval VERR_SEM_LV_DEADLOCK
2893 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2894 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2895 *
2896 * @param pRec The record relating to the current thread's lock
2897 * operation.
2898 * @param pThreadSelf The current thread.
2899 * @param pSrcPos The position of the current lock operation.
2900 */
2901static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
2902{
2903 RTLOCKVALDDSTACK Stack;
2904 int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2905 if (RT_SUCCESS(rc))
2906 return VINF_SUCCESS;
2907
2908 if (rc == VERR_TRY_AGAIN)
2909 {
2910 for (uint32_t iLoop = 0; ; iLoop++)
2911 {
2912 rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2913 if (RT_SUCCESS_NP(rc))
2914 return VINF_SUCCESS;
2915 if (rc != VERR_TRY_AGAIN)
2916 break;
2917 RTThreadYield();
2918 if (iLoop >= 3)
2919 return VINF_SUCCESS;
2920 }
2921 }
2922
2923 rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
2924 return rc;
2925}
2926
2927
2928RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2929 void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
2930{
2931 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2932 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
2933 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
2934 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
2935 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
2936
2937 pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
2938 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
2939 pRec->afReserved[0] = 0;
2940 pRec->afReserved[1] = 0;
2941 pRec->afReserved[2] = 0;
2942 rtLockValidatorSrcPosInit(&pRec->SrcPos);
2943 pRec->hThread = NIL_RTTHREAD;
2944 pRec->pDown = NULL;
2945 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
2946 pRec->uSubClass = uSubClass;
2947 pRec->cRecursion = 0;
2948 pRec->hLock = hLock;
2949 pRec->pSibling = NULL;
2950 if (pszNameFmt)
2951 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
2952 else
2953 {
2954 static uint32_t volatile s_cAnonymous = 0;
2955 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
2956 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
2957 }
2958
2959 /* Lazy initialization. */
2960 if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
2961 rtLockValidatorLazyInit();
2962}
2963
2964
2965RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2966 void *hLock, bool fEnabled, const char *pszNameFmt, ...)
2967{
2968 va_list va;
2969 va_start(va, pszNameFmt);
2970 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
2971 va_end(va);
2972}
2973
2974
2975RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2976 uint32_t uSubClass, void *pvLock, bool fEnabled,
2977 const char *pszNameFmt, va_list va)
2978{
2979 PRTLOCKVALRECEXCL pRec;
2980 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
2981 if (!pRec)
2982 return VERR_NO_MEMORY;
2983 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2984 return VINF_SUCCESS;
2985}
2986
2987
2988RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2989 uint32_t uSubClass, void *pvLock, bool fEnabled,
2990 const char *pszNameFmt, ...)
2991{
2992 va_list va;
2993 va_start(va, pszNameFmt);
2994 int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2995 va_end(va);
2996 return rc;
2997}
2998
2999
/**
 * Uninitializes an exclusive lock validator record.
 *
 * Marks the record dead, detaches the owner and the class reference, and
 * unlinks any sibling records — all inside the destruction serialization
 * section so concurrent deadlock detection never sees a half-dead record.
 *
 * @param   pRec    The record (must carry a valid magic, asserted).
 */
RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);

    rtLockValidatorSerializeDestructEnter();

    /** @todo Check that it's not on our stack first.  Need to make it
     *        configurable whether deleting a owned lock is acceptable? */

    /* Invalidate the magic first so other threads start rejecting the record. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
    ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    /* Detach the class reference now; it is released after leaving the
       destruct section to keep the critical section short. */
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    rtLockValidatorSerializeDestructLeave();
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3019
3020
3021RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3022{
3023 PRTLOCKVALRECEXCL pRec = *ppRec;
3024 *ppRec = NULL;
3025 if (pRec)
3026 {
3027 RTLockValidatorRecExclDelete(pRec);
3028 RTMemFree(pRec);
3029 }
3030}
3031
3032
3033RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
3034{
3035 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3036 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3037 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3038 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3039 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3040 RTLOCKVAL_SUB_CLASS_INVALID);
3041 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3042}
3043
3044
/**
 * Records the calling thread as the owner of the exclusive lock, either as a
 * fresh acquisition or as a recursion on an already-held lock.
 *
 * NULL and disabled records are quietly ignored.
 *
 * @param   pRec            The record, NULL tolerated.
 * @param   hThreadSelf     The calling thread; NIL_RTTHREAD triggers
 *                          auto-adoption of the native thread.
 * @param   pSrcPos         The source position of the lock operation.
 * @param   fFirstRecursion Asserted false on the recursion path (the caller
 *                          claims this is the first acquisition).
 */
RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                            PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return;
    AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (!pRecU->Excl.fEnabled)
        return;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        /* Adopt alien (non-IPRT) threads on the fly. */
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
    Assert(hThreadSelf == RTThreadSelf());

    ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);

    if (pRecU->Excl.hThread == hThreadSelf)
    {
        /* Recursive acquisition by the current owner. */
        Assert(!fFirstRecursion); RT_NOREF_PV(fFirstRecursion);
        pRecU->Excl.cRecursion++;
        rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
    }
    else
    {
        /* Fresh acquisition: the record must be unowned at this point. */
        Assert(pRecU->Excl.hThread == NIL_RTTHREAD);

        /* Publish source position and recursion count before the owner handle
           so observers that see an owner see consistent data. */
        rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
        ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
        ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);

        rtLockValidatorStackPush(hThreadSelf, pRecU);
    }
}
3081
3082
3083/**
3084 * Internal worker for RTLockValidatorRecExclReleaseOwner and
3085 * RTLockValidatorRecExclReleaseOwnerUnchecked.
3086 */
3087static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
3088{
3089 RTTHREADINT *pThread = pRec->Excl.hThread;
3090 AssertReturnVoid(pThread != NIL_RTTHREAD);
3091 Assert(pThread == RTThreadSelf());
3092
3093 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
3094 uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
3095 if (c == 0)
3096 {
3097 rtLockValidatorStackPop(pThread, pRec);
3098 ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
3099 }
3100 else
3101 {
3102 Assert(c < UINT32_C(0xffff0000));
3103 Assert(!fFinalRecursion); RT_NOREF_PV(fFinalRecursion);
3104 rtLockValidatorStackPopRecursion(pThread, pRec);
3105 }
3106}
3107
/**
 * Releases (one recursion level of) exclusive lock ownership, checking the
 * release order first when the lock class demands it.
 *
 * @returns VINF_SUCCESS on success (also for NULL/disabled records),
 *          VERR_SEM_LV_INVALID_PARAMETER on bad magic, or a release-order
 *          error from rtLockValidatorStackCheckReleaseOrder.
 * @param   pRec            The record, NULL tolerated.
 * @param   fFinalRecursion Whether this is supposed to be the final release.
 */
RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;

    /*
     * Check the release order.  Only done when the class enforces strict
     * release order and has order validation enabled at all.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
     */
    rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
    return VINF_SUCCESS;
}
3136
3137
3138RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
3139{
3140 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3141 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3142 if (pRecU->Excl.fEnabled)
3143 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
3144}
3145
3146
/**
 * Records a recursive entry into an exclusive lock already held by the caller.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER on bad record state,
 *          or VERR_SEM_LV_NESTED when the lock class forbids recursion.
 * @param   pRec        The record, NULL tolerated.
 * @param   pSrcPos     The source position of the recursion.
 */
RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    /* Recursion presumes the lock is currently owned. */
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        /* Class policy violation: report and (possibly) panic, then fail. */
        rtLockValComplainFirst("Recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    Assert(pRecU->Excl.cRecursion < _1M);   /* sanity bound on nesting depth */
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
    return VINF_SUCCESS;
}
3172
3173
/**
 * Unwinds one recursion level of an exclusive lock (counterpart of
 * RTLockValidatorRecExclRecursion); must leave at least one level held.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or a release-order
 *          error.
 * @param   pRec    The record.
 */
RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    /* > 1: unwinding must not release the final recursion level. */
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3204
3205
/**
 * Records a mixed recursion: re-entering the exclusive lock @a pRec while the
 * nesting went through another (shared or exclusive) record @a pRecMixed.
 *
 * NOTE(review): @a pRecMixed is only validated here; the recursion itself is
 * accounted on @a pRec — appears intentional, confirm against the stack code.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or VERR_SEM_LV_NESTED
 *          when the class forbids recursion.
 */
RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        /* Class policy violation: report, possibly panic, then fail. */
        rtLockValComplainFirst("Mixed recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    Assert(pRecU->Excl.cRecursion < _1M);   /* sanity bound on nesting depth */
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);

    return VINF_SUCCESS;
}
3235
3236
/**
 * Unwinds one mixed recursion level (counterpart of
 * RTLockValidatorRecExclRecursionMixed); must leave at least one level held.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or a release-order
 *          error.
 */
RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    /* > 1: unwinding must not release the final recursion level. */
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3271
3272
/**
 * Checks the locking order before acquiring an exclusive lock.
 *
 * @returns VINF_SUCCESS when order validation is disabled, the wait is below
 *          the class threshold, or the order is fine; otherwise an order
 *          violation status from rtLockValidatorStackCheckLockingOrder.
 * @param   pRec            The record, NULL tolerated.
 * @param   hThreadSelf     The current thread, NIL triggers auto-adoption.
 * @param   pSrcPos         The source position of the lock operation.
 * @param   cMillies        The intended wait time.
 */
RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                             PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input.  Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Excl.fEnabled
        || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Excl.hClass->cMsMinOrder > cMillies)
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     */
    if (pRec->hThread == hThreadSelf)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
}
3305
3306
3307RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3308 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3309 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3310{
3311 /*
3312 * Fend off wild life.
3313 */
3314 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3315 if (!pRecU)
3316 return VINF_SUCCESS;
3317 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3318 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3319 if (!pRec->fEnabled)
3320 return VINF_SUCCESS;
3321
3322 PRTTHREADINT pThreadSelf = hThreadSelf;
3323 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3324 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3325 Assert(pThreadSelf == RTThreadSelf());
3326
3327 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3328
3329 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3330 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3331 {
3332 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3333 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3334 , VERR_SEM_LV_INVALID_PARAMETER);
3335 enmSleepState = enmThreadState;
3336 }
3337
3338 /*
3339 * Record the location.
3340 */
3341 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3342 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3343 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3344 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3345 rtThreadSetState(pThreadSelf, enmSleepState);
3346
3347 /*
3348 * Don't do deadlock detection if we're recursing.
3349 *
3350 * On some hosts we don't do recursion accounting our selves and there
3351 * isn't any other place to check for this.
3352 */
3353 int rc = VINF_SUCCESS;
3354 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3355 {
3356 if ( !fRecursiveOk
3357 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3358 && !pRecU->Excl.hClass->fRecursionOk))
3359 {
3360 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3361 rtLockValComplainPanic();
3362 rc = VERR_SEM_LV_NESTED;
3363 }
3364 }
3365 /*
3366 * Perform deadlock detection.
3367 */
3368 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3369 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3370 || pRecU->Excl.hClass->cMsMinDeadlock > RT_INDEFINITE_WAIT))
3371 rc = VINF_SUCCESS;
3372 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3373 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3374
3375 if (RT_SUCCESS(rc))
3376 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3377 else
3378 {
3379 rtThreadSetState(pThreadSelf, enmThreadState);
3380 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3381 }
3382 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3383 return rc;
3384}
3385RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3386
3387
3388RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3389 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3390 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3391{
3392 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3393 if (RT_SUCCESS(rc))
3394 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3395 enmSleepState, fReallySleeping);
3396 return rc;
3397}
3398RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
3399
3400
3401RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3402 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
3403{
3404 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
3405 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
3406 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3407 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3408 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
3409
3410 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
3411 pRec->uSubClass = uSubClass;
3412 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
3413 pRec->hLock = hLock;
3414 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
3415 pRec->fSignaller = fSignaller;
3416 pRec->pSibling = NULL;
3417
3418 /* the table */
3419 pRec->cEntries = 0;
3420 pRec->iLastEntry = 0;
3421 pRec->cAllocated = 0;
3422 pRec->fReallocating = false;
3423 pRec->fPadding = false;
3424 pRec->papOwners = NULL;
3425
3426 /* the name */
3427 if (pszNameFmt)
3428 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
3429 else
3430 {
3431 static uint32_t volatile s_cAnonymous = 0;
3432 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
3433 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
3434 }
3435}
3436
3437
3438RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3439 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
3440{
3441 va_list va;
3442 va_start(va, pszNameFmt);
3443 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
3444 va_end(va);
3445}
3446
3447
3448RTDECL(int) RTLockValidatorRecSharedCreateV(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
3449 uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
3450 const char *pszNameFmt, va_list va)
3451{
3452 PRTLOCKVALRECSHRD pRec;
3453 *ppRec = pRec = (PRTLOCKVALRECSHRD)RTMemAlloc(sizeof(*pRec));
3454 if (!pRec)
3455 return VERR_NO_MEMORY;
3456 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
3457 return VINF_SUCCESS;
3458}
3459
3460
3461RTDECL(int) RTLockValidatorRecSharedCreate(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
3462 uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
3463 const char *pszNameFmt, ...)
3464{
3465 va_list va;
3466 va_start(va, pszNameFmt);
3467 int rc = RTLockValidatorRecSharedCreateV(ppRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
3468 va_end(va);
3469 return rc;
3470}
3471
3472
/**
 * Uninitializes a shared lock validator record.
 *
 * Acquires the reallocation flag (so no concurrent table growth is in
 * flight), marks the record dead, frees the owner table, unlinks siblings
 * and finally releases the class reference.
 *
 * @param   pRec    The record (must carry a valid magic, asserted).
 */
RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);

    /** @todo Check that it's not on our stack first.  Need to make it
     *        configurable whether deleting a owned lock is acceptable? */

    /*
     * Flip it into table realloc mode and take the destruction lock.
     */
    rtLockValidatorSerializeDestructEnter();
    while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
    {
        /* Somebody is reallocating the owner table; bounce through the
           detection lock to let them finish, then retry. */
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        rtLockValidatorSerializeDetectionLeave();

        rtLockValidatorSerializeDestructEnter();
    }

    /* Invalidate the magic first so other threads start rejecting the record. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->papOwners)
    {
        /* Detach the owner table before freeing it. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
        ASMAtomicUoWriteNullPtr(&pRec->papOwners);
        ASMAtomicUoWriteU32(&pRec->cAllocated, 0);

        RTMemFree((void *)papOwners);
    }
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    ASMAtomicWriteBool(&pRec->fReallocating, false);

    rtLockValidatorSerializeDestructLeave();

    /* Release the class reference outside the destruct section. */
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3514
3515
3516RTDECL(void) RTLockValidatorRecSharedDestroy(PRTLOCKVALRECSHRD *ppRec)
3517{
3518 PRTLOCKVALRECSHRD pRec = *ppRec;
3519 *ppRec = NULL;
3520 if (pRec)
3521 {
3522 RTLockValidatorRecSharedDelete(pRec);
3523 RTMemFree(pRec);
3524 }
3525}
3526
3527
3528RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
3529{
3530 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3531 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3532 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3533 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3534 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3535 RTLOCKVAL_SUB_CLASS_INVALID);
3536 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3537}
3538
3539
3540/**
3541 * Locates an owner (thread) in a shared lock record.
3542 *
3543 * @returns Pointer to the owner entry on success, NULL on failure..
3544 * @param pShared The shared lock record.
3545 * @param hThread The thread (owner) to find.
3546 * @param piEntry Where to optionally return the table in index.
3547 * Optional.
3548 */
3549DECLINLINE(PRTLOCKVALRECUNION)
3550rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
3551{
3552 rtLockValidatorSerializeDetectionEnter();
3553
3554 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3555 if (papOwners)
3556 {
3557 uint32_t const cMax = pShared->cAllocated;
3558 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3559 {
3560 PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
3561 if (pEntry && pEntry->ShrdOwner.hThread == hThread)
3562 {
3563 rtLockValidatorSerializeDetectionLeave();
3564 if (piEntry)
3565 *piEntry = iEntry;
3566 return pEntry;
3567 }
3568 }
3569 }
3570
3571 rtLockValidatorSerializeDetectionLeave();
3572 return NULL;
3573}
3574
3575
3576RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3577 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3578{
3579 /*
3580 * Validate and adjust input. Quit early if order validation is disabled.
3581 */
3582 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3583 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3584 if ( !pRecU->Shared.fEnabled
3585 || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
3586 || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3587 || pRecU->Shared.hClass->cMsMinOrder > cMillies
3588 )
3589 return VINF_SUCCESS;
3590
3591 if (hThreadSelf == NIL_RTTHREAD)
3592 {
3593 hThreadSelf = RTThreadSelfAutoAdopt();
3594 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3595 }
3596 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3597 Assert(hThreadSelf == RTThreadSelf());
3598
3599 /*
3600 * Detect recursion as it isn't subject to order restrictions.
3601 */
3602 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
3603 if (pEntry)
3604 return VINF_SUCCESS;
3605
3606 return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
3607}
3608
3609
/**
 * Checks for deadlocks and other problems before blocking on a shared lock.
 *
 * Records where the thread is about to block, switches it into the given
 * sleep state, and runs deadlock detection unless the class settings or
 * recursion makes that unnecessary.
 *
 * @retval  VINF_SUCCESS                    Safe to block.
 * @retval  VERR_SEM_LV_INVALID_PARAMETER   Bad record/thread/state input.
 * @retval  VERR_SEM_LV_NESTED              Recursion not permitted.
 * @retval  VERR_SEM_LV_DEADLOCK and friends from the deadlock detector.
 *
 * @param   pRec            The shared lock record.
 * @param   hThreadSelf     The current thread (must be registered).
 * @param   pSrcPos         The source position of the lock operation.
 * @param   fRecursiveOk    Whether recursive acquisition is permitted.
 * @param   cMillies        The intended wait time.
 * @param   enmSleepState   The sleep state to switch into.
 * @param   fReallySleeping Whether the caller will really go to sleep.
 */
RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                  PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
                                                  RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Shared.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);

    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
    {
        AssertReturn(   enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                     || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                     , VERR_SEM_LV_INVALID_PARAMETER);
        enmSleepState = enmThreadState;
    }

    /*
     * Record the location.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
    pThreadSelf->LockValidator.enmRecState = enmSleepState;
    rtThreadSetState(pThreadSelf, enmSleepState);

    /*
     * Don't do deadlock detection if we're recursing.
     * (Signaller records never count as recursion, hence the fSignaller test.)
     */
    int rc = VINF_SUCCESS;
    PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
                              ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
                              : NULL;
    if (pEntry)
    {
        if (   !fRecursiveOk
            || (   pRec->hClass
                && !pRec->hClass->fRecursionOk)
            )
        {
            rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
            rtLockValComplainPanic();
            rc = VERR_SEM_LV_NESTED;
        }
    }
    /*
     * Perform deadlock detection, unless the class disables it or the wait
     * is below the class threshold.
     */
    else if (   pRec->hClass
             && (   pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
                 || pRec->hClass->cMsMinDeadlock > cMillies))
        rc = VINF_SUCCESS;
    else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);

    if (RT_SUCCESS(rc))
        ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
    else
    {
        /* Failure: restore the previous thread state and clear the record. */
        rtThreadSetState(pThreadSelf, enmThreadState);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
    }
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
    return rc;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
3688
3689
3690RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3691 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3692 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3693{
3694 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3695 if (RT_SUCCESS(rc))
3696 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3697 enmSleepState, fReallySleeping);
3698 return rc;
3699}
3700RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
3701
3702
3703/**
3704 * Allocates and initializes an owner entry for the shared lock record.
3705 *
3706 * @returns The new owner entry.
3707 * @param pRec The shared lock record.
3708 * @param pThreadSelf The calling thread and owner. Used for record
3709 * initialization and allocation.
3710 * @param pSrcPos The source position.
3711 */
3712DECLINLINE(PRTLOCKVALRECUNION)
3713rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
3714{
3715 PRTLOCKVALRECUNION pEntry;
3716
3717 /*
3718 * Check if the thread has any statically allocated records we can easily
3719 * make use of.
3720 */
3721 unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
3722 if ( iEntry > 0
3723 && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
3724 {
3725 pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
3726 Assert(!pEntry->ShrdOwner.fReserved);
3727 pEntry->ShrdOwner.fStaticAlloc = true;
3728 rtThreadGet(pThreadSelf);
3729 }
3730 else
3731 {
3732 pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
3733 if (RT_UNLIKELY(!pEntry))
3734 return NULL;
3735 pEntry->ShrdOwner.fStaticAlloc = false;
3736 }
3737
3738 pEntry->Core.u32Magic = RTLOCKVALRECSHRDOWN_MAGIC;
3739 pEntry->ShrdOwner.cRecursion = 1;
3740 pEntry->ShrdOwner.fReserved = true;
3741 pEntry->ShrdOwner.hThread = pThreadSelf;
3742 pEntry->ShrdOwner.pDown = NULL;
3743 pEntry->ShrdOwner.pSharedRec = pRec;
3744#if HC_ARCH_BITS == 32
3745 pEntry->ShrdOwner.pvReserved = NULL;
3746#endif
3747 if (pSrcPos)
3748 pEntry->ShrdOwner.SrcPos = *pSrcPos;
3749 else
3750 rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
3751 return pEntry;
3752}
3753
3754
3755/**
3756 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
3757 *
3758 * @param pEntry The owner entry.
3759 */
3760DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
3761{
3762 if (pEntry)
3763 {
3764 Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
3765 ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
3766
3767 PRTTHREADINT pThread;
3768 ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);
3769
3770 Assert(pEntry->fReserved);
3771 pEntry->fReserved = false;
3772
3773 if (pEntry->fStaticAlloc)
3774 {
3775 AssertPtrReturnVoid(pThread);
3776 AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);
3777
3778 uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
3779 AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));
3780
3781 Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry));
3782 ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry);
3783
3784 rtThreadRelease(pThread);
3785 }
3786 else
3787 {
3788 rtLockValidatorSerializeDestructEnter();
3789 rtLockValidatorSerializeDestructLeave();
3790
3791 RTMemFree(pEntry);
3792 }
3793 }
3794}
3795
3796
3797/**
3798 * Make more room in the table.
3799 *
3800 * @retval true on success
3801 * @retval false if we're out of memory or running into a bad race condition
3802 * (probably a bug somewhere). No longer holding the lock.
3803 *
3804 * @param pShared The shared lock record.
3805 */
3806static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
3807{
3808 for (unsigned i = 0; i < 1000; i++)
3809 {
3810 /*
3811 * Switch to the other data access direction.
3812 */
3813 rtLockValidatorSerializeDetectionLeave();
3814 if (i >= 10)
3815 {
3816 Assert(i != 10 && i != 100);
3817 RTThreadSleep(i >= 100);
3818 }
3819 rtLockValidatorSerializeDestructEnter();
3820
3821 /*
3822 * Try grab the privilege to reallocating the table.
3823 */
3824 if ( pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3825 && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
3826 {
3827 uint32_t cAllocated = pShared->cAllocated;
3828 if (cAllocated < pShared->cEntries)
3829 {
3830 /*
3831 * Ok, still not enough space. Reallocate the table.
3832 */
3833#if 0 /** @todo enable this after making sure growing works flawlessly. */
3834 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
3835#else
3836 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
3837#endif
3838 PRTLOCKVALRECSHRDOWN *papOwners;
3839 papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
3840 (cAllocated + cInc) * sizeof(void *));
3841 if (!papOwners)
3842 {
3843 ASMAtomicWriteBool(&pShared->fReallocating, false);
3844 rtLockValidatorSerializeDestructLeave();
3845 /* RTMemRealloc will assert */
3846 return false;
3847 }
3848
3849 while (cInc-- > 0)
3850 {
3851 papOwners[cAllocated] = NULL;
3852 cAllocated++;
3853 }
3854
3855 ASMAtomicWritePtr(&pShared->papOwners, papOwners);
3856 ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
3857 }
3858 ASMAtomicWriteBool(&pShared->fReallocating, false);
3859 }
3860 rtLockValidatorSerializeDestructLeave();
3861
3862 rtLockValidatorSerializeDetectionEnter();
3863 if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
3864 break;
3865
3866 if (pShared->cAllocated >= pShared->cEntries)
3867 return true;
3868 }
3869
3870 rtLockValidatorSerializeDetectionLeave();
3871 AssertFailed(); /* too many iterations or destroyed while racing. */
3872 return false;
3873}
3874
3875
/**
 * Adds an owner entry to a shared lock record.
 *
 * @returns true on success, false on serious race or if we're out of memory.
 * @param   pShared             The shared lock record.
 * @param   pEntry              The owner entry.
 */
DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
    {
        /* Reserve a slot by bumping the entry count; grow the table first if
           the new count exceeds the current allocation. */
        if (   ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
            && !rtLockValidatorRecSharedMakeRoom(pShared))
            return false; /* the worker already left the lock on failure */

        /* Claim the first free (NULL) slot in the owner table. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (unsigned i = 0; i < 100; i++)
        {
            for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
            {
                if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], pEntry, NULL))
                {
                    rtLockValidatorSerializeDetectionLeave();
                    return true;
                }
            }
            Assert(i != 25); /* we reserved a slot above, so a free one should turn up quickly */
        }
        AssertFailed();
    }
    rtLockValidatorSerializeDetectionLeave();
    return false;
}
3911
3912
/**
 * Remove an owner entry from a shared lock record and free it.
 *
 * @param   pShared             The shared lock record.
 * @param   pEntry              The owner entry to remove.
 * @param   iEntry              The last known index of the entry (may be
 *                              stale if the table was reallocated).
 */
DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
                                                            uint32_t iEntry)
{
    /*
     * Remove it from the table.
     */
    rtLockValidatorSerializeDetectionEnter();
    AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
    if (RT_UNLIKELY(   iEntry >= pShared->cAllocated
                    || !ASMAtomicCmpXchgPtr(&pShared->papOwners[iEntry], NULL, pEntry)))
    {
        /* this shouldn't happen yet... */
        AssertFailed();
        /* The cached index didn't hold the entry; rescan the whole table. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (iEntry = 0; iEntry < cMax; iEntry++)
            if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], NULL, pEntry))
                break;
        AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
    }
    /* Drop the entry count; bit 31 set would mean an underflow. */
    uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
    Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
    rtLockValidatorSerializeDetectionLeave();

    /*
     * Successfully removed, now free it.
     */
    rtLockValidatorRecSharedFreeOwner(pEntry);
}
3949
3950
/**
 * Resets the owner table of a signaller record, freeing all current owner
 * entries and optionally installing a single new owner.
 *
 * Only valid for records with fSignaller set.
 *
 * @param   pRec                The shared (signaller) lock record.
 * @param   hThread             The new owner, or NIL_RTTHREAD for none.
 * @param   pSrcPos             The source position to record, optional.
 */
RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
    AssertReturnVoid(pRec->fSignaller);

    /*
     * Free all current owners.
     */
    rtLockValidatorSerializeDetectionEnter();
    while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
    {
        AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
        uint32_t iEntry = 0;
        uint32_t cEntries = pRec->cAllocated;
        PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
        while (iEntry < cEntries)
        {
            /* Detach the entry atomically so no one else can free it too. */
            PRTLOCKVALRECSHRDOWN pEntry = ASMAtomicXchgPtrT(&papEntries[iEntry], NULL, PRTLOCKVALRECSHRDOWN);
            if (pEntry)
            {
                ASMAtomicDecU32(&pRec->cEntries);
                rtLockValidatorSerializeDetectionLeave();

                /* Free outside the detection lock (the free path may take the
                   destruction lock). */
                rtLockValidatorRecSharedFreeOwner(pEntry);

                rtLockValidatorSerializeDetectionEnter();
                if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
                    break;
                /* The table may have been reallocated while unlocked; refetch. */
                cEntries = pRec->cAllocated;
                papEntries = pRec->papOwners;
            }
            iEntry++;
        }
    }
    rtLockValidatorSerializeDetectionLeave();

    if (hThread != NIL_RTTHREAD)
    {
        /*
         * Allocate a new owner entry and insert it into the table.
         */
        PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
        if (   pEntry
            && !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
4002
4003
/**
 * Adds an owner to a shared lock record, or bumps the recursion count if the
 * thread already owns it.
 *
 * @param   pRec                The shared lock record.
 * @param   hThread             The owner thread; NIL_RTTHREAD means the
 *                              calling thread (auto-adopted if necessary).
 * @param   pSrcPos             The source position of the caller, optional.
 */
RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Recursive?
     *
     * Note! This code can be optimized to try avoid scanning the table on
     *       insert.  However, that's annoying work that makes the code big,
     *       so it can wait til later sometime.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
    if (pEntry)
    {
        Assert(!pRec->fSignaller); /* signaller records aren't expected to recurse */
        pEntry->ShrdOwner.cRecursion++;
        rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
        return;
    }

    /*
     * Allocate a new owner entry and insert it into the table.
     */
    pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
    if (pEntry)
    {
        if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
        {
            /* Only non-signaller records go onto the per-thread lock stack. */
            if (!pRec->fSignaller)
                rtLockValidatorStackPush(hThread, pEntry);
        }
        else
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);
4048
4049
/**
 * Removes an owner from a shared lock record, or unwinds one level of
 * recursion if the thread entered more than once.
 *
 * @param   pRec                The shared lock record.
 * @param   hThread             The owner thread; NIL_RTTHREAD means the
 *                              calling thread (auto-adopted if necessary).
 */
RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Find the entry hope it's a recursive one.
     */
    uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
    AssertReturnVoid(pEntry);
    AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);

    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        /* Last reference: pop from the lock stack (non-signaller records
           only) and drop the owner entry. */
        if (!pRec->fSignaller)
            rtLockValidatorStackPop(hThread, (PRTLOCKVALRECUNION)pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
    {
        /* Just unwind one level of recursion. */
        Assert(!pRec->fSignaller);
        rtLockValidatorStackPopRecursion(hThread, pEntry);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
4084
4085
4086RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4087{
4088 /* Validate and resolve input. */
4089 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
4090 if (!pRec->fEnabled)
4091 return false;
4092 if (hThread == NIL_RTTHREAD)
4093 {
4094 hThread = RTThreadSelfAutoAdopt();
4095 AssertReturn(hThread != NIL_RTTHREAD, false);
4096 }
4097 AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);
4098
4099 /* Do the job. */
4100 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4101 return pEntry != NULL;
4102}
4103RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);
4104
4105
/**
 * Checks and records the calling thread's release of a shared lock.
 *
 * @returns VINF_SUCCESS on success; VERR_SEM_LV_NOT_OWNER if the thread
 *          doesn't own the lock; VERR_SEM_LV_INVALID_PARAMETER or
 *          VERR_SEM_LV_INTERNAL_ERROR on bad input; or a release order
 *          violation status from the stack check.
 * @param   pRec                The shared lock record.
 * @param   hThreadSelf         The calling thread, NIL_RTTHREAD if unknown.
 */
RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
{
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    Assert(hThreadSelf == RTThreadSelf());
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Locate the entry for this thread in the table.
     */
    uint32_t iEntry = 0;
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
    if (RT_UNLIKELY(!pEntry))
    {
        rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NOT_OWNER;
    }

    /*
     * Check the release order.
     */
    if (   pRec->hClass != NIL_RTLOCKVALCLASS
        && pRec->hClass->fStrictReleaseOrder
        && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, (PRTLOCKVALRECUNION)pEntry);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Release the ownership or unwind a level of recursion.
     */
    Assert(pEntry->ShrdOwner.cRecursion > 0);
    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        rtLockValidatorStackPop(hThreadSelf, pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
        rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);

    return VINF_SUCCESS;
}
4159
4160
4161RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4162{
4163 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4164 if (!pRec->fEnabled)
4165 return VINF_SUCCESS;
4166 if (hThreadSelf == NIL_RTTHREAD)
4167 {
4168 hThreadSelf = RTThreadSelfAutoAdopt();
4169 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4170 }
4171 Assert(hThreadSelf == RTThreadSelf());
4172 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4173
4174 /*
4175 * Locate the entry for this thread in the table.
4176 */
4177 uint32_t iEntry = 0;
4178 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4179 if (RT_UNLIKELY(!pEntry))
4180 {
4181 rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4182 rtLockValComplainPanic();
4183 return VERR_SEM_LV_NOT_SIGNALLER;
4184 }
4185 return VINF_SUCCESS;
4186}
4187
4188
4189RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
4190{
4191 if (Thread == NIL_RTTHREAD)
4192 return 0;
4193
4194 PRTTHREADINT pThread = rtThreadGet(Thread);
4195 if (!pThread)
4196 return VERR_INVALID_HANDLE;
4197 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
4198 rtThreadRelease(pThread);
4199 return cWriteLocks;
4200}
4201RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
4202
4203
4204RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
4205{
4206 PRTTHREADINT pThread = rtThreadGet(Thread);
4207 AssertReturnVoid(pThread);
4208 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
4209 rtThreadRelease(pThread);
4210}
4211RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
4212
4213
4214RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
4215{
4216 PRTTHREADINT pThread = rtThreadGet(Thread);
4217 AssertReturnVoid(pThread);
4218 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
4219 rtThreadRelease(pThread);
4220}
4221RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
4222
4223
4224RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
4225{
4226 if (Thread == NIL_RTTHREAD)
4227 return 0;
4228
4229 PRTTHREADINT pThread = rtThreadGet(Thread);
4230 if (!pThread)
4231 return VERR_INVALID_HANDLE;
4232 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
4233 rtThreadRelease(pThread);
4234 return cReadLocks;
4235}
4236RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
4237
4238
4239RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
4240{
4241 PRTTHREADINT pThread = rtThreadGet(Thread);
4242 Assert(pThread);
4243 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
4244 rtThreadRelease(pThread);
4245}
4246RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
4247
4248
4249RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
4250{
4251 PRTTHREADINT pThread = rtThreadGet(Thread);
4252 Assert(pThread);
4253 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
4254 rtThreadRelease(pThread);
4255}
4256RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
4257
4258
/**
 * Queries the lock (if any) that the given thread is currently blocking on.
 *
 * @returns The lock handle of the record the thread is sleeping on, or NULL
 *          if the thread isn't sleeping on a known lock.
 * @param   hThread             The thread in question.
 */
RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
{
    void *pvLock = NULL;
    PRTTHREADINT pThread = rtThreadGet(hThread);
    if (pThread)
    {
        RTTHREADSTATE enmState = rtThreadGetState(pThread);
        if (RTTHREAD_IS_SLEEPING(enmState))
        {
            rtLockValidatorSerializeDetectionEnter();

            /* Re-check the state now that we hold the detection lock. */
            enmState = rtThreadGetState(pThread);
            if (RTTHREAD_IS_SLEEPING(enmState))
            {
                PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
                if (pRec)
                {
                    switch (pRec->Core.u32Magic)
                    {
                        case RTLOCKVALRECEXCL_MAGIC:
                            pvLock = pRec->Excl.hLock;
                            break;

                        case RTLOCKVALRECSHRDOWN_MAGIC:
                            /* Owner entry: resolve the parent shared record first. */
                            pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
                            if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
                                break;
                            RT_FALL_THRU();
                        case RTLOCKVALRECSHRD_MAGIC:
                            pvLock = pRec->Shared.hLock;
                            break;
                    }
                    /* If the thread state changed while we looked, the record
                       may no longer be what it is blocking on - discard. */
                    if (RTThreadGetState(pThread) != enmState)
                        pvLock = NULL;
                }
            }

            rtLockValidatorSerializeDetectionLeave();
        }
        rtThreadRelease(pThread);
    }
    return pvLock;
}
RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
4303
4304
4305RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
4306{
4307 bool fRet = false;
4308 PRTTHREADINT pThread = rtThreadGet(hThread);
4309 if (pThread)
4310 {
4311 fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
4312 rtThreadRelease(pThread);
4313 }
4314 return fRet;
4315}
4316RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
4317
4318
4319RTDECL(bool) RTLockValidatorHoldsLocksInClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass)
4320{
4321 bool fRet = false;
4322 if (hCurrentThread == NIL_RTTHREAD)
4323 hCurrentThread = RTThreadSelf();
4324 else
4325 Assert(hCurrentThread == RTThreadSelf());
4326 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4327 if (pThread)
4328 {
4329 if (hClass != NIL_RTLOCKVALCLASS)
4330 {
4331 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4332 while (VALID_PTR(pCur) && !fRet)
4333 {
4334 switch (pCur->Core.u32Magic)
4335 {
4336 case RTLOCKVALRECEXCL_MAGIC:
4337 fRet = pCur->Excl.hClass == hClass;
4338 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4339 break;
4340 case RTLOCKVALRECSHRDOWN_MAGIC:
4341 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4342 && pCur->ShrdOwner.pSharedRec->hClass == hClass;
4343 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4344 break;
4345 case RTLOCKVALRECNEST_MAGIC:
4346 switch (pCur->Nest.pRec->Core.u32Magic)
4347 {
4348 case RTLOCKVALRECEXCL_MAGIC:
4349 fRet = pCur->Nest.pRec->Excl.hClass == hClass;
4350 break;
4351 case RTLOCKVALRECSHRDOWN_MAGIC:
4352 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4353 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass;
4354 break;
4355 }
4356 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4357 break;
4358 default:
4359 pCur = NULL;
4360 break;
4361 }
4362 }
4363 }
4364
4365 rtThreadRelease(pThread);
4366 }
4367 return fRet;
4368}
4369RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4370
4371
4372RTDECL(bool) RTLockValidatorHoldsLocksInSubClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass, uint32_t uSubClass)
4373{
4374 bool fRet = false;
4375 if (hCurrentThread == NIL_RTTHREAD)
4376 hCurrentThread = RTThreadSelf();
4377 else
4378 Assert(hCurrentThread == RTThreadSelf());
4379 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4380 if (pThread)
4381 {
4382 if (hClass != NIL_RTLOCKVALCLASS)
4383 {
4384 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4385 while (VALID_PTR(pCur) && !fRet)
4386 {
4387 switch (pCur->Core.u32Magic)
4388 {
4389 case RTLOCKVALRECEXCL_MAGIC:
4390 fRet = pCur->Excl.hClass == hClass
4391 && pCur->Excl.uSubClass == uSubClass;
4392 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4393 break;
4394 case RTLOCKVALRECSHRDOWN_MAGIC:
4395 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4396 && pCur->ShrdOwner.pSharedRec->hClass == hClass
4397 && pCur->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4398 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4399 break;
4400 case RTLOCKVALRECNEST_MAGIC:
4401 switch (pCur->Nest.pRec->Core.u32Magic)
4402 {
4403 case RTLOCKVALRECEXCL_MAGIC:
4404 fRet = pCur->Nest.pRec->Excl.hClass == hClass
4405 && pCur->Nest.pRec->Excl.uSubClass == uSubClass;
4406 break;
4407 case RTLOCKVALRECSHRDOWN_MAGIC:
4408 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4409 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass
4410 && pCur->Nest.pRec->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4411 break;
4412 }
4413 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4414 break;
4415 default:
4416 pCur = NULL;
4417 break;
4418 }
4419 }
4420 }
4421
4422 rtThreadRelease(pThread);
4423 }
4424 return fRet;
4425}
4426RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4427
4428
4429RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
4430{
4431 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
4432}
4433RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
4434
4435
4436RTDECL(bool) RTLockValidatorIsEnabled(void)
4437{
4438 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
4439}
4440RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
4441
4442
4443RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
4444{
4445 return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
4446}
4447RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
4448
4449
4450RTDECL(bool) RTLockValidatorIsQuiet(void)
4451{
4452 return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
4453}
4454RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);
4455
4456
4457RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
4458{
4459 return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
4460}
4461RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
4462
4463
4464RTDECL(bool) RTLockValidatorMayPanic(void)
4465{
4466 return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
4467}
4468RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
4469
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette