VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@ 75482

Last change on this file since 75482 was 72613, checked in by vboxsync, 7 years ago

IPRT: Call lsan_ignore_object() if must-leak or may-leak tags are used.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 160.1 KB
Line 
1/* $Id: lockvalidator.cpp 72613 2018-06-19 13:16:32Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include <iprt/lockvalidator.h>
32#include "internal/iprt.h"
33
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/env.h>
37#include <iprt/err.h>
38#include <iprt/mem.h>
39#include <iprt/once.h>
40#include <iprt/semaphore.h>
41#include <iprt/string.h>
42#include <iprt/thread.h>
43
44#include "internal/lockvalidator.h"
45#include "internal/magics.h"
46#include "internal/strhash.h"
47#include "internal/thread.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
/** Macro that asserts that a pointer is aligned correctly.
 * Only used when fighting bugs.
 * @note Wrapped in do/while(0) (no trailing semicolon) so the caller supplies
 *       the semicolon, matching the disabled variant below and keeping the
 *       macro safe inside unbraced if/else bodies. */
#if 1
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
    do { AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p))); } while (0)
#else
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
#endif

/** Hashes the class handle (pointer) into an apPriorLocksHash index.
 * Shifts out the low (alignment) bits, then takes the pointer modulo the
 * hash table size. */
#define RTLOCKVALCLASS_HASH(hClass) \
    (   ((uintptr_t)(hClass) >> 6) \
      % (  RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
         / sizeof(PRTLOCKVALCLASSREF)) )

/** The max value for RTLOCKVALCLASSINT::cRefs. */
#define RTLOCKVALCLASS_MAX_REFS             UINT32_C(0xffff0000)
/** The max value for RTLOCKVALCLASSREF::cLookups. */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS       UINT32_C(0xfffe0000)
/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX   UINT32_C(0xffff0000)


/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
 * Enable recursion records. */
#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_RECURSION_RECORDS  1
#endif

/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
 * Enables some extra verbosity in the lock dumping. */
#if defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_VERBOSE_DUMPS
#endif

/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
 * Enables collection prior class hash lookup statistics, dumping them when
 * complaining about the class. */
#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_CLASS_HASH_STATS
#endif
95
96
97/*********************************************************************************************************************************
98* Structures and Typedefs *
99*********************************************************************************************************************************/
/**
 * Deadlock detection stack entry.
 */
typedef struct RTLOCKVALDDENTRY
{
    /** The current record. */
    PRTLOCKVALRECUNION  pRec;
    /** The current entry number if pRec is a shared one. */
    uint32_t            iEntry;
    /** The thread state of the thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    RTTHREADSTATE       enmState;
    /** The thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    PRTTHREADINT        pThread;
    /** What pThread is waiting on, i.e. where we entered the circular list of
     * siblings.  This is used for validating a deadlock stack as well as
     * terminating the sibling walk. */
    PRTLOCKVALRECUNION  pFirstSibling;
} RTLOCKVALDDENTRY;


/**
 * Deadlock detection stack.
 */
typedef struct RTLOCKVALDDSTACK
{
    /** The number of stack entries in use. */
    uint32_t            c;
    /** The stack entries.  Fixed capacity, i.e. detection walks are bounded
     * to a depth of 32 lock records. */
    RTLOCKVALDDENTRY    a[32];
} RTLOCKVALDDSTACK;
/** Pointer to a deadlock detection stack. */
typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
134
135
/**
 * Reference to another class.
 */
typedef struct RTLOCKVALCLASSREF
{
    /** The class. */
    RTLOCKVALCLASS      hClass;
    /** The number of lookups of this class.
     * Capped by RTLOCKVALCLASSREF_MAX_LOOKUPS / _MAX_LOOKUPS_FIX. */
    uint32_t volatile   cLookups;
    /** Indicates whether the entry was added automatically during order checking
     * (true) or manually via the API (false). */
    bool                fAutodidacticism;
    /** Reserved / explicit alignment padding. */
    bool                afReserved[3];
} RTLOCKVALCLASSREF;
/** Pointer to a class reference. */
typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;


/** Pointer to a chunk of class references. */
typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
/**
 * Chunk of class references.
 *
 * Chunks form a singly linked list; the head chunk is embedded in
 * RTLOCKVALCLASSINT::PriorLocks rather than allocated separately.
 */
typedef struct RTLOCKVALCLASSREFCHUNK
{
    /** Array of refs. */
#if 0 /** @todo for testing allocation of new chunks. */
    RTLOCKVALCLASSREF   aRefs[ARCH_BITS == 32 ? 10 : 8];
#else
    RTLOCKVALCLASSREF   aRefs[2];
#endif
    /** Pointer to the next chunk. */
    PRTLOCKVALCLASSREFCHUNK volatile pNext;
} RTLOCKVALCLASSREFCHUNK;
171
172
/**
 * Lock class.
 */
typedef struct RTLOCKVALCLASSINT
{
    /** AVL node core.  Keyed for the g_LockValClassTree lookups. */
    AVLLU32NODECORE     Core;
    /** Magic value (RTLOCKVALCLASS_MAGIC). */
    uint32_t volatile   u32Magic;
    /** Reference counter.  See RTLOCKVALCLASS_MAX_REFS. */
    uint32_t volatile   cRefs;
    /** Whether the class is allowed to teach it self new locking order rules. */
    bool                fAutodidact;
    /** Whether to allow recursion. */
    bool                fRecursionOk;
    /** Strict release order. */
    bool                fStrictReleaseOrder;
    /** Whether this class is in the tree. */
    bool                fInTree;
    /** Donate a reference to the next retainer.  This is a hack to make
     * RTLockValidatorClassCreateUnique work. */
    bool volatile       fDonateRefToNextRetainer;
    /** Reserved future use / explicit alignment. */
    bool                afReserved[3];
    /** The minimum wait interval for which we do deadlock detection
     * (milliseconds). */
    RTMSINTERVAL        cMsMinDeadlock;
    /** The minimum wait interval for which we do order checks (milliseconds). */
    RTMSINTERVAL        cMsMinOrder;
    /** More padding. */
    uint32_t            au32Reserved[ARCH_BITS == 32 ? 5 : 2];
    /** Classes that may be taken prior to this one.
     * This is a linked list where each node contains a chunk of locks so that we
     * reduce the number of allocations as well as localize the data. */
    RTLOCKVALCLASSREFCHUNK  PriorLocks;
    /** Hash table containing frequently encountered prior locks.
     * Indexed via RTLOCKVALCLASS_HASH(). */
    PRTLOCKVALCLASSREF  apPriorLocksHash[17];
    /** Class name. (Allocated after the end of the block as usual.) */
    char const         *pszName;
    /** Where this class was created.
     * This is mainly used for finding automatically created lock classes.
     * @remarks The strings are stored after this structure so we won't crash
     *          if the class lives longer than the module (dll/so/dylib) that
     *          spawned it. */
    RTLOCKVALSRCPOS     CreatePos;
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
    /** Hash hits. */
    uint32_t volatile   cHashHits;
    /** Hash misses. */
    uint32_t volatile   cHashMisses;
#endif
} RTLOCKVALCLASSINT;
/* Layout guards: the padding above is sized so PriorLocks lands at offset 64
   on both 32-bit and 64-bit hosts. */
AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);
227
228
/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Serializing object destruction and deadlock detection.
 *
 * This makes sure that none of the memory examined by the deadlock detection
 * code will become invalid (reused for other purposes or made not present)
 * while the detection is in progress.
 *
 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALDRECSHRD::papOwners destruction.
 * EW: Deadlock detection and some related activities.
 * (NS/EW refer to the two traffic directions of the RTSemXRoads semaphore --
 * see the rtLockValidatorSerialize* helpers below.)
 */
static RTSEMXROADS g_hLockValidatorXRoads = NIL_RTSEMXROADS;
/** Serializing class tree insert and lookups. */
static RTSEMRW g_hLockValClassTreeRWLock= NIL_RTSEMRW;
/** Class tree. */
static PAVLLU32NODECORE g_LockValClassTree = NULL;
/** Critical section serializing the teaching new rules to the classes. */
static RTCRITSECT g_LockValClassTeachCS;

/** Whether the lock validator is enabled or disabled.
 * Only applies to new locks. */
static bool volatile g_fLockValidatorEnabled = true;
/** Set if the lock validator is quiet (suppresses complaint output).
 * Defaults to noisy in strict builds, quiet otherwise; overridable via the
 * IPRT_LOCK_VALIDATOR_QUIET / _NOT_QUIET environment variables. */
#ifdef RT_STRICT
static bool volatile g_fLockValidatorQuiet = false;
#else
static bool volatile g_fLockValidatorQuiet = true;
#endif
/** Set if the lock validator may panic.
 * Overridable via IPRT_LOCK_VALIDATOR_MAY_PANIC / _MAY_NOT_PANIC. */
#ifdef RT_STRICT
static bool volatile g_fLockValidatorMayPanic = true;
#else
static bool volatile g_fLockValidatorMayPanic = false;
#endif
/** Whether to return an error status on wrong locking order. */
static bool volatile g_fLockValSoftWrongOrder = false;


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
/* Forward declarations; both are defined later in this file. */
static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);
273
274
/**
 * Lazy initialization of the lock validator globals.
 *
 * Creates the class-teaching critical section, the class tree RW semaphore
 * and the destruction/detection crossroads semaphore, then applies any
 * IPRT_LOCK_VALIDATOR_* environment overrides (ring-3 only).
 *
 * @note    The CmpXchg guard only prevents two threads from running the body
 *          concurrently; a thread that loses the race returns immediately
 *          without waiting for the winner to finish.  NOTE(review): callers
 *          appear to re-check the individual handles afterwards -- confirm
 *          this partial-init window is acceptable.
 */
static void rtLockValidatorLazyInit(void)
{
    static uint32_t volatile s_fInitializing = false;
    if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
    {
        /*
         * The locks.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
                             RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");

        if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
        {
            RTSEMRW hSemRW;
            int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
        }

        if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
        {
            RTSEMXROADS hXRoads;
            int rc = RTSemXRoadsCreate(&hXRoads);
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
        }

#ifdef IN_RING3
        /*
         * Check the environment for our config variables.
         * For each pair below, the variable checked last wins if both are set.
         */
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
#endif

        /*
         * Register cleanup
         */
        /** @todo register some cleanup callback if we care. */

        ASMAtomicWriteU32(&s_fInitializing, false);
    }
}
339
340
341
342/** Wrapper around ASMAtomicReadPtr. */
343DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
344{
345 PRTLOCKVALRECUNION p = ASMAtomicReadPtrT(ppRec, PRTLOCKVALRECUNION);
346 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
347 return p;
348}
349
350
/** Wrapper around ASMAtomicWritePtr.
 * Asserts the alignment of the new value before publishing it. */
DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
    ASMAtomicWritePtr(ppRec, pRecNew);
}
357
358
359/** Wrapper around ASMAtomicReadPtr. */
360DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
361{
362 PRTTHREADINT p = ASMAtomicReadPtrT(phThread, PRTTHREADINT);
363 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
364 return p;
365}
366
367
368/** Wrapper around ASMAtomicUoReadPtr. */
369DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
370{
371 PRTLOCKVALRECSHRDOWN p = ASMAtomicUoReadPtrT(ppOwner, PRTLOCKVALRECSHRDOWN);
372 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
373 return p;
374}
375
376
377/**
378 * Reads a volatile thread handle field and returns the thread name.
379 *
380 * @returns Thread name (read only).
381 * @param phThread The thread handle field.
382 */
383static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
384{
385 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
386 if (!pThread)
387 return "<NIL>";
388 if (!VALID_PTR(pThread))
389 return "<INVALID>";
390 if (pThread->u32Magic != RTTHREADINT_MAGIC)
391 return "<BAD-THREAD-MAGIC>";
392 return pThread->szName;
393}
394
395
396/**
397 * Launch a simple assertion like complaint w/ panic.
398 *
399 * @param SRC_POS The source position where call is being made from.
400 * @param pszWhat What we're complaining about.
401 * @param ... Format arguments.
402 */
403static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
404{
405 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
406 {
407 RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
408 va_list va;
409 va_start(va, pszWhat);
410 RTAssertMsg2WeakV(pszWhat, va);
411 va_end(va);
412 }
413 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
414 RTAssertPanic();
415}
416
417
/**
 * Describes the class.
 *
 * Produces no output when the validator is in quiet mode.
 *
 * @param   pszPrefix   Message prefix.
 * @param   pClass      The class to complain about.
 * @param   uSubClass   My sub-class.
 * @param   fVerbose    Verbose description including relations to other
 *                      classes.
 */
static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
{
    if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        return;

    /* Stringify the sub-class: named constants below RTLOCKVAL_SUB_CLASS_USER,
       plain decimal for user-defined values. */
    const char *pszSubClass;
    char szSubClass[32];
    if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
        switch (uSubClass)
        {
            case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
            case RTLOCKVAL_SUB_CLASS_ANY:  pszSubClass = "any"; break;
            default:
                RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
                pszSubClass = szSubClass;
                break;
        }
    else
    {
        RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
        pszSubClass = szSubClass;
    }

    /* Validate the class pointer. */
    if (!VALID_PTR(pClass))
    {
        RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
        return;
    }
    if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
    {
        RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
        return;
    }

    /* OK, dump the class info. */
    RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
                        pClass,
                        pClass->pszName,
                        pClass->CreatePos.pszFile,
                        pClass->CreatePos.uLine,
                        pClass->CreatePos.pszFunction,
                        pClass->CreatePos.uId,
                        pszSubClass);
    if (fVerbose)
    {
        /* One line per occupied prior-class slot, including lookup counts. */
        uint32_t i = 0;
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
                                        cPrinted == 0
                                        ? "Prior:"
                                        : " ",
                                        i,
                                        pCurClass->pszName,
                                        pChunk->aRefs[j].fAutodidacticism
                                        ? "autodidactic"
                                        : "manually ",
                                        pChunk->aRefs[j].cLookups,
                                        pChunk->aRefs[j].cLookups != 1 ? "s" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
#endif
    }
    else
    {
        /* Compact form: up to ten prior-class names per output line, with '*'
           marking entries the class taught itself (autodidactic). */
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    if ((cPrinted % 10) == 0)
                        RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else if ((cPrinted % 10) != 9)
                        RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else
                        RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
        else if ((cPrinted % 10) != 0)
            RTAssertMsg2AddWeak("\n");
    }
}
529
530
531/**
532 * Helper for getting the class name.
533 * @returns Class name string.
534 * @param pClass The class.
535 */
536static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
537{
538 if (!pClass)
539 return "<nil-class>";
540 if (!VALID_PTR(pClass))
541 return "<bad-class-ptr>";
542 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
543 return "<bad-class-magic>";
544 if (!pClass->pszName)
545 return "<no-class-name>";
546 return pClass->pszName;
547}
548
549/**
550 * Formats the sub-class.
551 *
552 * @returns Stringified sub-class.
553 * @param uSubClass The name.
554 * @param pszBuf Buffer that is big enough.
555 */
556static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
557{
558 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
559 switch (uSubClass)
560 {
561 case RTLOCKVAL_SUB_CLASS_NONE: return "none";
562 case RTLOCKVAL_SUB_CLASS_ANY: return "any";
563 default:
564 RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
565 break;
566 }
567 else
568 RTStrPrintf(pszBuf, 32, "%x", uSubClass);
569 return pszBuf;
570}
571
572
573/**
574 * Helper for rtLockValComplainAboutLock.
575 */
576DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
577 uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
578 const char *pszFrameType)
579{
580 char szBuf[32];
581 switch (u32Magic)
582 {
583 case RTLOCKVALRECEXCL_MAGIC:
584#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
585 RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
586 pRec->Excl.hLock, pRec->Excl.szName, pRec,
587 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
588 rtLockValComplainGetClassName(pRec->Excl.hClass),
589 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
590 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
591 pszFrameType, pszSuffix);
592#else
593 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
594 pRec->Excl.hLock, pRec->Excl.szName,
595 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
596 rtLockValComplainGetClassName(pRec->Excl.hClass),
597 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
598 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
599 pszFrameType, pszSuffix);
600#endif
601 break;
602
603 case RTLOCKVALRECSHRD_MAGIC:
604 RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
605 pRec->Shared.hLock, pRec->Shared.szName, pRec,
606 rtLockValComplainGetClassName(pRec->Shared.hClass),
607 rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
608 pszFrameType, pszSuffix);
609 break;
610
611 case RTLOCKVALRECSHRDOWN_MAGIC:
612 {
613 PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
614 if ( VALID_PTR(pShared)
615 && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
616#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
617 RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
618 pShared->hLock, pShared->szName, pShared,
619 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
620 rtLockValComplainGetClassName(pShared->hClass),
621 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
622 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
623 pszSuffix, pszSuffix);
624#else
625 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
626 pShared->hLock, pShared->szName,
627 rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
628 rtLockValComplainGetClassName(pShared->hClass),
629 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
630 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
631 pszFrameType, pszSuffix);
632#endif
633 else
634 RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
635 pShared,
636 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
637 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
638 pszFrameType, pszSuffix);
639 break;
640 }
641
642 default:
643 AssertMsgFailed(("%#x\n", u32Magic));
644 }
645}
646
647
/**
 * Describes the lock.
 *
 * Dispatches on the record magic and forwards to the format helper; nest
 * (recursion) records are unwrapped to the record they refer to.  Produces no
 * output for invalid pointers or in quiet mode.
 *
 * @param   pszPrefix   Message prefix.
 * @param   pRec        The lock record we're working on.
 * @param   pszSuffix   Message suffix.
 */
static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
{
    /* With recursion records enabled each nesting has its own stack entry, so
       a fixed recursion count of 1 is displayed per record; without them the
       record's own counter is shown.  NOTE(review): presumed rationale --
       confirm against the recursion record handling elsewhere in the file. */
#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
# define FIX_REC(r) 1
#else
# define FIX_REC(r) (r)
#endif
    if (   VALID_PTR(pRec)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
                                              &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
                                              &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
                break;

            case RTLOCKVALRECNEST_MAGIC:
            {
                /* Nest record: describe the record it wraps, but use the nest
                   record's own source position and recursion count. */
                PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
                uint32_t u32Magic;
                if (   VALID_PTR(pRealRec)
                    && (   (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
                        || u32Magic == RTLOCKVALRECSHRD_MAGIC
                        || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
                   )
                    rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
                                                  &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
                else
                    RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pRealRec, pRec, pRec->Nest.cRecursion,
                                        pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
                                        pszSuffix);
                break;
            }

            default:
                RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
                break;
        }
    }
#undef FIX_REC
}
707
708
709/**
710 * Dump the lock stack.
711 *
712 * @param pThread The thread which lock stack we're gonna dump.
713 * @param cchIndent The indentation in chars.
714 * @param cMinFrames The minimum number of frames to consider
715 * dumping.
716 * @param pHighightRec Record that should be marked specially in the
717 * dump.
718 */
719static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
720 PRTLOCKVALRECUNION pHighightRec)
721{
722 if ( VALID_PTR(pThread)
723 && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
724 && pThread->u32Magic == RTTHREADINT_MAGIC
725 )
726 {
727 uint32_t cEntries = rtLockValidatorStackDepth(pThread);
728 if (cEntries >= cMinFrames)
729 {
730 RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
731 pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
732 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
733 for (uint32_t i = 0; VALID_PTR(pCur); i++)
734 {
735 char szPrefix[80];
736 RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
737 rtLockValComplainAboutLock(szPrefix, pCur, pHighightRec != pCur ? "\n" : " (*)\n");
738 switch (pCur->Core.u32Magic)
739 {
740 case RTLOCKVALRECEXCL_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown); break;
741 case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
742 case RTLOCKVALRECNEST_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown); break;
743 default:
744 RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
745 pCur = NULL;
746 break;
747 }
748 }
749 RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
750 }
751 }
752}
753
754
755/**
756 * Launch the initial complaint.
757 *
758 * @param pszWhat What we're complaining about.
759 * @param pSrcPos Where we are complaining from, as it were.
760 * @param pThreadSelf The calling thread.
761 * @param pRec The main lock involved. Can be NULL.
762 * @param fDumpStack Whether to dump the lock stack (true) or not
763 * (false).
764 */
765static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
766 PRTLOCKVALRECUNION pRec, bool fDumpStack)
767{
768 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
769 {
770 ASMCompilerBarrier(); /* paranoia */
771 RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
772 if (pSrcPos && pSrcPos->uId)
773 RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
774 else
775 RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
776 rtLockValComplainAboutLock("Lock: ", pRec, "\n");
777 if (fDumpStack)
778 rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
779 }
780}
781
782
783/**
784 * Continue bitching.
785 *
786 * @param pszFormat Format string.
787 * @param ... Format arguments.
788 */
789static void rtLockValComplainMore(const char *pszFormat, ...)
790{
791 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
792 {
793 va_list va;
794 va_start(va, pszFormat);
795 RTAssertMsg2AddWeakV(pszFormat, va);
796 va_end(va);
797 }
798}
799
800
801/**
802 * Raise a panic if enabled.
803 */
804static void rtLockValComplainPanic(void)
805{
806 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
807 RTAssertPanic();
808}
809
810
/**
 * Copy a source position record.
 *
 * @param   pDst    The destination.
 * @param   pSrc    The source.  Can be NULL, in which case pDst is zeroed.
 *
 * @note    Fields are written one by one with unordered atomic writes, so a
 *          concurrent reader can observe a mix of old and new fields.
 *          NOTE(review): presumably acceptable because positions are
 *          informational only -- confirm against the readers.
 */
DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
{
    if (pSrc)
    {
        ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
        ASMAtomicUoWritePtr(&pDst->pszFile, pSrc->pszFile);
        ASMAtomicUoWritePtr(&pDst->pszFunction, pSrc->pszFunction);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
    }
    else
    {
        ASMAtomicUoWriteU32(&pDst->uLine, 0);
        ASMAtomicUoWriteNullPtr(&pDst->pszFile);
        ASMAtomicUoWriteNullPtr(&pDst->pszFunction);
        ASMAtomicUoWritePtr(&pDst->uId, (RTHCUINTPTR)0);
    }
}
834
835
836/**
837 * Init a source position record.
838 *
839 * @param pSrcPos The source position record.
840 */
841DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
842{
843 pSrcPos->pszFile = NULL;
844 pSrcPos->pszFunction = NULL;
845 pSrcPos->uId = 0;
846 pSrcPos->uLine = 0;
847#if HC_ARCH_BITS == 64
848 pSrcPos->u32Padding = 0;
849#endif
850}
851
852
853/**
854 * Hashes the specified source position.
855 *
856 * @returns Hash.
857 * @param pSrcPos The source position record.
858 */
859static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
860{
861 uint32_t uHash;
862 if ( ( pSrcPos->pszFile
863 || pSrcPos->pszFunction)
864 && pSrcPos->uLine != 0)
865 {
866 uHash = 0;
867 if (pSrcPos->pszFile)
868 uHash = sdbmInc(pSrcPos->pszFile, uHash);
869 if (pSrcPos->pszFunction)
870 uHash = sdbmInc(pSrcPos->pszFunction, uHash);
871 uHash += pSrcPos->uLine;
872 }
873 else
874 {
875 Assert(pSrcPos->uId);
876 uHash = (uint32_t)pSrcPos->uId;
877 }
878
879 return uHash;
880}
881
882
883/**
884 * Compares two source positions.
885 *
886 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0 if
887 * otherwise.
888 * @param pSrcPos1 The first source position.
889 * @param pSrcPos2 The second source position.
890 */
891static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
892{
893 if (pSrcPos1->uLine != pSrcPos2->uLine)
894 return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;
895
896 int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
897 if (iDiff != 0)
898 return iDiff;
899
900 iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
901 if (iDiff != 0)
902 return iDiff;
903
904 if (pSrcPos1->uId != pSrcPos2->uId)
905 return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
906 return 0;
907}
908
909
910
911/**
912 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
913 */
914DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
915{
916 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
917 if (hXRoads != NIL_RTSEMXROADS)
918 RTSemXRoadsNSEnter(hXRoads);
919}
920
921
922/**
923 * Call after rtLockValidatorSerializeDestructEnter.
924 */
925DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
926{
927 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
928 if (hXRoads != NIL_RTSEMXROADS)
929 RTSemXRoadsNSLeave(hXRoads);
930}
931
932
933/**
934 * Serializes deadlock detection against destruction of the objects being
935 * inspected.
936 */
937DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
938{
939 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
940 if (hXRoads != NIL_RTSEMXROADS)
941 RTSemXRoadsEWEnter(hXRoads);
942}
943
944
945/**
946 * Call after rtLockValidatorSerializeDetectionEnter.
947 */
948DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
949{
950 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
951 if (hXRoads != NIL_RTSEMXROADS)
952 RTSemXRoadsEWLeave(hXRoads);
953}
954
955
956/**
957 * Initializes the per thread lock validator data.
958 *
959 * @param pPerThread The data.
960 */
961DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
962{
963 pPerThread->bmFreeShrdOwners = UINT32_MAX;
964
965 /* ASSUMES the rest has already been zeroed. */
966 Assert(pPerThread->pRec == NULL);
967 Assert(pPerThread->cWriteLocks == 0);
968 Assert(pPerThread->cReadLocks == 0);
969 Assert(pPerThread->fInValidator == false);
970 Assert(pPerThread->pStackTop == NULL);
971}
972
973
974/**
975 * Delete the per thread lock validator data.
976 *
977 * @param pPerThread The data.
978 */
979DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
980{
981 /*
982 * Check that the thread doesn't own any locks at this time.
983 */
984 if (pPerThread->pStackTop)
985 {
986 rtLockValComplainFirst("Thread terminating owning locks!", NULL,
987 RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
988 pPerThread->pStackTop, true);
989 rtLockValComplainPanic();
990 }
991
992 /*
993 * Free the recursion records.
994 */
995 PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
996 pPerThread->pFreeNestRecs = NULL;
997 while (pCur)
998 {
999 PRTLOCKVALRECNEST pNext = pCur->pNextFree;
1000 RTMemFree(pCur);
1001 pCur = pNext;
1002 }
1003}
1004
1005RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1006 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1007 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1008 const char *pszNameFmt, ...)
1009{
1010 va_list va;
1011 va_start(va, pszNameFmt);
1012 int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
1013 cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
1014 va_end(va);
1015 return rc;
1016}
1017
1018
1019RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1020 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1021 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1022 const char *pszNameFmt, va_list va)
1023{
1024 Assert(cMsMinDeadlock >= 1);
1025 Assert(cMsMinOrder >= 1);
1026 AssertPtr(pSrcPos);
1027
1028 /*
1029 * Format the name and calc its length.
1030 */
1031 size_t cbName;
1032 char szName[32];
1033 if (pszNameFmt && *pszNameFmt)
1034 cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
1035 else
1036 {
1037 static uint32_t volatile s_cAnonymous = 0;
1038 uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
1039 cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
1040 }
1041
1042 /*
1043 * Figure out the file and function name lengths and allocate memory for
1044 * it all.
1045 */
1046 size_t const cbFile = pSrcPos->pszFile ? strlen(pSrcPos->pszFile) + 1 : 0;
1047 size_t const cbFunction = pSrcPos->pszFile ? strlen(pSrcPos->pszFunction) + 1 : 0;
1048 RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAllocVarTag(sizeof(*pThis) + cbFile + cbFunction + cbName,
1049 "may-leak:RTLockValidatorClassCreateExV");
1050 if (!pThis)
1051 return VERR_NO_MEMORY;
1052
1053 /*
1054 * Initialize the class data.
1055 */
1056 pThis->Core.Key = rtLockValidatorSrcPosHash(pSrcPos);
1057 pThis->Core.uchHeight = 0;
1058 pThis->Core.pLeft = NULL;
1059 pThis->Core.pRight = NULL;
1060 pThis->Core.pList = NULL;
1061 pThis->u32Magic = RTLOCKVALCLASS_MAGIC;
1062 pThis->cRefs = 1;
1063 pThis->fAutodidact = fAutodidact;
1064 pThis->fRecursionOk = fRecursionOk;
1065 pThis->fStrictReleaseOrder = fStrictReleaseOrder;
1066 pThis->fInTree = false;
1067 pThis->fDonateRefToNextRetainer = false;
1068 pThis->afReserved[0] = false;
1069 pThis->afReserved[1] = false;
1070 pThis->afReserved[2] = false;
1071 pThis->cMsMinDeadlock = cMsMinDeadlock;
1072 pThis->cMsMinOrder = cMsMinOrder;
1073 for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
1074 pThis->au32Reserved[i] = 0;
1075 for (unsigned i = 0; i < RT_ELEMENTS(pThis->PriorLocks.aRefs); i++)
1076 {
1077 pThis->PriorLocks.aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1078 pThis->PriorLocks.aRefs[i].cLookups = 0;
1079 pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
1080 pThis->PriorLocks.aRefs[i].afReserved[0] = false;
1081 pThis->PriorLocks.aRefs[i].afReserved[1] = false;
1082 pThis->PriorLocks.aRefs[i].afReserved[2] = false;
1083 }
1084 pThis->PriorLocks.pNext = NULL;
1085 for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
1086 pThis->apPriorLocksHash[i] = NULL;
1087 char *pszDst = (char *)(pThis + 1);
1088 pThis->pszName = (char *)memcpy(pszDst, szName, cbName);
1089 pszDst += cbName;
1090 rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
1091 pThis->CreatePos.pszFile = pSrcPos->pszFile ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
1092 pszDst += cbFile;
1093 pThis->CreatePos.pszFunction= pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
1094 Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
1095#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1096 pThis->cHashHits = 0;
1097 pThis->cHashMisses = 0;
1098#endif
1099
1100 *phClass = pThis;
1101 return VINF_SUCCESS;
1102}
1103
1104
1105RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1106{
1107 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1108 va_list va;
1109 va_start(va, pszNameFmt);
1110 int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
1111 fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1112 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1113 pszNameFmt, va);
1114 va_end(va);
1115 return rc;
1116}
1117
1118
1119/**
1120 * Creates a new lock validator class with a reference that is consumed by the
1121 * first call to RTLockValidatorClassRetain.
1122 *
1123 * This is tailored for use in the parameter list of a semaphore constructor.
1124 *
1125 * @returns Class handle with a reference that is automatically consumed by the
1126 * first retainer. NIL_RTLOCKVALCLASS if we run into trouble.
1127 *
1128 * @param SRC_POS The source position where call is being made from.
1129 * Use RT_SRC_POS when possible. Optional.
1130 * @param pszNameFmt Class name format string, optional (NULL). Max
1131 * length is 32 bytes.
1132 * @param ... Format string arguments.
1133 */
1134RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1135{
1136 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1137 RTLOCKVALCLASSINT *pClass;
1138 va_list va;
1139 va_start(va, pszNameFmt);
1140 int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
1141 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1142 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1143 pszNameFmt, va);
1144 va_end(va);
1145 if (RT_FAILURE(rc))
1146 return NIL_RTLOCKVALCLASS;
1147 ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
1148 return pClass;
1149}
1150
1151
1152/**
1153 * Internal class retainer.
1154 * @returns The new reference count.
1155 * @param pClass The class.
1156 */
1157DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
1158{
1159 uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
1160 if (cRefs > RTLOCKVALCLASS_MAX_REFS)
1161 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1162 else if ( cRefs == 2
1163 && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
1164 cRefs = ASMAtomicDecU32(&pClass->cRefs);
1165 return cRefs;
1166}
1167
1168
1169/**
1170 * Validates and retains a lock validator class.
1171 *
1172 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
1173 * @param hClass The class handle. NIL_RTLOCKVALCLASS is ok.
1174 */
1175DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
1176{
1177 if (hClass == NIL_RTLOCKVALCLASS)
1178 return hClass;
1179 AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
1180 AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
1181 rtLockValidatorClassRetain(hClass);
1182 return hClass;
1183}
1184
1185
1186/**
1187 * Internal class releaser.
1188 * @returns The new reference count.
1189 * @param pClass The class.
1190 */
1191DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
1192{
1193 uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
1194 if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
1195 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1196 else if (!cRefs)
1197 rtLockValidatorClassDestroy(pClass);
1198 return cRefs;
1199}
1200
1201
1202/**
1203 * Destroys a class once there are not more references to it.
1204 *
1205 * @param pClass The class.
1206 */
1207static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
1208{
1209 AssertReturnVoid(!pClass->fInTree);
1210 ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);
1211
1212 PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
1213 while (pChunk)
1214 {
1215 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1216 {
1217 RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
1218 if (pClass2 != NIL_RTLOCKVALCLASS)
1219 {
1220 pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1221 rtLockValidatorClassRelease(pClass2);
1222 }
1223 }
1224
1225 PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
1226 pChunk->pNext = NULL;
1227 if (pChunk != &pClass->PriorLocks)
1228 RTMemFree(pChunk);
1229 pChunk = pNext;
1230 }
1231
1232 RTMemFree(pClass);
1233}
1234
1235
1236RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
1237{
1238 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1239 rtLockValidatorLazyInit();
1240 int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1241
1242 uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
1243 RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
1244 while (pClass)
1245 {
1246 if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
1247 break;
1248 pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
1249 }
1250
1251 if (RT_SUCCESS(rcLock))
1252 RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
1253 return pClass;
1254}
1255
1256
/**
 * Finds the class associated with the given source position, creating and
 * inserting a new autodidactic class if none exists yet.
 *
 * @returns Class handle, NIL_RTLOCKVALCLASS on allocation failure.
 * @param   SRC_POS     The source position to look up / create for.
 * @param   pszNameFmt  Class name format string used if a class is created.
 * @param   ...         Format string arguments.
 */
RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
    RTLOCKVALCLASS hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
    if (hClass == NIL_RTLOCKVALCLASS)
    {
        /*
         * Create a new class and insert it into the tree.
         */
        va_list va;
        va_start(va, pszNameFmt);
        int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
                                               true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
                                               1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
                                               pszNameFmt, va);
        va_end(va);
        if (RT_SUCCESS(rc))
        {
            if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
                rtLockValidatorLazyInit();
            int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);

            /* NOTE(review): a racing creator may have inserted an equal class
               between the lookup above and taking the write lock; this insert
               would then land on the same-key list rather than replace it. */
            Assert(!hClass->fInTree);
            hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
            Assert(hClass->fInTree);

            if (RT_SUCCESS(rcLock))
                RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
            return hClass;
        }
    }
    return hClass;
}
1290
1291
1292RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
1293{
1294 RTLOCKVALCLASSINT *pClass = hClass;
1295 AssertPtrReturn(pClass, UINT32_MAX);
1296 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1297 return rtLockValidatorClassRetain(pClass);
1298}
1299
1300
1301RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
1302{
1303 RTLOCKVALCLASSINT *pClass = hClass;
1304 if (pClass == NIL_RTLOCKVALCLASS)
1305 return 0;
1306 AssertPtrReturn(pClass, UINT32_MAX);
1307 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1308 return rtLockValidatorClassRelease(pClass);
1309}
1310
1311
1312/**
1313 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
1314 * all the chunks for @a pPriorClass.
1315 *
1316 * @returns true / false.
1317 * @param pClass The class to search.
1318 * @param pPriorClass The class to search for.
1319 */
1320static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1321{
1322 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
1323 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1324 {
1325 if (pChunk->aRefs[i].hClass == pPriorClass)
1326 {
1327 uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
1328 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1329 {
1330 ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1331 cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
1332 }
1333
1334 /* update the hash table entry. */
1335 PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1336 if ( !(*ppHashEntry)
1337 || (*ppHashEntry)->cLookups + 128 < cLookups)
1338 ASMAtomicWritePtr(ppHashEntry, &pChunk->aRefs[i]);
1339
1340#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1341 ASMAtomicIncU32(&pClass->cHashMisses);
1342#endif
1343 return true;
1344 }
1345 }
1346
1347 return false;
1348}
1349
1350
1351/**
1352 * Checks if @a pPriorClass is a known prior class.
1353 *
1354 * @returns true / false.
1355 * @param pClass The class to search.
1356 * @param pPriorClass The class to search for.
1357 */
1358DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1359{
1360 /*
1361 * Hash lookup here.
1362 */
1363 PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1364 if ( pRef
1365 && pRef->hClass == pPriorClass)
1366 {
1367 uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
1368 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1369 ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1370#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1371 ASMAtomicIncU32(&pClass->cHashHits);
1372#endif
1373 return true;
1374 }
1375
1376 return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
1377}
1378
1379
1380/**
1381 * Adds a class to the prior list.
1382 *
1383 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
1384 * @param pClass The class to work on.
1385 * @param pPriorClass The class to add.
1386 * @param fAutodidacticism Whether we're teaching ourselves (true) or
1387 * somebody is teaching us via the API (false).
1388 * @param pSrcPos Where this rule was added (optional).
1389 */
1390static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
1391 bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
1392{
1393 NOREF(pSrcPos);
1394 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
1395 rtLockValidatorLazyInit();
1396 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
1397
1398 /*
1399 * Check that there are no conflict (no assert since we might race each other).
1400 */
1401 int rc = VERR_SEM_LV_INTERNAL_ERROR;
1402 if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
1403 {
1404 if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
1405 {
1406 /*
1407 * Scan the table for a free entry, allocating a new chunk if necessary.
1408 */
1409 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
1410 {
1411 bool fDone = false;
1412 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1413 {
1414 ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
1415 if (fDone)
1416 {
1417 pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
1418 rtLockValidatorClassRetain(pPriorClass);
1419 rc = VINF_SUCCESS;
1420 break;
1421 }
1422 }
1423 if (fDone)
1424 break;
1425
1426 /* If no more chunks, allocate a new one and insert the class before linking it. */
1427 if (!pChunk->pNext)
1428 {
1429 PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
1430 if (!pNew)
1431 {
1432 rc = VERR_NO_MEMORY;
1433 break;
1434 }
1435 pNew->pNext = NULL;
1436 for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
1437 {
1438 pNew->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1439 pNew->aRefs[i].cLookups = 0;
1440 pNew->aRefs[i].fAutodidacticism = false;
1441 pNew->aRefs[i].afReserved[0] = false;
1442 pNew->aRefs[i].afReserved[1] = false;
1443 pNew->aRefs[i].afReserved[2] = false;
1444 }
1445
1446 pNew->aRefs[0].hClass = pPriorClass;
1447 pNew->aRefs[0].fAutodidacticism = fAutodidacticism;
1448
1449 ASMAtomicWritePtr(&pChunk->pNext, pNew);
1450 rtLockValidatorClassRetain(pPriorClass);
1451 rc = VINF_SUCCESS;
1452 break;
1453 }
1454 } /* chunk loop */
1455 }
1456 else
1457 rc = VINF_SUCCESS;
1458 }
1459 else
1460 rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
1461
1462 if (RT_SUCCESS(rcLock))
1463 RTCritSectLeave(&g_LockValClassTeachCS);
1464 return rc;
1465}
1466
1467
1468RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
1469{
1470 RTLOCKVALCLASSINT *pClass = hClass;
1471 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1472 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1473
1474 RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
1475 AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
1476 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1477
1478 return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
1479}
1480
1481
1482RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
1483{
1484 RTLOCKVALCLASSINT *pClass = hClass;
1485 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1486 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1487
1488 ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
1489 return VINF_SUCCESS;
1490}
1491
1492
1493/**
1494 * Unlinks all siblings.
1495 *
1496 * This is used during record deletion and assumes no races.
1497 *
1498 * @param pCore One of the siblings.
1499 */
1500static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
1501{
1502 /* ASSUMES sibling destruction doesn't involve any races and that all
1503 related records are to be disposed off now. */
1504 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
1505 while (pSibling)
1506 {
1507 PRTLOCKVALRECUNION volatile *ppCoreNext;
1508 switch (pSibling->Core.u32Magic)
1509 {
1510 case RTLOCKVALRECEXCL_MAGIC:
1511 case RTLOCKVALRECEXCL_MAGIC_DEAD:
1512 ppCoreNext = &pSibling->Excl.pSibling;
1513 break;
1514
1515 case RTLOCKVALRECSHRD_MAGIC:
1516 case RTLOCKVALRECSHRD_MAGIC_DEAD:
1517 ppCoreNext = &pSibling->Shared.pSibling;
1518 break;
1519
1520 default:
1521 AssertFailed();
1522 ppCoreNext = NULL;
1523 break;
1524 }
1525 if (RT_UNLIKELY(ppCoreNext))
1526 break;
1527 pSibling = ASMAtomicXchgPtrT(ppCoreNext, NULL, PRTLOCKVALRECUNION);
1528 }
1529}
1530
1531
1532RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
1533{
1534 /*
1535 * Validate input.
1536 */
1537 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
1538 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
1539
1540 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
1541 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1542 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1543 , VERR_SEM_LV_INVALID_PARAMETER);
1544
1545 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
1546 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1547 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1548 , VERR_SEM_LV_INVALID_PARAMETER);
1549
1550 /*
1551 * Link them (circular list).
1552 */
1553 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1554 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
1555 {
1556 p1->Excl.pSibling = p2;
1557 p2->Shared.pSibling = p1;
1558 }
1559 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1560 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
1561 {
1562 p1->Shared.pSibling = p2;
1563 p2->Excl.pSibling = p1;
1564 }
1565 else
1566 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
1567
1568 return VINF_SUCCESS;
1569}
1570
1571
#if 0 /* unused */
/**
 * Gets the lock name for the given record.
 *
 * @returns Read-only lock name.
 * @param   pRec        The lock record.
 */
DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.szName;
        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.szName;
        case RTLOCKVALRECSHRDOWN_MAGIC:
            /* Owner records carry no name; use the shared record's if linked. */
            return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
        case RTLOCKVALRECNEST_MAGIC:
            /* Recursion record: resolve the real record it refers to. */
            pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
            if (VALID_PTR(pRec))
            {
                switch (pRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRec->Excl.szName;
                    case RTLOCKVALRECSHRD_MAGIC:
                        return pRec->Shared.szName;
                    case RTLOCKVALRECSHRDOWN_MAGIC:
                        return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
                    default:
                        return "unknown-nested";
                }
            }
            return "orphaned-nested";
        default:
            return "unknown";
    }
}
#endif /* unused */
1611
1612
#if 0 /* unused */
/**
 * Gets the class for this locking record.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec        The lock validator record.
 */
DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            /* Owner record: the class lives on the shared record, which may
               already have been torn down - validate before dereferencing. */
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                return pSharedRec->hClass;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            /* Recursion record: resolve the real record it refers to. */
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                            return pSharedRec->hClass;
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            return NIL_RTLOCKVALCLASS;
    }
}
#endif /* unused */
1672
1673/**
1674 * Gets the class for this locking record and the pointer to the one below it in
1675 * the stack.
1676 *
1677 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
1678 * @param pRec The lock validator record.
1679 * @param puSubClass Where to return the sub-class.
1680 * @param ppDown Where to return the pointer to the record below.
1681 */
1682DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
1683rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
1684{
1685 switch (pRec->Core.u32Magic)
1686 {
1687 case RTLOCKVALRECEXCL_MAGIC:
1688 *ppDown = pRec->Excl.pDown;
1689 *puSubClass = pRec->Excl.uSubClass;
1690 return pRec->Excl.hClass;
1691
1692 case RTLOCKVALRECSHRD_MAGIC:
1693 *ppDown = NULL;
1694 *puSubClass = pRec->Shared.uSubClass;
1695 return pRec->Shared.hClass;
1696
1697 case RTLOCKVALRECSHRDOWN_MAGIC:
1698 {
1699 *ppDown = pRec->ShrdOwner.pDown;
1700
1701 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1702 if (RT_LIKELY( VALID_PTR(pSharedRec)
1703 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1704 {
1705 *puSubClass = pSharedRec->uSubClass;
1706 return pSharedRec->hClass;
1707 }
1708 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1709 return NIL_RTLOCKVALCLASS;
1710 }
1711
1712 case RTLOCKVALRECNEST_MAGIC:
1713 {
1714 *ppDown = pRec->Nest.pDown;
1715
1716 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1717 if (VALID_PTR(pRealRec))
1718 {
1719 switch (pRealRec->Core.u32Magic)
1720 {
1721 case RTLOCKVALRECEXCL_MAGIC:
1722 *puSubClass = pRealRec->Excl.uSubClass;
1723 return pRealRec->Excl.hClass;
1724
1725 case RTLOCKVALRECSHRDOWN_MAGIC:
1726 {
1727 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1728 if (RT_LIKELY( VALID_PTR(pSharedRec)
1729 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1730 {
1731 *puSubClass = pSharedRec->uSubClass;
1732 return pSharedRec->hClass;
1733 }
1734 break;
1735 }
1736
1737 default:
1738 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1739 break;
1740 }
1741 }
1742 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1743 return NIL_RTLOCKVALCLASS;
1744 }
1745
1746 default:
1747 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1748 *ppDown = NULL;
1749 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1750 return NIL_RTLOCKVALCLASS;
1751 }
1752}
1753
1754
1755/**
1756 * Gets the sub-class for a lock record.
1757 *
1758 * @returns the sub-class.
1759 * @param pRec The lock validator record.
1760 */
1761DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1762{
1763 switch (pRec->Core.u32Magic)
1764 {
1765 case RTLOCKVALRECEXCL_MAGIC:
1766 return pRec->Excl.uSubClass;
1767
1768 case RTLOCKVALRECSHRD_MAGIC:
1769 return pRec->Shared.uSubClass;
1770
1771 case RTLOCKVALRECSHRDOWN_MAGIC:
1772 {
1773 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1774 if (RT_LIKELY( VALID_PTR(pSharedRec)
1775 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1776 return pSharedRec->uSubClass;
1777 return RTLOCKVAL_SUB_CLASS_NONE;
1778 }
1779
1780 case RTLOCKVALRECNEST_MAGIC:
1781 {
1782 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1783 if (VALID_PTR(pRealRec))
1784 {
1785 switch (pRealRec->Core.u32Magic)
1786 {
1787 case RTLOCKVALRECEXCL_MAGIC:
1788 return pRec->Excl.uSubClass;
1789
1790 case RTLOCKVALRECSHRDOWN_MAGIC:
1791 {
1792 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1793 if (RT_LIKELY( VALID_PTR(pSharedRec)
1794 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1795 return pSharedRec->uSubClass;
1796 break;
1797 }
1798
1799 default:
1800 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1801 break;
1802 }
1803 }
1804 return RTLOCKVAL_SUB_CLASS_NONE;
1805 }
1806
1807 default:
1808 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1809 return RTLOCKVAL_SUB_CLASS_NONE;
1810 }
1811}
1812
1813
1814
1815
1816/**
1817 * Calculates the depth of a lock stack.
1818 *
1819 * @returns Number of stack frames.
1820 * @param pThread The thread.
1821 */
1822static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
1823{
1824 uint32_t cEntries = 0;
1825 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
1826 while (VALID_PTR(pCur))
1827 {
1828 switch (pCur->Core.u32Magic)
1829 {
1830 case RTLOCKVALRECEXCL_MAGIC:
1831 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
1832 break;
1833
1834 case RTLOCKVALRECSHRDOWN_MAGIC:
1835 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
1836 break;
1837
1838 case RTLOCKVALRECNEST_MAGIC:
1839 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
1840 break;
1841
1842 default:
1843 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
1844 }
1845 cEntries++;
1846 }
1847 return cEntries;
1848}
1849
1850
#ifdef RT_STRICT
/**
 * Checks if the stack contains @a pRec.
 *
 * Strict-build helper used from assertions only.
 *
 * @returns true / false.
 * @param   pThreadSelf The current thread.
 * @param   pRec        The lock record.
 */
static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
    while (pCur)
    {
        AssertPtrReturn(pCur, false);
        if (pCur == pRec)
            return true;
        /* Follow the down pointer appropriate for the record type. */
        switch (pCur->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                Assert(pCur->Excl.cRecursion >= 1);
                pCur = pCur->Excl.pDown;
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                Assert(pCur->ShrdOwner.cRecursion >= 1);
                pCur = pCur->ShrdOwner.pDown;
                break;

            case RTLOCKVALRECNEST_MAGIC:
                /* Nest records only exist for actual recursion (> 1). */
                Assert(pCur->Nest.cRecursion > 1);
                pCur = pCur->Nest.pDown;
                break;

            default:
                AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), false);
        }
    }
    return false;
}
#endif /* RT_STRICT */
1891
1892
1893/**
1894 * Pushes a lock record onto the stack.
1895 *
1896 * @param pThreadSelf The current thread.
1897 * @param pRec The lock record.
1898 */
1899static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1900{
1901 Assert(pThreadSelf == RTThreadSelf());
1902 Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));
1903
1904 switch (pRec->Core.u32Magic)
1905 {
1906 case RTLOCKVALRECEXCL_MAGIC:
1907 Assert(pRec->Excl.cRecursion == 1);
1908 Assert(pRec->Excl.pDown == NULL);
1909 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
1910 break;
1911
1912 case RTLOCKVALRECSHRDOWN_MAGIC:
1913 Assert(pRec->ShrdOwner.cRecursion == 1);
1914 Assert(pRec->ShrdOwner.pDown == NULL);
1915 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
1916 break;
1917
1918 default:
1919 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1920 }
1921 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
1922}
1923
1924
1925/**
1926 * Pops a lock record off the stack.
1927 *
1928 * @param pThreadSelf The current thread.
1929 * @param pRec The lock.
1930 */
1931static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1932{
1933 Assert(pThreadSelf == RTThreadSelf());
1934
1935 PRTLOCKVALRECUNION pDown;
1936 switch (pRec->Core.u32Magic)
1937 {
1938 case RTLOCKVALRECEXCL_MAGIC:
1939 Assert(pRec->Excl.cRecursion == 0);
1940 pDown = pRec->Excl.pDown;
1941 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
1942 break;
1943
1944 case RTLOCKVALRECSHRDOWN_MAGIC:
1945 Assert(pRec->ShrdOwner.cRecursion == 0);
1946 pDown = pRec->ShrdOwner.pDown;
1947 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
1948 break;
1949
1950 default:
1951 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1952 }
1953 if (pThreadSelf->LockValidator.pStackTop == pRec)
1954 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
1955 else
1956 {
1957 /* Find the pointer to our record and unlink ourselves. */
1958 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1959 while (pCur)
1960 {
1961 PRTLOCKVALRECUNION volatile *ppDown;
1962 switch (pCur->Core.u32Magic)
1963 {
1964 case RTLOCKVALRECEXCL_MAGIC:
1965 Assert(pCur->Excl.cRecursion >= 1);
1966 ppDown = &pCur->Excl.pDown;
1967 break;
1968
1969 case RTLOCKVALRECSHRDOWN_MAGIC:
1970 Assert(pCur->ShrdOwner.cRecursion >= 1);
1971 ppDown = &pCur->ShrdOwner.pDown;
1972 break;
1973
1974 case RTLOCKVALRECNEST_MAGIC:
1975 Assert(pCur->Nest.cRecursion >= 1);
1976 ppDown = &pCur->Nest.pDown;
1977 break;
1978
1979 default:
1980 AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
1981 }
1982 pCur = *ppDown;
1983 if (pCur == pRec)
1984 {
1985 rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
1986 return;
1987 }
1988 }
1989 AssertMsgFailed(("%p %p\n", pRec, pThreadSelf));
1990 }
1991}
1992
1993
/**
 * Creates and pushes lock recursion record onto the stack.
 *
 * Only does anything when RTLOCKVAL_WITH_RECURSION_RECORDS is defined;
 * otherwise it is a no-op.  Allocation failure is silently ignored - the
 * stack simply won't show this recursion.
 *
 * @param   pThreadSelf The current thread.
 * @param   pRec        The lock record (must already be on the stack).
 * @param   pSrcPos     Where the recursion occurred.
 */
static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));

#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
    /*
     * Allocate a new recursion record, preferring the thread's free list.
     */
    PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
    if (pRecursionRec)
        pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
    else
    {
        pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
        if (!pRecursionRec)
            return;
    }

    /*
     * Initialize it.  Note that the magic is written last so the record
     * never looks valid while only half initialized.
     */
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            pRecursionRec->cRecursion = pRec->Excl.cRecursion;
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
            break;

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            /* Let any concurrent destruction-serialized readers drain
               before handing the memory back. */
            rtLockValidatorSerializeDestructEnter();
            rtLockValidatorSerializeDestructLeave();
            RTMemFree(pRecursionRec);
            return;
    }
    Assert(pRecursionRec->cRecursion > 1);
    pRecursionRec->pRec = pRec;
    pRecursionRec->pDown = NULL;
    pRecursionRec->pNextFree = NULL;
    rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
    pRecursionRec->Core.u32Magic = RTLOCKVALRECNEST_MAGIC;

    /*
     * Link it onto the top of the stack.
     */
    pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
}
2054
2055
2056/**
2057 * Pops a lock recursion record off the stack.
2058 *
2059 * @param pThreadSelf The current thread.
2060 * @param pRec The lock record.
2061 */
2062static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2063{
2064 Assert(pThreadSelf == RTThreadSelf());
2065 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2066
2067 uint32_t cRecursion;
2068 switch (pRec->Core.u32Magic)
2069 {
2070 case RTLOCKVALRECEXCL_MAGIC: cRecursion = pRec->Excl.cRecursion; break;
2071 case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
2072 default: AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
2073 }
2074 Assert(cRecursion >= 1);
2075
2076#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2077 /*
2078 * Pop the recursion record.
2079 */
2080 PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
2081 if ( pNest != NULL
2082 && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2083 && pNest->Nest.pRec == pRec
2084 )
2085 {
2086 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2087 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
2088 }
2089 else
2090 {
2091 /* Find the record above ours. */
2092 PRTLOCKVALRECUNION volatile *ppDown = NULL;
2093 for (;;)
2094 {
2095 AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
2096 switch (pNest->Core.u32Magic)
2097 {
2098 case RTLOCKVALRECEXCL_MAGIC:
2099 ppDown = &pNest->Excl.pDown;
2100 pNest = *ppDown;
2101 continue;
2102 case RTLOCKVALRECSHRDOWN_MAGIC:
2103 ppDown = &pNest->ShrdOwner.pDown;
2104 pNest = *ppDown;
2105 continue;
2106 case RTLOCKVALRECNEST_MAGIC:
2107 if (pNest->Nest.pRec == pRec)
2108 break;
2109 ppDown = &pNest->Nest.pDown;
2110 pNest = *ppDown;
2111 continue;
2112 default:
2113 AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
2114 }
2115 break; /* ugly */
2116 }
2117 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2118 rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
2119 }
2120
2121 /*
2122 * Invalidate and free the record.
2123 */
2124 ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC);
2125 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
2126 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
2127 pNest->Nest.cRecursion = 0;
2128 pNest->Nest.pNextFree = pThreadSelf->LockValidator.pFreeNestRecs;
2129 pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
2130#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2131}
2132
2133
2134/**
2135 * Helper for rtLockValidatorStackCheckLockingOrder that does the bitching and
2136 * returns VERR_SEM_LV_WRONG_ORDER.
2137 */
2138static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
2139 PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
2140 RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)
2141
2142
2143{
2144 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
2145 rtLockValComplainAboutLock("Other lock: ", pRec2, "\n");
2146 rtLockValComplainAboutClass("My class: ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
2147 rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
2148 rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
2149 rtLockValComplainPanic();
2150 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
2151}
2152
2153
2154/**
2155 * Checks if the sub-class order is ok or not.
2156 *
2157 * Used to deal with two locks from the same class.
2158 *
2159 * @returns true if ok, false if not.
2160 * @param uSubClass1 The sub-class of the lock that is being
2161 * considered.
2162 * @param uSubClass2 The sub-class of the lock that is already being
2163 * held.
2164 */
2165DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2166{
2167 if (uSubClass1 > uSubClass2)
2168 {
2169 /* NONE kills ANY. */
2170 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2171 return false;
2172 return true;
2173 }
2174
2175 /* ANY counters all USER values. (uSubClass1 == NONE only if they are equal) */
2176 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2177 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2178 return true;
2179 return false;
2180}
2181
2182
2183/**
2184 * Checks if the class and sub-class lock order is ok.
2185 *
2186 * @returns true if ok, false if not.
2187 * @param pClass1 The class of the lock that is being considered.
2188 * @param uSubClass1 The sub-class that goes with @a pClass1.
2189 * @param pClass2 The class of the lock that is already being
2190 * held.
2191 * @param uSubClass2 The sub-class that goes with @a pClass2.
2192 */
2193DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2194 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2195{
2196 if (pClass1 == pClass2)
2197 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2198 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2199}
2200
2201
/**
 * Checks the locking order, part two - the slow path.
 *
 * Called by rtLockValidatorStackCheckLockingOrder when a class order
 * violation was spotted.  Either reports the violation, or - when the class
 * is autodidactic - teaches the class the new ordering rule(s) instead.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
 * @param   pClass          The class of the lock being acquired.
 * @param   uSubClass       The sub-class of the lock being acquired.
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The record of the lock being acquired.
 * @param   pSrcPos         The source position of the locking operation.
 * @param   pFirstBadClass  The class of the first conflicting lock found.
 * @param   pFirstBadRec    The record of the first conflicting lock found.
 * @param   pFirstBadDown   The record below @a pFirstBadRec on the stack.
 */
static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
                                                  PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
                                                  PCRTLOCKVALSRCPOS const pSrcPos,
                                                  RTLOCKVALCLASSINT * const pFirstBadClass,
                                                  PRTLOCKVALRECUNION const pFirstBadRec,
                                                  PRTLOCKVALRECUNION const pFirstBadDown)
{
    /*
     * Something went wrong, pCur is pointing to where.
     *
     * A genuine violation (the held class is equal to or a declared prior
     * of ours) cannot be taught away; neither can anything be taught to a
     * non-autodidact class.
     */
    if (   pClass == pFirstBadClass
        || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
        return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);
    if (!pClass->fAutodidact)
        return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);

    /*
     * This class is an autodidact, so we have to check out the rest of the stack
     * for direct violations.
     */
    uint32_t cNewRules = 1;
    PRTLOCKVALRECUNION pCur = pFirstBadDown;
    while (pCur)
    {
        AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);

        if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
            pCur = pCur->Nest.pDown;
        else
        {
            PRTLOCKVALRECUNION pDown;
            uint32_t uPriorSubClass;
            RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
            if (pPriorClass != NIL_RTLOCKVALCLASS)
            {
                AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
                AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
                if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                {
                    /* Unteachable conflict further down the stack? */
                    if (   pClass == pPriorClass
                        || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
                                                              pRec, pCur, pClass, pPriorClass);
                    cNewRules++;
                }
            }
            pCur = pDown;
        }
    }

    if (cNewRules == 1)
    {
        /*
         * Special case the simple operation, hoping that it will be a
         * frequent case.
         */
        int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
        if (rc == VERR_SEM_LV_WRONG_ORDER)
            return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
                                                  pRec, pFirstBadRec, pClass, pFirstBadClass);
        Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
    }
    else
    {
        /*
         * We may be adding more than one rule, so we have to take the lock
         * before starting to add the rules.  This means we have to check
         * the state after taking it since we might be racing someone adding
         * a conflicting rule.
         *
         * Note: an RTCritSectEnter failure is tolerated; we then proceed
         * unlocked (best effort) and simply skip the matching leave.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            rtLockValidatorLazyInit();
        int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

        /* Check (re-scan now that we hold the teaching lock). */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t uPriorSubClass;
                PRTLOCKVALRECUNION pDown;
                RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        if (   pClass == pPriorClass
                            || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        {
                            if (RT_SUCCESS(rcLock))
                                RTCritSectLeave(&g_LockValClassTeachCS);
                            return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
                                                                  pRec, pCur, pClass, pPriorClass);
                        }
                    }
                }
                pCur = pDown;
            }
        }

        /* Iterate the stack yet again, adding new rules this time. */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t uPriorSubClass;
                PRTLOCKVALRECUNION pDown;
                RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        Assert(   pClass != pPriorClass
                               && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
                        int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
                        if (RT_FAILURE(rc))
                        {
                            /* Out of memory: give up teaching, but don't fail the lock. */
                            Assert(rc == VERR_NO_MEMORY);
                            break;
                        }
                        Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
                    }
                }
                pCur = pDown;
            }
        }

        if (RT_SUCCESS(rcLock))
            RTCritSectLeave(&g_LockValClassTeachCS);
    }

    return VINF_SUCCESS;
}
2356
2357
2358
2359/**
2360 * Checks the locking order.
2361 *
2362 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2363 * @param pClass The lock class.
2364 * @param uSubClass The lock sub-class.
2365 * @param pThreadSelf The current thread.
2366 * @param pRec The lock record.
2367 * @param pSrcPos The source position of the locking operation.
2368 */
2369static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2370 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2371 PCRTLOCKVALSRCPOS pSrcPos)
2372{
2373 /*
2374 * Some internal paranoia first.
2375 */
2376 AssertPtr(pClass);
2377 Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
2378 AssertPtr(pThreadSelf);
2379 Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
2380 AssertPtr(pRec);
2381 AssertPtrNull(pSrcPos);
2382
2383 /*
2384 * Walk the stack, delegate problems to a worker routine.
2385 */
2386 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
2387 if (!pCur)
2388 return VINF_SUCCESS;
2389
2390 for (;;)
2391 {
2392 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2393
2394 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2395 pCur = pCur->Nest.pDown;
2396 else
2397 {
2398 uint32_t uPriorSubClass;
2399 PRTLOCKVALRECUNION pDown;
2400 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2401 if (pPriorClass != NIL_RTLOCKVALCLASS)
2402 {
2403 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2404 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2405 if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
2406 return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
2407 pPriorClass, pCur, pDown);
2408 }
2409 pCur = pDown;
2410 }
2411 if (!pCur)
2412 return VINF_SUCCESS;
2413 }
2414}
2415
2416
2417/**
2418 * Check that the lock record is the topmost one on the stack, complain and fail
2419 * if it isn't.
2420 *
2421 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
2422 * VERR_SEM_LV_INVALID_PARAMETER.
2423 * @param pThreadSelf The current thread.
2424 * @param pRec The record.
2425 */
2426static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2427{
2428 AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
2429 Assert(pThreadSelf == RTThreadSelf());
2430
2431 PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
2432 if (RT_LIKELY( pTop == pRec
2433 || ( pTop
2434 && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2435 && pTop->Nest.pRec == pRec) ))
2436 return VINF_SUCCESS;
2437
2438#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2439 /* Look for a recursion record so the right frame is dumped and marked. */
2440 while (pTop)
2441 {
2442 if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2443 {
2444 if (pTop->Nest.pRec == pRec)
2445 {
2446 pRec = pTop;
2447 break;
2448 }
2449 pTop = pTop->Nest.pDown;
2450 }
2451 else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2452 pTop = pTop->Excl.pDown;
2453 else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2454 pTop = pTop->ShrdOwner.pDown;
2455 else
2456 break;
2457 }
2458#endif
2459
2460 rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
2461 rtLockValComplainPanic();
2462 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
2463}
2464
2465
/**
 * Checks if all owners are blocked - shared record operated in signaller mode.
 *
 * Performs a lockless scan of the owner table, so all reads are unordered
 * and double-checked via magic values.
 *
 * @returns true / false accordingly.
 * @param   pRec        The shared lock record.
 * @param   pThreadSelf The current thread (counts as "blocked" for the
 *                      purpose of this check).
 */
DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
{
    PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
    uint32_t cAllocated = pRec->cAllocated;
    uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
    if (cEntries == 0)
        return false;            /* no owners at all -> not "all blocked" */

    for (uint32_t i = 0; i < cAllocated; i++)
    {
        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
        if (   pEntry
            && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
        {
            /* Any live owner that is running (and isn't us) means "no". */
            PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
            if (!pCurThread)
                return false;
            if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
                return false;
            if (   !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
                && pCurThread != pThreadSelf)
                return false;
            /* Stop early once we've seen all the live entries. */
            if (--cEntries == 0)
                break;
        }
        else
            Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
    }

    return true;
}
2504
2505
/**
 * Verifies the deadlock stack before calling it a deadlock.
 *
 * Re-checks every entry of the candidate cycle three times, yielding between
 * passes, so transient states (a thread just waking up, a record being
 * destroyed) don't produce false positives.
 *
 * @retval  VERR_SEM_LV_DEADLOCK if it's a deadlock.
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
 * @retval  VERR_TRY_AGAIN if something changed.
 *
 * @param   pStack      The deadlock detection stack.
 * @param   pThreadSelf The current thread.
 */
static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
{
    uint32_t const c = pStack->c;
    for (uint32_t iPass = 0; iPass < 3; iPass++)
    {
        /* Entry 0 is our own; only the other participants need re-checking. */
        for (uint32_t i = 1; i < c; i++)
        {
            PRTTHREADINT pThread = pStack->a[i].pThread;
            if (pThread->u32Magic != RTTHREADINT_MAGIC)
                return VERR_TRY_AGAIN;
            if (rtThreadGetState(pThread) != pStack->a[i].enmState)
                return VERR_TRY_AGAIN;
            if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
                return VERR_TRY_AGAIN;
            /* ASSUMES the signaller records won't have siblings! */
            PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
            if (   pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                && pRec->Shared.fSignaller
                && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
                return VERR_TRY_AGAIN;
        }
        RTThreadYield();
    }

    /* A one-entry cycle means we're blocking on a lock we already own. */
    if (c == 1)
        return VERR_SEM_LV_ILLEGAL_UPGRADE;
    return VERR_SEM_LV_DEADLOCK;
}
2544
2545
2546/**
2547 * Checks for stack cycles caused by another deadlock before returning.
2548 *
2549 * @retval VINF_SUCCESS if the stack is simply too small.
2550 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2551 *
2552 * @param pStack The deadlock detection stack.
2553 */
2554static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2555{
2556 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2557 {
2558 PRTTHREADINT pThread = pStack->a[i].pThread;
2559 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2560 if (pStack->a[j].pThread == pThread)
2561 return VERR_SEM_LV_EXISTING_DEADLOCK;
2562 }
2563 static bool volatile s_fComplained = false;
2564 if (!s_fComplained)
2565 {
2566 s_fComplained = true;
2567 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2568 }
2569 return VINF_SUCCESS;
2570}
2571
2572
/**
 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
 * detection.
 *
 * Implements an iterative depth-first walk of the wait-for graph: starting
 * from the record we are about to block on, follow each owner thread to the
 * record *it* is blocked on, until we either run out of edges (no deadlock)
 * or arrive back at the current thread (deadlock candidate, handed to
 * rtLockValidatorDdVerifyDeadlock).  All record/thread reads are lockless
 * and re-validated via magic values; anything that mutates under us yields
 * VERR_TRY_AGAIN further down the call chain.
 *
 * @retval  VINF_SUCCESS
 * @retval  VERR_SEM_LV_DEADLOCK
 * @retval  VERR_SEM_LV_EXISTING_DEADLOCK
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE
 * @retval  VERR_TRY_AGAIN
 *
 * @param   pStack          The stack to use.
 * @param   pOriginalRec    The original record.
 * @param   pThreadSelf     The calling thread.
 */
static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
                                        PRTTHREADINT const pThreadSelf)
{
    pStack->c = 0;

    /* We could use a single RTLOCKVALDDENTRY variable here, but the
       compiler may make a better job of it when using individual variables. */
    PRTLOCKVALRECUNION pRec = pOriginalRec;
    PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
    uint32_t iEntry = UINT32_MAX;               /* owner-table cursor for shared records */
    PRTTHREADINT pThread = NIL_RTTHREAD;
    RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
    for (uint32_t iLoop = 0; ; iLoop++)
    {
        /*
         * Process the current record.
         */
        RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);

        /* Find the next relevant owner thread and record. */
        PRTLOCKVALRECUNION pNextRec = NULL;
        RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
        PRTTHREADINT pNextThread = NIL_RTTHREAD;
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                Assert(iEntry == UINT32_MAX);
                /* Read the owner and the record it blocks on; retry the
                   read if the thread state changed underneath us. */
                for (;;)
                {
                    pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
                    if (   !pNextThread
                        || pNextThread->u32Magic != RTTHREADINT_MAGIC)
                        break;
                    enmNextState = rtThreadGetState(pNextThread);
                    if (   !RTTHREAD_IS_SLEEPING(enmNextState)
                        && pNextThread != pThreadSelf)
                        break;
                    pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
                    if (RT_LIKELY(   !pNextRec
                                  || enmNextState == rtThreadGetState(pNextThread)))
                        break;
                    pNextRec = NULL;
                }
                if (!pNextRec)
                {
                    /* No edge here; try the next sibling record, if any. */
                    pRec = pRec->Excl.pSibling;
                    if (   pRec
                        && pRec != pFirstSibling)
                        continue;
                    pNextThread = NIL_RTTHREAD;
                }
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                if (!pRec->Shared.fSignaller)
                {
                    /* Skip to the next sibling if same side. ASSUMES reader priority. */
                    /** @todo The read side of a read-write lock is problematic if
                     * the implementation prioritizes writers over readers because
                     * that means we should could deadlock against current readers
                     * if a writer showed up.  If the RW sem implementation is
                     * wrapping some native API, it's not so easy to detect when we
                     * should do this and when we shouldn't.  Checking when we
                     * shouldn't is subject to wakeup scheduling and cannot easily
                     * be made reliable.
                     *
                     * At the moment we circumvent all this mess by declaring that
                     * readers has priority.  This is TRUE on linux, but probably
                     * isn't on Solaris and FreeBSD. */
                    if (   pRec == pFirstSibling
                        && pRec->Shared.pSibling != NULL
                        && pRec->Shared.pSibling != pFirstSibling)
                    {
                        pRec = pRec->Shared.pSibling;
                        Assert(iEntry == UINT32_MAX);
                        continue;
                    }
                }

                /* Scan the owner table for blocked owners. */
                if (   ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
                    && (   !pRec->Shared.fSignaller
                        || iEntry != UINT32_MAX
                        || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
                       )
                   )
                {
                    uint32_t cAllocated = pRec->Shared.cAllocated;
                    PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
                    while (++iEntry < cAllocated)
                    {
                        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
                        if (pEntry)
                        {
                            /* Same lockless read-and-revalidate dance as for
                               the exclusive case above. */
                            for (;;)
                            {
                                if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
                                    break;
                                pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
                                if (   !pNextThread
                                    || pNextThread->u32Magic != RTTHREADINT_MAGIC)
                                    break;
                                enmNextState = rtThreadGetState(pNextThread);
                                if (   !RTTHREAD_IS_SLEEPING(enmNextState)
                                    && pNextThread != pThreadSelf)
                                    break;
                                pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
                                if (RT_LIKELY(   !pNextRec
                                              || enmNextState == rtThreadGetState(pNextThread)))
                                    break;
                                pNextRec = NULL;
                            }
                            if (pNextRec)
                                break;
                        }
                        else
                            Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
                    }
                    if (pNextRec)
                        break;
                    pNextThread = NIL_RTTHREAD;
                }

                /* Advance to the next sibling, if any. */
                pRec = pRec->Shared.pSibling;
                if (   pRec != NULL
                    && pRec != pFirstSibling)
                {
                    iEntry = UINT32_MAX;
                    continue;
                }
                break;

            case RTLOCKVALRECEXCL_MAGIC_DEAD:
            case RTLOCKVALRECSHRD_MAGIC_DEAD:
                /* Record destroyed while we were looking - dead end. */
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
            case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
            default:
                AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core));
                break;
        }

        if (pNextRec)
        {
            /*
             * Recurse and check for deadlock.
             */
            uint32_t i = pStack->c;
            if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
                return rtLockValidatorDdHandleStackOverflow(pStack);

            pStack->c++;
            pStack->a[i].pRec = pRec;
            pStack->a[i].iEntry = iEntry;
            pStack->a[i].enmState = enmState;
            pStack->a[i].pThread = pThread;
            pStack->a[i].pFirstSibling = pFirstSibling;

            /* Edge leading back to the current thread -> deadlock candidate,
               hand over for careful re-verification. */
            if (RT_UNLIKELY(   pNextThread == pThreadSelf
                            && (   i != 0
                                || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
                                || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
                           )
               )
                return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);

            pRec = pNextRec;
            pFirstSibling = pNextRec;
            iEntry = UINT32_MAX;
            enmState = enmNextState;
            pThread = pNextThread;
        }
        else
        {
            /*
             * No deadlock here, unwind the stack and deal with any unfinished
             * business there.
             */
            uint32_t i = pStack->c;
            for (;;)
            {
                /* pop */
                if (i == 0)
                    return VINF_SUCCESS;
                i--;
                pRec = pStack->a[i].pRec;
                iEntry = pStack->a[i].iEntry;

                /* Examine it. */
                uint32_t u32Magic = pRec->Core.u32Magic;
                if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
                    pRec = pRec->Excl.pSibling;
                else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
                {
                    if (iEntry + 1 < pRec->Shared.cAllocated)
                        break;      /* continue processing this record. */
                    pRec = pRec->Shared.pSibling;
                }
                else
                {
                    Assert(   u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
                           || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
                    continue;
                }

                /* Any next record to advance to? */
                if (   !pRec
                    || pRec == pStack->a[i].pFirstSibling)
                    continue;
                iEntry = UINT32_MAX;
                break;
            }

            /* Restore the rest of the state and update the stack. */
            pFirstSibling = pStack->a[i].pFirstSibling;
            enmState = pStack->a[i].enmState;
            pThread = pStack->a[i].pThread;
            pStack->c = i;
        }

        Assert(iLoop != 1000000);   /* sanity: the walk should terminate long before this */
    }
}
2812
2813
2814/**
2815 * Check for the simple no-deadlock case.
2816 *
2817 * @returns true if no deadlock, false if further investigation is required.
2818 *
2819 * @param pOriginalRec The original record.
2820 */
2821DECLINLINE(int) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2822{
2823 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2824 && !pOriginalRec->Excl.pSibling)
2825 {
2826 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2827 if ( !pThread
2828 || pThread->u32Magic != RTTHREADINT_MAGIC)
2829 return true;
2830 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2831 if (!RTTHREAD_IS_SLEEPING(enmState))
2832 return true;
2833 }
2834 return false;
2835}
2836
2837
/**
 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
 *
 * Dumps the full deadlock chain (unless the validator is in quiet mode) and
 * then panics if panicking is enabled.
 *
 * @param   pStack      The chain of locks causing the deadlock.
 * @param   pRec        The record relating to the current thread's lock
 *                      operation.
 * @param   pThreadSelf This thread.
 * @param   pSrcPos     Where we are going to deadlock.
 * @param   rc          The return code (selects the headline message).
 */
static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
                                                 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        const char *pszWhat;
        switch (rc)
        {
            case VERR_SEM_LV_DEADLOCK:          pszWhat = "Detected deadlock!"; break;
            case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
            case VERR_SEM_LV_ILLEGAL_UPGRADE:   pszWhat = "Illegal lock upgrade!"; break;
            default:                            AssertFailed(); pszWhat = "!unexpected rc!"; break;
        }
        /* Only mention pRec separately when it isn't already chain entry #0. */
        rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
        rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
        for (uint32_t i = 0; i < pStack->c; i++)
        {
            char szPrefix[24];
            RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
            /* For shared records, point at the specific owner entry that is
               part of the cycle (iEntry); it may have died since, hence the
               VALID_PTR + magic re-check. */
            PRTLOCKVALRECUNION pShrdOwner = NULL;
            if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
                pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
            if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
            {
                rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
                rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
            }
            else
            {
                rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
                if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
                    rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
            }
        }
        rtLockValComplainMore("---- end of deadlock chain ----\n");
    }

    rtLockValComplainPanic();
}
2887
2888
2889/**
2890 * Perform deadlock detection.
2891 *
2892 * @retval VINF_SUCCESS
2893 * @retval VERR_SEM_LV_DEADLOCK
2894 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2895 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2896 *
2897 * @param pRec The record relating to the current thread's lock
2898 * operation.
2899 * @param pThreadSelf The current thread.
2900 * @param pSrcPos The position of the current lock operation.
2901 */
2902static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
2903{
2904 RTLOCKVALDDSTACK Stack;
2905 int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2906 if (RT_SUCCESS(rc))
2907 return VINF_SUCCESS;
2908
2909 if (rc == VERR_TRY_AGAIN)
2910 {
2911 for (uint32_t iLoop = 0; ; iLoop++)
2912 {
2913 rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2914 if (RT_SUCCESS_NP(rc))
2915 return VINF_SUCCESS;
2916 if (rc != VERR_TRY_AGAIN)
2917 break;
2918 RTThreadYield();
2919 if (iLoop >= 3)
2920 return VINF_SUCCESS;
2921 }
2922 }
2923
2924 rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
2925 return rc;
2926}
2927
2928
2929RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2930 void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
2931{
2932 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2933 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
2934 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
2935 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
2936 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
2937
2938 pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
2939 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
2940 pRec->afReserved[0] = 0;
2941 pRec->afReserved[1] = 0;
2942 pRec->afReserved[2] = 0;
2943 rtLockValidatorSrcPosInit(&pRec->SrcPos);
2944 pRec->hThread = NIL_RTTHREAD;
2945 pRec->pDown = NULL;
2946 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
2947 pRec->uSubClass = uSubClass;
2948 pRec->cRecursion = 0;
2949 pRec->hLock = hLock;
2950 pRec->pSibling = NULL;
2951 if (pszNameFmt)
2952 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
2953 else
2954 {
2955 static uint32_t volatile s_cAnonymous = 0;
2956 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
2957 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
2958 }
2959
2960 /* Lazy initialization. */
2961 if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
2962 rtLockValidatorLazyInit();
2963}
2964
2965
2966RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2967 void *hLock, bool fEnabled, const char *pszNameFmt, ...)
2968{
2969 va_list va;
2970 va_start(va, pszNameFmt);
2971 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
2972 va_end(va);
2973}
2974
2975
2976RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2977 uint32_t uSubClass, void *pvLock, bool fEnabled,
2978 const char *pszNameFmt, va_list va)
2979{
2980 PRTLOCKVALRECEXCL pRec;
2981 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
2982 if (!pRec)
2983 return VERR_NO_MEMORY;
2984 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2985 return VINF_SUCCESS;
2986}
2987
2988
2989RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2990 uint32_t uSubClass, void *pvLock, bool fEnabled,
2991 const char *pszNameFmt, ...)
2992{
2993 va_list va;
2994 va_start(va, pszNameFmt);
2995 int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2996 va_end(va);
2997 return rc;
2998}
2999
3000
/**
 * Uninitializes an exclusive lock validator record (in place).
 *
 * The record memory itself is not freed; see RTLockValidatorRecExclDestroy
 * for the heap-allocated variant.
 *
 * @param   pRec    The record to delete.
 */
RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);

    /* Keep concurrent deadlock detection out while we tear the record down. */
    rtLockValidatorSerializeDestructEnter();

    /** @todo Check that it's not on our stack first.  Need to make it
     *        configurable whether deleting a owned lock is acceptable? */

    /* Kill the magic first so any racing reader rejects the record, then
       clear the owner and detach the class handle.  The class reference is
       released only after leaving the destruction lock. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
    ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    rtLockValidatorSerializeDestructLeave();
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3020
3021
3022RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3023{
3024 PRTLOCKVALRECEXCL pRec = *ppRec;
3025 *ppRec = NULL;
3026 if (pRec)
3027 {
3028 RTLockValidatorRecExclDelete(pRec);
3029 RTMemFree(pRec);
3030 }
3031}
3032
3033
3034RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
3035{
3036 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3037 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3038 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3039 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3040 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3041 RTLOCKVAL_SUB_CLASS_INVALID);
3042 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3043}
3044
3045
/**
 * Records the calling thread as the owner of the exclusive lock, or bumps the
 * recursion count if it already owns it.
 *
 * @param   pRec            The exclusive lock record.  NULL is quietly ignored.
 * @param   hThreadSelf     The calling thread; NIL_RTTHREAD means adopt the
 *                          current native thread.
 * @param   pSrcPos         The source position of the lock operation.
 * @param   fFirstRecursion Caller's expectation that this is the first
 *                          acquisition; only asserted on, never acted upon.
 */
RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                            PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return;
    AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (!pRecU->Excl.fEnabled)
        return;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        /* Adopt threads not created through IPRT so we can track them too. */
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
    Assert(hThreadSelf == RTThreadSelf());

    ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);

    if (pRecU->Excl.hThread == hThreadSelf)
    {
        /* Already the owner: this is a recursive acquisition. */
        Assert(!fFirstRecursion); RT_NOREF_PV(fFirstRecursion);
        pRecU->Excl.cRecursion++;
        rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
    }
    else
    {
        Assert(pRecU->Excl.hThread == NIL_RTTHREAD);

        /* First acquisition.  The recursion count is written before the
           owner handle, presumably so that anyone observing a non-NIL
           hThread also sees a sane count — TODO confirm ordering intent. */
        rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
        ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
        ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);

        rtLockValidatorStackPush(hThreadSelf, pRecU);
    }
}
3082
3083
3084/**
3085 * Internal worker for RTLockValidatorRecExclReleaseOwner and
3086 * RTLockValidatorRecExclReleaseOwnerUnchecked.
3087 */
3088static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
3089{
3090 RTTHREADINT *pThread = pRec->Excl.hThread;
3091 AssertReturnVoid(pThread != NIL_RTTHREAD);
3092 Assert(pThread == RTThreadSelf());
3093
3094 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
3095 uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
3096 if (c == 0)
3097 {
3098 rtLockValidatorStackPop(pThread, pRec);
3099 ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
3100 }
3101 else
3102 {
3103 Assert(c < UINT32_C(0xffff0000));
3104 Assert(!fFinalRecursion); RT_NOREF_PV(fFinalRecursion);
3105 rtLockValidatorStackPopRecursion(pThread, pRec);
3106 }
3107}
3108
/**
 * Releases (one recursion of) the exclusive lock ownership, first checking
 * the release order when the lock class demands strict ordering.
 *
 * @returns VINF_SUCCESS, or a VERR_SEM_LV_* status from the order check.
 * @param   pRec            The exclusive lock record.  NULL is quietly ignored.
 * @param   fFinalRecursion Caller's expectation that this is the last release
 *                          (asserted on by the worker).
 */
RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc; /* Ownership is NOT released when the order check fails. */
    }

    /*
     * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
     */
    rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
    return VINF_SUCCESS;
}
3137
3138
3139RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
3140{
3141 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3142 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3143 if (pRecU->Excl.fEnabled)
3144 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
3145}
3146
3147
/**
 * Records a recursive (nested) acquisition of an already-owned exclusive lock.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_NESTED if the class forbids recursion,
 *          or VERR_SEM_LV_INVALID_PARAMETER on a bad record/owner state.
 * @param   pRec        The exclusive lock record.  NULL is quietly ignored.
 * @param   pSrcPos     The source position of the lock operation.
 */
RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    /* Classes may forbid recursive locking entirely. */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        rtLockValComplainFirst("Recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    /* Sanity bound on the recursion depth (debug builds only). */
    Assert(pRecU->Excl.cRecursion < _1M);
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
    return VINF_SUCCESS;
}
3173
3174
/**
 * Unwinds one recursion level of an exclusively owned lock (the counterpart
 * of RTLockValidatorRecExclRecursion); never the final release.
 *
 * @returns VINF_SUCCESS, or a VERR_SEM_LV_* status on invalid state or a
 *          release-order violation.
 * @param   pRec    The exclusive lock record; must be owned with a recursion
 *                  count greater than one.
 */
RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3205
3206
/**
 * Records a mixed recursion: re-entering an exclusively owned lock through a
 * sibling record (shared or exclusive).
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_NESTED if the class forbids recursion,
 *          or VERR_SEM_LV_INVALID_PARAMETER on bad record/owner state.
 * @param   pRec        The exclusive lock record being recursed upon.
 * @param   pRecMixed   The sibling record; only validated here, the actual
 *                      recursion is accounted on @a pRec.
 * @param   pSrcPos     The source position of the lock operation.
 */
RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    /* Classes may forbid recursive locking entirely. */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        rtLockValComplainFirst("Mixed recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    /* Sanity bound on the recursion depth (debug builds only). */
    Assert(pRecU->Excl.cRecursion < _1M);
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);

    return VINF_SUCCESS;
}
3236
3237
/**
 * Unwinds one mixed recursion level (the counterpart of
 * RTLockValidatorRecExclRecursionMixed); never the final release.
 *
 * @returns VINF_SUCCESS, or a VERR_SEM_LV_* status on invalid state or a
 *          release-order violation.
 * @param   pRec        The exclusive lock record being unwound.
 * @param   pRecMixed   The sibling record; only validated here.
 */
RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3272
3273
/**
 * Checks the locking order before acquiring an exclusive lock.
 *
 * @returns VINF_SUCCESS (also when order validation is disabled or this is a
 *          recursion), or a VERR_SEM_LV_* order violation status.
 * @param   pRec        The exclusive lock record.  NULL is quietly ignored.
 * @param   hThreadSelf The calling thread; NIL_RTTHREAD means adopt the
 *                      current native thread.
 * @param   pSrcPos     The source position of the lock operation.
 * @param   cMillies    The intended wait time; waits shorter than the class
 *                      minimum skip order validation.
 */
RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                             PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input.  Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Excl.fEnabled
        || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Excl.hClass->cMsMinOrder > cMillies)
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     */
    if (pRec->hThread == hThreadSelf)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
}
3306
3307
3308RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3309 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3310 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3311{
3312 /*
3313 * Fend off wild life.
3314 */
3315 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3316 if (!pRecU)
3317 return VINF_SUCCESS;
3318 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3319 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3320 if (!pRec->fEnabled)
3321 return VINF_SUCCESS;
3322
3323 PRTTHREADINT pThreadSelf = hThreadSelf;
3324 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3325 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3326 Assert(pThreadSelf == RTThreadSelf());
3327
3328 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3329
3330 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3331 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3332 {
3333 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3334 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3335 , VERR_SEM_LV_INVALID_PARAMETER);
3336 enmSleepState = enmThreadState;
3337 }
3338
3339 /*
3340 * Record the location.
3341 */
3342 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3343 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3344 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3345 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3346 rtThreadSetState(pThreadSelf, enmSleepState);
3347
3348 /*
3349 * Don't do deadlock detection if we're recursing.
3350 *
3351 * On some hosts we don't do recursion accounting our selves and there
3352 * isn't any other place to check for this.
3353 */
3354 int rc = VINF_SUCCESS;
3355 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3356 {
3357 if ( !fRecursiveOk
3358 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3359 && !pRecU->Excl.hClass->fRecursionOk))
3360 {
3361 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3362 rtLockValComplainPanic();
3363 rc = VERR_SEM_LV_NESTED;
3364 }
3365 }
3366 /*
3367 * Perform deadlock detection.
3368 */
3369 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3370 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3371 || pRecU->Excl.hClass->cMsMinDeadlock > RT_INDEFINITE_WAIT))
3372 rc = VINF_SUCCESS;
3373 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3374 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3375
3376 if (RT_SUCCESS(rc))
3377 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3378 else
3379 {
3380 rtThreadSetState(pThreadSelf, enmThreadState);
3381 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3382 }
3383 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3384 return rc;
3385}
3386RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3387
3388
3389RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3390 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3391 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3392{
3393 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3394 if (RT_SUCCESS(rc))
3395 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3396 enmSleepState, fReallySleeping);
3397 return rc;
3398}
3399RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
3400
3401
3402RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3403 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
3404{
3405 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
3406 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
3407 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3408 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3409 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
3410
3411 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
3412 pRec->uSubClass = uSubClass;
3413 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
3414 pRec->hLock = hLock;
3415 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
3416 pRec->fSignaller = fSignaller;
3417 pRec->pSibling = NULL;
3418
3419 /* the table */
3420 pRec->cEntries = 0;
3421 pRec->iLastEntry = 0;
3422 pRec->cAllocated = 0;
3423 pRec->fReallocating = false;
3424 pRec->fPadding = false;
3425 pRec->papOwners = NULL;
3426
3427 /* the name */
3428 if (pszNameFmt)
3429 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
3430 else
3431 {
3432 static uint32_t volatile s_cAnonymous = 0;
3433 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
3434 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
3435 }
3436}
3437
3438
3439RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3440 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
3441{
3442 va_list va;
3443 va_start(va, pszNameFmt);
3444 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
3445 va_end(va);
3446}
3447
3448
3449RTDECL(int) RTLockValidatorRecSharedCreateV(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
3450 uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
3451 const char *pszNameFmt, va_list va)
3452{
3453 PRTLOCKVALRECSHRD pRec;
3454 *ppRec = pRec = (PRTLOCKVALRECSHRD)RTMemAlloc(sizeof(*pRec));
3455 if (!pRec)
3456 return VERR_NO_MEMORY;
3457 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
3458 return VINF_SUCCESS;
3459}
3460
3461
3462RTDECL(int) RTLockValidatorRecSharedCreate(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
3463 uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
3464 const char *pszNameFmt, ...)
3465{
3466 va_list va;
3467 va_start(va, pszNameFmt);
3468 int rc = RTLockValidatorRecSharedCreateV(ppRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
3469 va_end(va);
3470 return rc;
3471}
3472
3473
/**
 * Uninitializes a shared lock validator record (in place), freeing the owner
 * table but not the record itself.
 *
 * @param   pRec    The record to delete.
 */
RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);

    /** @todo Check that it's not on our stack first.  Need to make it
     *        configurable whether deleting a owned lock is acceptable? */

    /*
     * Flip it into table realloc mode and take the destruction lock.
     */
    rtLockValidatorSerializeDestructEnter();
    while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
    {
        /* Someone else is reallocating the table; back off completely and
           cycle through the detection lock to let them finish, then retry. */
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        rtLockValidatorSerializeDetectionLeave();

        rtLockValidatorSerializeDestructEnter();
    }

    /* Kill the magic first so racing readers reject the record, then take
       down the owner table and class handle. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->papOwners)
    {
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
        ASMAtomicUoWriteNullPtr(&pRec->papOwners);
        ASMAtomicUoWriteU32(&pRec->cAllocated, 0);

        RTMemFree((void *)papOwners);
    }
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    ASMAtomicWriteBool(&pRec->fReallocating, false);

    rtLockValidatorSerializeDestructLeave();

    /* Release the class reference outside the destruction lock. */
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3515
3516
3517RTDECL(void) RTLockValidatorRecSharedDestroy(PRTLOCKVALRECSHRD *ppRec)
3518{
3519 PRTLOCKVALRECSHRD pRec = *ppRec;
3520 *ppRec = NULL;
3521 if (pRec)
3522 {
3523 RTLockValidatorRecSharedDelete(pRec);
3524 RTMemFree(pRec);
3525 }
3526}
3527
3528
3529RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
3530{
3531 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3532 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3533 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3534 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3535 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3536 RTLOCKVAL_SUB_CLASS_INVALID);
3537 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3538}
3539
3540
3541/**
3542 * Locates an owner (thread) in a shared lock record.
3543 *
3544 * @returns Pointer to the owner entry on success, NULL on failure..
3545 * @param pShared The shared lock record.
3546 * @param hThread The thread (owner) to find.
3547 * @param piEntry Where to optionally return the table in index.
3548 * Optional.
3549 */
3550DECLINLINE(PRTLOCKVALRECUNION)
3551rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
3552{
3553 rtLockValidatorSerializeDetectionEnter();
3554
3555 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3556 if (papOwners)
3557 {
3558 uint32_t const cMax = pShared->cAllocated;
3559 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3560 {
3561 PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
3562 if (pEntry && pEntry->ShrdOwner.hThread == hThread)
3563 {
3564 rtLockValidatorSerializeDetectionLeave();
3565 if (piEntry)
3566 *piEntry = iEntry;
3567 return pEntry;
3568 }
3569 }
3570 }
3571
3572 rtLockValidatorSerializeDetectionLeave();
3573 return NULL;
3574}
3575
3576
3577RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3578 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3579{
3580 /*
3581 * Validate and adjust input. Quit early if order validation is disabled.
3582 */
3583 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3584 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3585 if ( !pRecU->Shared.fEnabled
3586 || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
3587 || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3588 || pRecU->Shared.hClass->cMsMinOrder > cMillies
3589 )
3590 return VINF_SUCCESS;
3591
3592 if (hThreadSelf == NIL_RTTHREAD)
3593 {
3594 hThreadSelf = RTThreadSelfAutoAdopt();
3595 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3596 }
3597 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3598 Assert(hThreadSelf == RTThreadSelf());
3599
3600 /*
3601 * Detect recursion as it isn't subject to order restrictions.
3602 */
3603 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
3604 if (pEntry)
3605 return VINF_SUCCESS;
3606
3607 return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
3608}
3609
3610
/**
 * Performs pre-block checks (recursion policy and deadlock detection) before
 * the calling thread goes to sleep on a shared lock.
 *
 * On success the thread's validator state is left recording the sleep; on
 * failure the thread state is restored to what it was.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_NESTED, or a deadlock/parameter status.
 * @param   pRec            The shared lock record (validated via its magic).
 * @param   hThreadSelf     The calling thread (must be the current thread).
 * @param   pSrcPos         The source position of the lock operation.
 * @param   fRecursiveOk    Whether recursive acquisition is acceptable here.
 * @param   cMillies        The intended wait time; short waits may skip
 *                          deadlock detection per the class settings.
 * @param   enmSleepState   The sleep state to enter.
 * @param   fReallySleeping Whether the thread will really block.
 */
RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                  PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
                                                  RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Shared.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);

    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
    {
        AssertReturn(   enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                     || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                     , VERR_SEM_LV_INVALID_PARAMETER);
        enmSleepState = enmThreadState;
    }

    /*
     * Record the location.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
    pThreadSelf->LockValidator.enmRecState = enmSleepState;
    rtThreadSetState(pThreadSelf, enmSleepState);

    /*
     * Don't do deadlock detection if we're recursing.
     * (Signaller records have no owner-based recursion concept, so skip
     * the owner lookup for them.)
     */
    int rc = VINF_SUCCESS;
    PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
                              ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
                              : NULL;
    if (pEntry)
    {
        if (   !fRecursiveOk
            || (   pRec->hClass
                && !pRec->hClass->fRecursionOk)
            )
        {
            rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
            rtLockValComplainPanic();
            rc = VERR_SEM_LV_NESTED;
        }
    }
    /*
     * Perform deadlock detection.
     */
    else if (   pRec->hClass
             && (   pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
                 || pRec->hClass->cMsMinDeadlock > cMillies))
        rc = VINF_SUCCESS;
    else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);

    if (RT_SUCCESS(rc))
        ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
    else
    {
        /* Roll the thread state back on failure. */
        rtThreadSetState(pThreadSelf, enmThreadState);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
    }
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
    return rc;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
3689
3690
3691RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3692 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3693 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3694{
3695 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3696 if (RT_SUCCESS(rc))
3697 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3698 enmSleepState, fReallySleeping);
3699 return rc;
3700}
3701RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
3702
3703
3704/**
3705 * Allocates and initializes an owner entry for the shared lock record.
3706 *
3707 * @returns The new owner entry.
3708 * @param pRec The shared lock record.
3709 * @param pThreadSelf The calling thread and owner. Used for record
3710 * initialization and allocation.
3711 * @param pSrcPos The source position.
3712 */
3713DECLINLINE(PRTLOCKVALRECUNION)
3714rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
3715{
3716 PRTLOCKVALRECUNION pEntry;
3717
3718 /*
3719 * Check if the thread has any statically allocated records we can easily
3720 * make use of.
3721 */
3722 unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
3723 if ( iEntry > 0
3724 && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
3725 {
3726 pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
3727 Assert(!pEntry->ShrdOwner.fReserved);
3728 pEntry->ShrdOwner.fStaticAlloc = true;
3729 rtThreadGet(pThreadSelf);
3730 }
3731 else
3732 {
3733 pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
3734 if (RT_UNLIKELY(!pEntry))
3735 return NULL;
3736 pEntry->ShrdOwner.fStaticAlloc = false;
3737 }
3738
3739 pEntry->Core.u32Magic = RTLOCKVALRECSHRDOWN_MAGIC;
3740 pEntry->ShrdOwner.cRecursion = 1;
3741 pEntry->ShrdOwner.fReserved = true;
3742 pEntry->ShrdOwner.hThread = pThreadSelf;
3743 pEntry->ShrdOwner.pDown = NULL;
3744 pEntry->ShrdOwner.pSharedRec = pRec;
3745#if HC_ARCH_BITS == 32
3746 pEntry->ShrdOwner.pvReserved = NULL;
3747#endif
3748 if (pSrcPos)
3749 pEntry->ShrdOwner.SrcPos = *pSrcPos;
3750 else
3751 rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
3752 return pEntry;
3753}
3754
3755
3756/**
3757 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
3758 *
3759 * @param pEntry The owner entry.
3760 */
3761DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
3762{
3763 if (pEntry)
3764 {
3765 Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
3766 ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
3767
3768 PRTTHREADINT pThread;
3769 ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);
3770
3771 Assert(pEntry->fReserved);
3772 pEntry->fReserved = false;
3773
3774 if (pEntry->fStaticAlloc)
3775 {
3776 AssertPtrReturnVoid(pThread);
3777 AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);
3778
3779 uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
3780 AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));
3781
3782 Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry));
3783 ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry);
3784
3785 rtThreadRelease(pThread);
3786 }
3787 else
3788 {
3789 rtLockValidatorSerializeDestructEnter();
3790 rtLockValidatorSerializeDestructLeave();
3791
3792 RTMemFree(pEntry);
3793 }
3794 }
3795}
3796
3797
3798/**
3799 * Make more room in the table.
3800 *
3801 * @retval true on success
3802 * @retval false if we're out of memory or running into a bad race condition
3803 * (probably a bug somewhere). No longer holding the lock.
3804 *
3805 * @param pShared The shared lock record.
3806 */
3807static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
3808{
3809 for (unsigned i = 0; i < 1000; i++)
3810 {
3811 /*
3812 * Switch to the other data access direction.
3813 */
3814 rtLockValidatorSerializeDetectionLeave();
3815 if (i >= 10)
3816 {
3817 Assert(i != 10 && i != 100);
3818 RTThreadSleep(i >= 100);
3819 }
3820 rtLockValidatorSerializeDestructEnter();
3821
3822 /*
3823 * Try grab the privilege to reallocating the table.
3824 */
3825 if ( pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3826 && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
3827 {
3828 uint32_t cAllocated = pShared->cAllocated;
3829 if (cAllocated < pShared->cEntries)
3830 {
3831 /*
3832 * Ok, still not enough space. Reallocate the table.
3833 */
3834#if 0 /** @todo enable this after making sure growing works flawlessly. */
3835 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
3836#else
3837 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
3838#endif
3839 PRTLOCKVALRECSHRDOWN *papOwners;
3840 papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
3841 (cAllocated + cInc) * sizeof(void *));
3842 if (!papOwners)
3843 {
3844 ASMAtomicWriteBool(&pShared->fReallocating, false);
3845 rtLockValidatorSerializeDestructLeave();
3846 /* RTMemRealloc will assert */
3847 return false;
3848 }
3849
3850 while (cInc-- > 0)
3851 {
3852 papOwners[cAllocated] = NULL;
3853 cAllocated++;
3854 }
3855
3856 ASMAtomicWritePtr(&pShared->papOwners, papOwners);
3857 ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
3858 }
3859 ASMAtomicWriteBool(&pShared->fReallocating, false);
3860 }
3861 rtLockValidatorSerializeDestructLeave();
3862
3863 rtLockValidatorSerializeDetectionEnter();
3864 if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
3865 break;
3866
3867 if (pShared->cAllocated >= pShared->cEntries)
3868 return true;
3869 }
3870
3871 rtLockValidatorSerializeDetectionLeave();
3872 AssertFailed(); /* too many iterations or destroyed while racing. */
3873 return false;
3874}
3875
3876
/**
 * Adds an owner entry to a shared lock record.
 *
 * @returns true on success, false on serious race or if we're out of memory.
 * @param   pShared             The shared lock record.
 * @param   pEntry              The owner entry.
 */
DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
    {
        /* Reserve a slot; grow the table if the new count exceeds the allocation. */
        if (    ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
            &&  !rtLockValidatorRecSharedMakeRoom(pShared))
            return false; /* the worker has already left the lock */

        /* Claim the first free (NULL) slot via compare-exchange. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const                 cMax      = pShared->cAllocated;
        for (unsigned i = 0; i < 100; i++)
        {
            for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
            {
                if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], pEntry, NULL))
                {
                    rtLockValidatorSerializeDetectionLeave();
                    return true;
                }
            }
            Assert(i != 25);
        }
        AssertFailed();
    }
    rtLockValidatorSerializeDetectionLeave();
    return false;
}
3912
3913
/**
 * Remove an owner entry from a shared lock record and free it.
 *
 * @param   pShared             The shared lock record.
 * @param   pEntry              The owner entry to remove.
 * @param   iEntry              The last known index of the entry (hint; the
 *                              table is rescanned if it no longer matches).
 */
DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
                                                            uint32_t iEntry)
{
    /*
     * Remove it from the table.
     */
    rtLockValidatorSerializeDetectionEnter();
    AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
    if (RT_UNLIKELY(    iEntry >= pShared->cAllocated
                    ||  !ASMAtomicCmpXchgPtr(&pShared->papOwners[iEntry], NULL, pEntry)))
    {
        /* this shouldn't happen yet... */
        AssertFailed();
        /* The hint was stale - fall back to scanning the whole table. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const                 cMax      = pShared->cAllocated;
        for (iEntry = 0; iEntry < cMax; iEntry++)
            if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], NULL, pEntry))
                break;
        AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
    }
    uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
    Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow); /* sign bit set would mean underflow */
    rtLockValidatorSerializeDetectionLeave();

    /*
     * Successfully removed, now free it.
     */
    rtLockValidatorRecSharedFreeOwner(pEntry);
}
3950
3951
RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
    AssertReturnVoid(pRec->fSignaller); /* resetting is only valid for signaller records */

    /*
     * Free all current owners.
     */
    rtLockValidatorSerializeDetectionEnter();
    while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
    {
        AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
        uint32_t                       iEntry     = 0;
        uint32_t                       cEntries   = pRec->cAllocated;
        PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
        while (iEntry < cEntries)
        {
            /* Claim the slot atomically so nobody else frees the same entry. */
            PRTLOCKVALRECSHRDOWN pEntry = ASMAtomicXchgPtrT(&papEntries[iEntry], NULL, PRTLOCKVALRECSHRDOWN);
            if (pEntry)
            {
                ASMAtomicDecU32(&pRec->cEntries);
                /* Must not hold the detection lock while freeing the owner. */
                rtLockValidatorSerializeDetectionLeave();

                rtLockValidatorRecSharedFreeOwner(pEntry);

                rtLockValidatorSerializeDetectionEnter();
                if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
                    break;
                /* The table may have been reallocated while the lock was dropped. */
                cEntries   = pRec->cAllocated;
                papEntries = pRec->papOwners;
            }
            iEntry++;
        }
    }
    rtLockValidatorSerializeDetectionLeave();

    if (hThread != NIL_RTTHREAD)
    {
        /*
         * Allocate a new owner entry and insert it into the table.
         */
        PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
        if (    pEntry
            &&  !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
4003
4004
4005RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
4006{
4007 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
4008 if (!pRec->fEnabled)
4009 return;
4010 if (hThread == NIL_RTTHREAD)
4011 {
4012 hThread = RTThreadSelfAutoAdopt();
4013 AssertReturnVoid(hThread != NIL_RTTHREAD);
4014 }
4015 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
4016
4017 /*
4018 * Recursive?
4019 *
4020 * Note! This code can be optimized to try avoid scanning the table on
4021 * insert. However, that's annoying work that makes the code big,
4022 * so it can wait til later sometime.
4023 */
4024 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4025 if (pEntry)
4026 {
4027 Assert(!pRec->fSignaller);
4028 pEntry->ShrdOwner.cRecursion++;
4029 rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
4030 return;
4031 }
4032
4033 /*
4034 * Allocate a new owner entry and insert it into the table.
4035 */
4036 pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
4037 if (pEntry)
4038 {
4039 if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
4040 {
4041 if (!pRec->fSignaller)
4042 rtLockValidatorStackPush(hThread, pEntry);
4043 }
4044 else
4045 rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
4046 }
4047}
4048RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);
4049
4050
4051RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4052{
4053 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
4054 if (!pRec->fEnabled)
4055 return;
4056 if (hThread == NIL_RTTHREAD)
4057 {
4058 hThread = RTThreadSelfAutoAdopt();
4059 AssertReturnVoid(hThread != NIL_RTTHREAD);
4060 }
4061 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
4062
4063 /*
4064 * Find the entry hope it's a recursive one.
4065 */
4066 uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
4067 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
4068 AssertReturnVoid(pEntry);
4069 AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);
4070
4071 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4072 if (c == 0)
4073 {
4074 if (!pRec->fSignaller)
4075 rtLockValidatorStackPop(hThread, (PRTLOCKVALRECUNION)pEntry);
4076 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4077 }
4078 else
4079 {
4080 Assert(!pRec->fSignaller);
4081 rtLockValidatorStackPopRecursion(hThread, pEntry);
4082 }
4083}
4084RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
4085
4086
4087RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4088{
4089 /* Validate and resolve input. */
4090 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
4091 if (!pRec->fEnabled)
4092 return false;
4093 if (hThread == NIL_RTTHREAD)
4094 {
4095 hThread = RTThreadSelfAutoAdopt();
4096 AssertReturn(hThread != NIL_RTTHREAD, false);
4097 }
4098 AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);
4099
4100 /* Do the job. */
4101 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4102 return pEntry != NULL;
4103}
4104RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);
4105
4106
4107RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4108{
4109 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4110 if (!pRec->fEnabled)
4111 return VINF_SUCCESS;
4112 if (hThreadSelf == NIL_RTTHREAD)
4113 {
4114 hThreadSelf = RTThreadSelfAutoAdopt();
4115 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4116 }
4117 Assert(hThreadSelf == RTThreadSelf());
4118 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4119
4120 /*
4121 * Locate the entry for this thread in the table.
4122 */
4123 uint32_t iEntry = 0;
4124 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4125 if (RT_UNLIKELY(!pEntry))
4126 {
4127 rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4128 rtLockValComplainPanic();
4129 return VERR_SEM_LV_NOT_OWNER;
4130 }
4131
4132 /*
4133 * Check the release order.
4134 */
4135 if ( pRec->hClass != NIL_RTLOCKVALCLASS
4136 && pRec->hClass->fStrictReleaseOrder
4137 && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT
4138 )
4139 {
4140 int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, (PRTLOCKVALRECUNION)pEntry);
4141 if (RT_FAILURE(rc))
4142 return rc;
4143 }
4144
4145 /*
4146 * Release the ownership or unwind a level of recursion.
4147 */
4148 Assert(pEntry->ShrdOwner.cRecursion > 0);
4149 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4150 if (c == 0)
4151 {
4152 rtLockValidatorStackPop(hThreadSelf, pEntry);
4153 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4154 }
4155 else
4156 rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);
4157
4158 return VINF_SUCCESS;
4159}
4160
4161
4162RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4163{
4164 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4165 if (!pRec->fEnabled)
4166 return VINF_SUCCESS;
4167 if (hThreadSelf == NIL_RTTHREAD)
4168 {
4169 hThreadSelf = RTThreadSelfAutoAdopt();
4170 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4171 }
4172 Assert(hThreadSelf == RTThreadSelf());
4173 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4174
4175 /*
4176 * Locate the entry for this thread in the table.
4177 */
4178 uint32_t iEntry = 0;
4179 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4180 if (RT_UNLIKELY(!pEntry))
4181 {
4182 rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4183 rtLockValComplainPanic();
4184 return VERR_SEM_LV_NOT_SIGNALLER;
4185 }
4186 return VINF_SUCCESS;
4187}
4188
4189
4190RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
4191{
4192 if (Thread == NIL_RTTHREAD)
4193 return 0;
4194
4195 PRTTHREADINT pThread = rtThreadGet(Thread);
4196 if (!pThread)
4197 return VERR_INVALID_HANDLE;
4198 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
4199 rtThreadRelease(pThread);
4200 return cWriteLocks;
4201}
4202RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
4203
4204
4205RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
4206{
4207 PRTTHREADINT pThread = rtThreadGet(Thread);
4208 AssertReturnVoid(pThread);
4209 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
4210 rtThreadRelease(pThread);
4211}
4212RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
4213
4214
4215RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
4216{
4217 PRTTHREADINT pThread = rtThreadGet(Thread);
4218 AssertReturnVoid(pThread);
4219 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
4220 rtThreadRelease(pThread);
4221}
4222RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
4223
4224
4225RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
4226{
4227 if (Thread == NIL_RTTHREAD)
4228 return 0;
4229
4230 PRTTHREADINT pThread = rtThreadGet(Thread);
4231 if (!pThread)
4232 return VERR_INVALID_HANDLE;
4233 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
4234 rtThreadRelease(pThread);
4235 return cReadLocks;
4236}
4237RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
4238
4239
4240RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
4241{
4242 PRTTHREADINT pThread = rtThreadGet(Thread);
4243 Assert(pThread);
4244 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
4245 rtThreadRelease(pThread);
4246}
4247RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
4248
4249
4250RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
4251{
4252 PRTTHREADINT pThread = rtThreadGet(Thread);
4253 Assert(pThread);
4254 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
4255 rtThreadRelease(pThread);
4256}
4257RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
4258
4259
RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
{
    void *pvLock = NULL;
    PRTTHREADINT pThread = rtThreadGet(hThread);
    if (pThread)
    {
        RTTHREADSTATE enmState = rtThreadGetState(pThread);
        if (RTTHREAD_IS_SLEEPING(enmState))
        {
            rtLockValidatorSerializeDetectionEnter();

            /* Re-check the state now that we hold the detection lock. */
            enmState = rtThreadGetState(pThread);
            if (RTTHREAD_IS_SLEEPING(enmState))
            {
                PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
                if (pRec)
                {
                    /* Dig the lock handle out of whichever record type it is. */
                    switch (pRec->Core.u32Magic)
                    {
                        case RTLOCKVALRECEXCL_MAGIC:
                            pvLock = pRec->Excl.hLock;
                            break;

                        case RTLOCKVALRECSHRDOWN_MAGIC:
                            /* Owner record: follow it to the shared record (validated). */
                            pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
                            if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
                                break;
                            RT_FALL_THRU();
                        case RTLOCKVALRECSHRD_MAGIC:
                            pvLock = pRec->Shared.hLock;
                            break;
                    }
                    /* Discard the result if the thread woke up while we looked. */
                    if (RTThreadGetState(pThread) != enmState)
                        pvLock = NULL;
                }
            }

            rtLockValidatorSerializeDetectionLeave();
        }
        rtThreadRelease(pThread);
    }
    return pvLock;
}
RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
4304
4305
4306RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
4307{
4308 bool fRet = false;
4309 PRTTHREADINT pThread = rtThreadGet(hThread);
4310 if (pThread)
4311 {
4312 fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
4313 rtThreadRelease(pThread);
4314 }
4315 return fRet;
4316}
4317RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
4318
4319
4320RTDECL(bool) RTLockValidatorHoldsLocksInClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass)
4321{
4322 bool fRet = false;
4323 if (hCurrentThread == NIL_RTTHREAD)
4324 hCurrentThread = RTThreadSelf();
4325 else
4326 Assert(hCurrentThread == RTThreadSelf());
4327 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4328 if (pThread)
4329 {
4330 if (hClass != NIL_RTLOCKVALCLASS)
4331 {
4332 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4333 while (VALID_PTR(pCur) && !fRet)
4334 {
4335 switch (pCur->Core.u32Magic)
4336 {
4337 case RTLOCKVALRECEXCL_MAGIC:
4338 fRet = pCur->Excl.hClass == hClass;
4339 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4340 break;
4341 case RTLOCKVALRECSHRDOWN_MAGIC:
4342 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4343 && pCur->ShrdOwner.pSharedRec->hClass == hClass;
4344 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4345 break;
4346 case RTLOCKVALRECNEST_MAGIC:
4347 switch (pCur->Nest.pRec->Core.u32Magic)
4348 {
4349 case RTLOCKVALRECEXCL_MAGIC:
4350 fRet = pCur->Nest.pRec->Excl.hClass == hClass;
4351 break;
4352 case RTLOCKVALRECSHRDOWN_MAGIC:
4353 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4354 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass;
4355 break;
4356 }
4357 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4358 break;
4359 default:
4360 pCur = NULL;
4361 break;
4362 }
4363 }
4364 }
4365
4366 rtThreadRelease(pThread);
4367 }
4368 return fRet;
4369}
4370RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4371
4372
4373RTDECL(bool) RTLockValidatorHoldsLocksInSubClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass, uint32_t uSubClass)
4374{
4375 bool fRet = false;
4376 if (hCurrentThread == NIL_RTTHREAD)
4377 hCurrentThread = RTThreadSelf();
4378 else
4379 Assert(hCurrentThread == RTThreadSelf());
4380 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4381 if (pThread)
4382 {
4383 if (hClass != NIL_RTLOCKVALCLASS)
4384 {
4385 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4386 while (VALID_PTR(pCur) && !fRet)
4387 {
4388 switch (pCur->Core.u32Magic)
4389 {
4390 case RTLOCKVALRECEXCL_MAGIC:
4391 fRet = pCur->Excl.hClass == hClass
4392 && pCur->Excl.uSubClass == uSubClass;
4393 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4394 break;
4395 case RTLOCKVALRECSHRDOWN_MAGIC:
4396 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4397 && pCur->ShrdOwner.pSharedRec->hClass == hClass
4398 && pCur->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4399 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4400 break;
4401 case RTLOCKVALRECNEST_MAGIC:
4402 switch (pCur->Nest.pRec->Core.u32Magic)
4403 {
4404 case RTLOCKVALRECEXCL_MAGIC:
4405 fRet = pCur->Nest.pRec->Excl.hClass == hClass
4406 && pCur->Nest.pRec->Excl.uSubClass == uSubClass;
4407 break;
4408 case RTLOCKVALRECSHRDOWN_MAGIC:
4409 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4410 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass
4411 && pCur->Nest.pRec->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4412 break;
4413 }
4414 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4415 break;
4416 default:
4417 pCur = NULL;
4418 break;
4419 }
4420 }
4421 }
4422
4423 rtThreadRelease(pThread);
4424 }
4425 return fRet;
4426}
4427RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4428
4429
4430RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
4431{
4432 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
4433}
4434RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
4435
4436
4437RTDECL(bool) RTLockValidatorIsEnabled(void)
4438{
4439 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
4440}
4441RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
4442
4443
4444RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
4445{
4446 return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
4447}
4448RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
4449
4450
4451RTDECL(bool) RTLockValidatorIsQuiet(void)
4452{
4453 return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
4454}
4455RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);
4456
4457
4458RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
4459{
4460 return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
4461}
4462RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
4463
4464
4465RTDECL(bool) RTLockValidatorMayPanic(void)
4466{
4467 return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
4468}
4469RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
4470
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette