VirtualBox

source: vbox/trunk/src/VBox/Runtime/testcase/tstRTLockValidator.cpp@ 25791

Last change on this file since 25791 was 25791, checked in by vboxsync, 15 years ago

iprt/lockvalidator,tstRTLockValidator: Fixed some class reference counting bugs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 60.0 KB
Line 
1/* $Id: tstRTLockValidator.cpp 25791 2010-01-12 22:57:57Z vboxsync $ */
2/** @file
3 * IPRT Testcase - RTLockValidator.
4 */
5
6/*
7 * Copyright (C) 2006-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#include <iprt/lockvalidator.h>
36
37#include <iprt/asm.h> /* for return addresses */
38#include <iprt/critsect.h>
39#include <iprt/err.h>
40#include <iprt/semaphore.h>
41#include <iprt/test.h>
42#include <iprt/thread.h>
43#include <iprt/time.h>
44
45
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Duration of the simple (single pass) tests, in seconds. */
#define SECS_SIMPLE_TEST    1
/** Duration of the racing tests, in seconds. */
#define SECS_RACE_TEST      3
/** Timeout (ms) for waits that are expected to complete promptly. */
#define TEST_SMALL_TIMEOUT  (  10*1000)
/** Timeout (ms) for waiting on whole test passes. */
#define TEST_LARGE_TIMEOUT  (  60*1000)
/** Very long timeout (ms) used instead of the above when g_fDoNotSpin is set
 *  so a developer can single step without the test giving up. */
#define TEST_DEBUG_TIMEOUT  (3600*1000)


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** The testcase handle. */
static RTTEST g_hTest;
/** Flip this in the debugger to get some peace to single step wild code. */
bool volatile g_fDoNotSpin = false;

/** Set when the main thread wishes to terminate the test. */
bool volatile g_fShutdown = false;
/** The number of threads. */
static uint32_t g_cThreads;
/** Index of the thread that shall provoke the expected deadlock in the
 *  current pass (set by testIt, checked by the testDd*Thread functions). */
static uint32_t g_iDeadlockThread;
/** Worker thread handles; unused slots are NIL_RTTHREAD. */
static RTTHREAD g_ahThreads[32];
/** Lock validator classes (used by the lock order tests). */
static RTLOCKVALCLASS g_ahClasses[32];
/** Critical sections, one per worker thread. */
static RTCRITSECT g_aCritSects[32];
/** Read-write semaphores, one per worker thread. */
static RTSEMRW g_ahSemRWs[32];
/** Mutex semaphores, one per worker thread. */
static RTSEMMUTEX g_ahSemMtxes[32];
/** Event semaphore used by the event deadlock test (testDd6Thread). */
static RTSEMEVENT g_hSemEvt;
/** Multiple release event semaphore used by testDd7Thread. */
static RTSEMEVENTMULTI g_hSemEvtMulti;

/** Multiple release event semaphore that is signalled by the main thread after
 * it has started all the threads. */
static RTSEMEVENTMULTI g_hThreadsStartedEvt;

/** The number of threads that have called testThreadBlocking */
static uint32_t volatile g_cThreadsBlocking;
/** Multiple release event semaphore that is signalled by the last thread to
 * call testThreadBlocking. testWaitForAllOtherThreadsToSleep waits on this. */
static RTSEMEVENTMULTI g_hThreadsBlockingEvt;

/** When to stop testing. */
static uint64_t g_NanoTSStop;
/** The number of deadlocks. */
static uint32_t volatile g_cDeadlocks;
/** The number of loops. */
static uint32_t volatile g_cLoops;
94
/**
 * Spin until the callback stops returning VERR_TRY_AGAIN.
 *
 * First waits for the main thread to signal that all worker threads have been
 * started, then polls the callback with a gentle back-off until it reports a
 * definite result or the small timeout expires.
 *
 * @returns Callback result. VERR_TIMEOUT if too much time elapses,
 *          VERR_INTERNAL_ERROR if a shutdown is in progress.
 * @param   pfnCallback     Callback for checking the state; returns
 *                          VERR_TRY_AGAIN to keep polling.
 * @param   pvWhat          Callback parameter (the lock to examine).
 */
static int testWaitForSomethingToBeOwned(int (*pfnCallback)(void *), void *pvWhat)
{
    RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiWait(g_hThreadsStartedEvt, TEST_SMALL_TIMEOUT));

    uint64_t u64StartMS = RTTimeMilliTS();
    for (unsigned iLoop = 0; ; iLoop++)
    {
        RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);

        int rc = pfnCallback(pvWhat);
        if (rc != VERR_TRY_AGAIN/* && !g_fDoNotSpin*/)
        {
            RTTEST_CHECK_RC_OK(g_hTest, rc);
            return rc;
        }

        /* The timeout check is skipped while single stepping (g_fDoNotSpin). */
        uint64_t cMsElapsed = RTTimeMilliTS() - u64StartMS;
        if (!g_fDoNotSpin)
            RTTEST_CHECK_RET(g_hTest, cMsElapsed <= TEST_SMALL_TIMEOUT, VERR_TIMEOUT);

        RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
        /* Busy-spin the first 256 rounds, then yield 1 ms at a time. */
        RTThreadSleep(/*g_fDoNotSpin ? TEST_DEBUG_TIMEOUT :*/ iLoop > 256 ? 1 : 0);
    }
}
127
128
129static int testCheckIfCritSectIsOwned(void *pvWhat)
130{
131 PRTCRITSECT pCritSect = (PRTCRITSECT)pvWhat;
132 if (!RTCritSectIsInitialized(pCritSect))
133 return VERR_SEM_DESTROYED;
134 if (RTCritSectIsOwned(pCritSect))
135 return VINF_SUCCESS;
136 return VERR_TRY_AGAIN;
137}
138
139
/**
 * Waits for the given critical section to gain an owner.
 *
 * @returns See testWaitForSomethingToBeOwned.
 * @param   pCritSect   The critical section to wait on.
 */
static int testWaitForCritSectToBeOwned(PRTCRITSECT pCritSect)
{
    return testWaitForSomethingToBeOwned(testCheckIfCritSectIsOwned, pCritSect);
}
144
145
146static int testCheckIfSemRWIsOwned(void *pvWhat)
147{
148 RTSEMRW hSemRW = (RTSEMRW)pvWhat;
149 if (RTSemRWGetWriteRecursion(hSemRW) > 0)
150 return VINF_SUCCESS;
151 if (RTSemRWGetReadCount(hSemRW) > 0)
152 return VINF_SUCCESS;
153 return VERR_TRY_AGAIN;
154}
155
/**
 * Waits for the given read-write semaphore to gain an owner (reader or writer).
 *
 * @returns See testWaitForSomethingToBeOwned.
 * @param   hSemRW      The read-write semaphore to wait on.
 */
static int testWaitForSemRWToBeOwned(RTSEMRW hSemRW)
{
    return testWaitForSomethingToBeOwned(testCheckIfSemRWIsOwned, hSemRW);
}
160
161
162static int testCheckIfSemMutexIsOwned(void *pvWhat)
163{
164 RTSEMMUTEX hSemRW = (RTSEMMUTEX)pvWhat;
165 if (RTSemMutexIsOwned(hSemRW))
166 return VINF_SUCCESS;
167 return VERR_TRY_AGAIN;
168}
169
/**
 * Waits for the given mutex semaphore to gain an owner.
 *
 * @returns See testWaitForSomethingToBeOwned.
 * @param   hSemMutex   The mutex semaphore to wait on.
 */
static int testWaitForSemMutexToBeOwned(RTSEMMUTEX hSemMutex)
{
    return testWaitForSomethingToBeOwned(testCheckIfSemMutexIsOwned, hSemMutex);
}
174
175
/**
 * For reducing spin in testWaitForAllOtherThreadsToSleep.
 *
 * Each worker calls this just before blocking; the last one to arrive (the
 * counter reaching g_cThreads) signals g_hThreadsBlockingEvt.
 */
static void testThreadBlocking(void)
{
    if (ASMAtomicIncU32(&g_cThreadsBlocking) == g_cThreads)
        RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiSignal(g_hThreadsBlockingEvt));
}
184
185
/**
 * Waits for all the other threads to enter sleeping states.
 *
 * @returns VINF_SUCCESS on success, VERR_INTERNAL_ERROR on failure.
 * @param   enmDesiredState     The desired thread sleep state.
 * @param   cWaitOn             The distance to the lock they'll be waiting on,
 *                              the lock type is derived from the desired state.
 *                              UINT32_MAX means no special lock.
 */
static int testWaitForAllOtherThreadsToSleep(RTTHREADSTATE enmDesiredState, uint32_t cWaitOn)
{
    /* We count as one of the blockers, then wait for the rest to arrive. */
    testThreadBlocking();
    RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiWait(g_hThreadsBlockingEvt, TEST_SMALL_TIMEOUT));

    RTTHREAD hThreadSelf = RTThreadSelf();
    for (uint32_t iOuterLoop = 0; ; iOuterLoop++)
    {
        uint32_t cMissing  = 0;     /* threads with no handle yet */
        uint32_t cWaitedOn = 0;     /* polls needed this pass; 0 => everybody already asleep */
        for (uint32_t i = 0; i < g_cThreads; i++)
        {
            RTTHREAD hThread = g_ahThreads[i];
            if (hThread == NIL_RTTHREAD)
                cMissing++;
            else if (hThread != hThreadSelf)
            {
                /*
                 * Figure out which lock to wait for.
                 */
                void *pvLock = NULL;
                if (cWaitOn != UINT32_MAX)
                {
                    uint32_t j = (i + cWaitOn) % g_cThreads;
                    switch (enmDesiredState)
                    {
                        case RTTHREADSTATE_CRITSECT:    pvLock = &g_aCritSects[j]; break;
                        case RTTHREADSTATE_RW_WRITE:
                        case RTTHREADSTATE_RW_READ:     pvLock = g_ahSemRWs[j]; break;
                        case RTTHREADSTATE_MUTEX:       pvLock = g_ahSemMtxes[j]; break;
                        default: break;
                    }
                }

                /*
                 * Wait for this thread.  It counts as asleep only when it is in
                 * the desired state, blocked on the expected lock, out of the
                 * validator, and no longer natively running.
                 */
                for (unsigned iLoop = 0; ; iLoop++)
                {
                    RTTHREADSTATE enmState = RTThreadGetReallySleeping(hThread);
                    if (RTTHREAD_IS_SLEEPING(enmState))
                    {
                        if (   enmState == enmDesiredState
                            && (   !pvLock
                                || (   pvLock == RTLockValidatorQueryBlocking(hThread)
                                    && !RTLockValidatorIsBlockedThreadInValidator(hThread) )
                               )
                            && RTThreadGetNativeState(hThread) != RTTHREADNATIVESTATE_RUNNING
                           )
                            break;
                    }
                    else if (   enmState != RTTHREADSTATE_RUNNING
                             && enmState != RTTHREADSTATE_INITIALIZING)
                        return VERR_INTERNAL_ERROR; /* unexpected state: give up */
                    RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
                    RTThreadSleep(g_fDoNotSpin ? TEST_DEBUG_TIMEOUT : iOuterLoop + iLoop > 256 ? 1 : 0);
                    RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
                    cWaitedOn++;
                }
            }
            RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
        }

        /* Done only after a full pass where nothing was missing and nobody
           needed to be polled. */
        if (!cMissing && !cWaitedOn)
            break;
        RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
        RTThreadSleep(g_fDoNotSpin ? TEST_DEBUG_TIMEOUT : iOuterLoop > 256 ? 1 : 0);
        RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
    }

    RTThreadSleep(0); /* fudge factor */
    RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
    return VINF_SUCCESS;
}
270
271
272/**
273 * Worker that starts the threads.
274 *
275 * @returns Same as RTThreadCreate.
276 * @param cThreads The number of threads to start.
277 * @param pfnThread Thread function.
278 */
279static int testStartThreads(uint32_t cThreads, PFNRTTHREAD pfnThread)
280{
281 RTSemEventMultiReset(g_hThreadsStartedEvt);
282
283 for (uint32_t i = 0; i < RT_ELEMENTS(g_ahThreads); i++)
284 g_ahThreads[i] = NIL_RTTHREAD;
285
286 int rc = VINF_SUCCESS;
287 for (uint32_t i = 0; i < cThreads; i++)
288 {
289 rc = RTThreadCreateF(&g_ahThreads[i], pfnThread, (void *)(uintptr_t)i, 0,
290 RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "thread-%02u", i);
291 RTTEST_CHECK_RC_OK(g_hTest, rc);
292 if (RT_FAILURE(rc))
293 break;
294 }
295
296 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventMultiSignal(g_hThreadsStartedEvt), rcCheck);
297 return rc;
298}
299
300
301/**
302 * Worker that waits for the threads to complete.
303 *
304 * @param cMillies How long to wait for each.
305 * @param fStopOnError Whether to stop on error and heed the thread
306 * return status.
307 */
308static void testWaitForThreads(uint32_t cMillies, bool fStopOnError)
309{
310 uint32_t i = RT_ELEMENTS(g_ahThreads);
311 while (i-- > 0)
312 if (g_ahThreads[i] != NIL_RTTHREAD)
313 {
314 int rcThread;
315 int rc2;
316 RTTEST_CHECK_RC_OK(g_hTest, rc2 = RTThreadWait(g_ahThreads[i], cMillies, &rcThread));
317 if (RT_SUCCESS(rc2))
318 g_ahThreads[i] = NIL_RTTHREAD;
319 if (fStopOnError && (RT_FAILURE(rc2) || RT_FAILURE(rcThread)))
320 return;
321 }
322}
323
324
/**
 * Common worker for the deadlock tests: creates the locks, repeatedly runs
 * the given thread function on cThreads threads until cSecs have elapsed
 * (or a single pass for the fLoops style tests), then cleans everything up.
 *
 * @param   cThreads    Number of worker threads.
 * @param   cSecs       How long to keep re-running passes; 0 = single pass.
 * @param   fLoops      Set for the race tests whose thread functions loop
 *                      internally, updating g_cLoops / g_cDeadlocks.
 * @param   pfnThread   The worker thread function.
 * @param   pszName     Sub-test name.
 */
static void testIt(uint32_t cThreads, uint32_t cSecs, bool fLoops, PFNRTTHREAD pfnThread, const char *pszName)
{
    /*
     * Init test.
     */
    if (cSecs > 0)
        RTTestSubF(g_hTest, "%s, %u threads, %u secs", pszName, cThreads, cSecs);
    else
        RTTestSubF(g_hTest, "%s, %u threads, single pass", pszName, cThreads);

    RTTEST_CHECK_RETV(g_hTest, RT_ELEMENTS(g_ahThreads) >= cThreads);
    RTTEST_CHECK_RETV(g_hTest, RT_ELEMENTS(g_aCritSects) >= cThreads);

    g_cThreads = cThreads;
    g_fShutdown = false;

    /* One critsect, read-write sem and mutex per thread, all with no class
       and sub-class ANY. */
    for (uint32_t i = 0; i < cThreads; i++)
    {
        RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0 /*fFlags*/, NIL_RTLOCKVALCLASS,
                                                       RTLOCKVAL_SUB_CLASS_ANY, "RTCritSect"), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemRWCreateEx(&g_ahSemRWs[i], 0 /*fFlags*/, NIL_RTLOCKVALCLASS,
                                                      RTLOCKVAL_SUB_CLASS_ANY, "RTSemRW"), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemMutexCreateEx(&g_ahSemMtxes[i], 0 /*fFlags*/, NIL_RTLOCKVALCLASS,
                                                         RTLOCKVAL_SUB_CLASS_ANY, "RTSemMutex"), VINF_SUCCESS);
    }
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventCreate(&g_hSemEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hSemEvtMulti), VINF_SUCCESS);
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hThreadsStartedEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hThreadsBlockingEvt), VINF_SUCCESS);

    /*
     * The test loop.
     */
    uint32_t cPasses    = 0;
    uint32_t cLoops     = 0;
    uint32_t cDeadlocks = 0;
    uint32_t cErrors    = RTTestErrorCount(g_hTest);
    uint64_t uStartNS   = RTTimeNanoTS();
    g_NanoTSStop        = uStartNS + cSecs * UINT64_C(1000000000);
    do
    {
        /* Rotate which thread provokes the deadlock from pass to pass. */
        g_iDeadlockThread  = (cThreads - 1 + cPasses) % cThreads;
        g_cLoops           = 0;
        g_cDeadlocks       = 0;
        g_cThreadsBlocking = 0;
        RTTEST_CHECK_RC(g_hTest, RTSemEventMultiReset(g_hThreadsBlockingEvt), VINF_SUCCESS);

        int rc = testStartThreads(cThreads, pfnThread);
        if (RT_SUCCESS(rc))
        {
            testWaitForThreads(TEST_LARGE_TIMEOUT + cSecs*1000, true);
            if (g_fDoNotSpin && RTTestErrorCount(g_hTest) != cErrors)
                testWaitForThreads(TEST_DEBUG_TIMEOUT, true); /* extra patience while single stepping */
        }

        RTTEST_CHECK(g_hTest, !fLoops || g_cLoops > 0);
        cLoops += g_cLoops;
        RTTEST_CHECK(g_hTest, !fLoops || g_cDeadlocks > 0);
        cDeadlocks += g_cDeadlocks;
        cPasses++;
    } while (   RTTestErrorCount(g_hTest) == cErrors
             && !fLoops
             && RTTimeNanoTS() < g_NanoTSStop);

    /*
     * Cleanup.
     */
    ASMAtomicWriteBool(&g_fShutdown, true);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiSignal(g_hThreadsBlockingEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiSignal(g_hThreadsStartedEvt), VINF_SUCCESS);
    RTThreadSleep(RTTestErrorCount(g_hTest) == cErrors ? 0 : 50);

    for (uint32_t i = 0; i < cThreads; i++)
    {
        RTTEST_CHECK_RC(g_hTest, RTCritSectDelete(&g_aCritSects[i]), VINF_SUCCESS);
        RTTEST_CHECK_RC(g_hTest, RTSemRWDestroy(g_ahSemRWs[i]), VINF_SUCCESS);
        RTTEST_CHECK_RC(g_hTest, RTSemMutexDestroy(g_ahSemMtxes[i]), VINF_SUCCESS);
    }
    RTTEST_CHECK_RC(g_hTest, RTSemEventDestroy(g_hSemEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hSemEvtMulti), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hThreadsStartedEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hThreadsBlockingEvt), VINF_SUCCESS);

    /* Pick up any stragglers, not stopping on errors this time. */
    testWaitForThreads(TEST_SMALL_TIMEOUT, false);

    /*
     * Print results if applicable.
     */
    if (cSecs)
    {
        if (fLoops)
            RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "cLoops=%u cDeadlocks=%u (%u%%)\n",
                         cLoops, cDeadlocks, cLoops ? cDeadlocks * 100 / cLoops : 0);
        else
            RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "cPasses=%u\n", cPasses);
    }
}
422
423
/**
 * Thread function for testDd1 (critical section deadlock).
 *
 * Each thread enters its own critsect (recursively for even indices), then
 * tries to enter the next thread's critsect, forming a cycle.  The thread
 * selected by g_iDeadlockThread waits for everybody else to block and then
 * closes the cycle, expecting VERR_SEM_LV_DEADLOCK from the validator.
 */
static DECLCALLBACK(int) testDd1Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t   i     = (uintptr_t)pvUser;
    PRTCRITSECT pMine = &g_aCritSects[i];
    PRTCRITSECT pNext = &g_aCritSects[(i + 1) % g_cThreads];

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS, rcCheck);
    if (!(i & 1))
        RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS); /* recursive enter for even threads */
    if (RT_SUCCESS(testWaitForCritSectToBeOwned(pNext)))
    {
        int rc;
        if (i != g_iDeadlockThread)
        {
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VINF_SUCCESS);
        }
        else
        {
            /* Last link in the cycle: wait for the others, then provoke it. */
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_CRITSECT, 1));
            if (RT_SUCCESS(rc))
                RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VERR_SEM_LV_DEADLOCK);
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
        if (RT_SUCCESS(rc))
            RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(pNext), VINF_SUCCESS);
    }
    if (!(i & 1))
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    return VINF_SUCCESS;
}
456
457
/** Runs the critical section deadlock test (see testDd1Thread). */
static void testDd1(uint32_t cThreads, uint32_t cSecs)
{
    testIt(cThreads, cSecs, false /*fLoops*/, testDd1Thread, "deadlock, critsect");
}
462
463
/**
 * Thread function for testDd2 (read-write semaphore deadlock).
 *
 * Odd threads take their own semaphore for writing (every fourth thread
 * recursively), even threads take it for reading.  Each then requests write
 * access on the next thread's semaphore, forming a cycle; g_iDeadlockThread
 * closes the cycle and expects VERR_SEM_LV_DEADLOCK, or
 * VERR_SEM_LV_ILLEGAL_UPGRADE in the single-thread case (hNext == hMine,
 * held for reading).
 */
static DECLCALLBACK(int) testDd2Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t i     = (uintptr_t)pvUser;
    RTSEMRW   hMine = g_ahSemRWs[i];
    RTSEMRW   hNext = g_ahSemRWs[(i + 1) % g_cThreads];
    int       rc;

    if (i & 1)
    {
        RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestWrite(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
        if ((i & 3) == 3)
            RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS); /* recursion */
    }
    else
        RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestRead(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
    if (RT_SUCCESS(testWaitForSemRWToBeOwned(hNext)))
    {
        if (i != g_iDeadlockThread)
        {
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(hNext, RT_INDEFINITE_WAIT), VINF_SUCCESS);
        }
        else
        {
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_RW_WRITE, 1));
            if (RT_SUCCESS(rc))
            {
                if (g_cThreads > 1)
                    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(hNext, RT_INDEFINITE_WAIT), VERR_SEM_LV_DEADLOCK);
                else
                    /* One thread: hNext == hMine, held for read => upgrade attempt. */
                    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(hNext, RT_INDEFINITE_WAIT), VERR_SEM_LV_ILLEGAL_UPGRADE);
            }
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
        if (RT_SUCCESS(rc))
            RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hNext), VINF_SUCCESS);
    }
    if (i & 1)
    {
        if ((i & 3) == 3)
            RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hMine), VINF_SUCCESS);
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hMine), VINF_SUCCESS);
    }
    else
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(hMine), VINF_SUCCESS);
    RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    return VINF_SUCCESS;
}
512
513
/** Runs the read-write semaphore deadlock test (see testDd2Thread). */
static void testDd2(uint32_t cThreads, uint32_t cSecs)
{
    testIt(cThreads, cSecs, false /*fLoops*/, testDd2Thread, "deadlock, read-write");
}
518
519
520static DECLCALLBACK(int) testDd3Thread(RTTHREAD ThreadSelf, void *pvUser)
521{
522 uintptr_t i = (uintptr_t)pvUser;
523 RTSEMRW hMine = g_ahSemRWs[i];
524 RTSEMRW hNext = g_ahSemRWs[(i + 1) % g_cThreads];
525 int rc;
526
527 if (i & 1)
528 RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestWrite(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
529 else
530 RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestRead(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
531 if (RT_SUCCESS(testWaitForSemRWToBeOwned(hNext)))
532 {
533 do
534 {
535 rc = RTSemRWRequestWrite(hNext, TEST_SMALL_TIMEOUT);
536 if (rc != VINF_SUCCESS && rc != VERR_SEM_LV_DEADLOCK && rc != VERR_SEM_LV_ILLEGAL_UPGRADE)
537 {
538 RTTestFailed(g_hTest, "#%u: RTSemRWRequestWrite -> %Rrc\n", i, rc);
539 break;
540 }
541 if (RT_SUCCESS(rc))
542 {
543 RTTEST_CHECK_RC(g_hTest, rc = RTSemRWReleaseWrite(hNext), VINF_SUCCESS);
544 if (RT_FAILURE(rc))
545 break;
546 }
547 else
548 ASMAtomicIncU32(&g_cDeadlocks);
549 ASMAtomicIncU32(&g_cLoops);
550 } while (RTTimeNanoTS() < g_NanoTSStop);
551 }
552 if (i & 1)
553 RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hMine), VINF_SUCCESS);
554 else
555 RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(hMine), VINF_SUCCESS);
556 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
557 return VINF_SUCCESS;
558}
559
560
/** Runs the read-write semaphore deadlock race test (see testDd3Thread). */
static void testDd3(uint32_t cThreads, uint32_t cSecs)
{
    testIt(cThreads, cSecs, true /*fLoops*/, testDd3Thread, "deadlock, read-write race");
}
565
566
567static DECLCALLBACK(int) testDd4Thread(RTTHREAD ThreadSelf, void *pvUser)
568{
569 uintptr_t i = (uintptr_t)pvUser;
570 RTSEMRW hMine = g_ahSemRWs[i];
571 RTSEMRW hNext = g_ahSemRWs[(i + 1) % g_cThreads];
572
573 do
574 {
575 int rc1 = (i & 1 ? RTSemRWRequestWrite : RTSemRWRequestRead)(hMine, TEST_SMALL_TIMEOUT); /* ugly ;-) */
576 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
577 if (rc1 != VINF_SUCCESS && rc1 != VERR_SEM_LV_DEADLOCK && rc1 != VERR_SEM_LV_ILLEGAL_UPGRADE)
578 {
579 RTTestFailed(g_hTest, "#%u: RTSemRWRequest%s(hMine,) -> %Rrc\n", i, i & 1 ? "Write" : "read", rc1);
580 break;
581 }
582 if (RT_SUCCESS(rc1))
583 {
584 for (unsigned iInner = 0; iInner < 4; iInner++)
585 {
586 int rc2 = RTSemRWRequestWrite(hNext, TEST_SMALL_TIMEOUT);
587 if (rc2 != VINF_SUCCESS && rc2 != VERR_SEM_LV_DEADLOCK && rc2 != VERR_SEM_LV_ILLEGAL_UPGRADE)
588 {
589 RTTestFailed(g_hTest, "#%u: RTSemRWRequestWrite -> %Rrc\n", i, rc2);
590 break;
591 }
592 if (RT_SUCCESS(rc2))
593 {
594 RTTEST_CHECK_RC(g_hTest, rc2 = RTSemRWReleaseWrite(hNext), VINF_SUCCESS);
595 if (RT_FAILURE(rc2))
596 break;
597 }
598 else
599 ASMAtomicIncU32(&g_cDeadlocks);
600 ASMAtomicIncU32(&g_cLoops);
601 }
602
603 RTTEST_CHECK_RC(g_hTest, rc1 = (i & 1 ? RTSemRWReleaseWrite : RTSemRWReleaseRead)(hMine), VINF_SUCCESS);
604 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
605 if (RT_FAILURE(rc1))
606 break;
607 }
608 else
609 ASMAtomicIncU32(&g_cDeadlocks);
610 ASMAtomicIncU32(&g_cLoops);
611 } while (RTTimeNanoTS() < g_NanoTSStop);
612
613 return VINF_SUCCESS;
614}
615
616
/** Runs the second read-write semaphore deadlock race test (see testDd4Thread). */
static void testDd4(uint32_t cThreads, uint32_t cSecs)
{
    testIt(cThreads, cSecs, true /*fLoops*/, testDd4Thread, "deadlock, read-write race v2");
}
621
622
/**
 * Thread function for testDd5 (mutex deadlock).
 *
 * Each thread takes its own mutex (recursively for odd indices), then
 * requests the next thread's mutex, forming a cycle.  g_iDeadlockThread
 * closes the cycle and expects VERR_SEM_LV_DEADLOCK from the validator.
 */
static DECLCALLBACK(int) testDd5Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t  i     = (uintptr_t)pvUser;
    RTSEMMUTEX hMine = g_ahSemMtxes[i];
    RTSEMMUTEX hNext = g_ahSemMtxes[(i + 1) % g_cThreads];

    RTTEST_CHECK_RC_RET(g_hTest, RTSemMutexRequest(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS); /* recursion */
    if (RT_SUCCESS(testWaitForSemMutexToBeOwned(hNext)))
    {
        int rc;
        if (i != g_iDeadlockThread)
        {
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(hNext, RT_INDEFINITE_WAIT), VINF_SUCCESS);
        }
        else
        {
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_MUTEX, 1));
            if (RT_SUCCESS(rc))
                RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(hNext, RT_INDEFINITE_WAIT), VERR_SEM_LV_DEADLOCK);
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
        if (RT_SUCCESS(rc))
            RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRelease(hNext), VINF_SUCCESS);
    }
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(hMine), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(hMine), VINF_SUCCESS);
    return VINF_SUCCESS;
}
655
656
/** Runs the mutex deadlock test (see testDd5Thread). */
static void testDd5(uint32_t cThreads, uint32_t cSecs)
{
    testIt(cThreads, cSecs, false /*fLoops*/, testDd5Thread, "deadlock, mutex");
}
661
662
/**
 * Thread function for testDd6 (deadlock via event semaphore signallers).
 *
 * Same critsect cycle as testDd1Thread, but instead of closing the cycle the
 * g_iDeadlockThread registers every worker thread as a signaller of g_hSemEvt
 * and waits on it; the wait is expected to fail with VERR_SEM_LV_DEADLOCK.
 * It then signals and waits once more to verify the non-deadlock path, and
 * finally clears the signaller list.
 */
static DECLCALLBACK(int) testDd6Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t   i     = (uintptr_t)pvUser;
    PRTCRITSECT pMine = &g_aCritSects[i];
    PRTCRITSECT pNext = &g_aCritSects[(i + 1) % g_cThreads];

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS, rcCheck);
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS); /* recursion */
    if (RT_SUCCESS(testWaitForCritSectToBeOwned(pNext)))
    {
        int rc;
        if (i != g_iDeadlockThread)
        {
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VINF_SUCCESS);
            RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
            if (RT_SUCCESS(rc))
                RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(pNext), VINF_SUCCESS);
        }
        else
        {
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_CRITSECT, 1));
            if (RT_SUCCESS(rc))
            {
                /* Declare all worker threads as the only allowed signallers. */
                RTSemEventSetSignaller(g_hSemEvt, g_ahThreads[0]);
                for (uint32_t iThread = 1; iThread < g_cThreads; iThread++)
                    RTSemEventAddSignaller(g_hSemEvt, g_ahThreads[iThread]);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventWait(g_hSemEvt, TEST_SMALL_TIMEOUT), VERR_SEM_LV_DEADLOCK);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventSignal(g_hSemEvt), VINF_SUCCESS);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventWait(g_hSemEvt, TEST_SMALL_TIMEOUT), VINF_SUCCESS);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTSemEventSetSignaller(g_hSemEvt, NIL_RTTHREAD);
            }
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    }
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    return VINF_SUCCESS;
}
708
709
/** Runs the event semaphore deadlock test (see testDd6Thread). */
static void testDd6(uint32_t cThreads, uint32_t cSecs)
{
    testIt(cThreads, cSecs, false /*fLoops*/, testDd6Thread, "deadlock, event");
}
714
715
/**
 * Thread function for testDd7 (deadlock via multiple release event semaphore
 * signallers).
 *
 * Same structure as testDd6Thread, but exercising the RTSemEventMulti
 * signaller tracking (g_hSemEvtMulti) instead of RTSemEvent: the deadlock
 * thread resets the event, expects VERR_SEM_LV_DEADLOCK on the first wait,
 * then signals and waits successfully, and clears the signaller list.
 */
static DECLCALLBACK(int) testDd7Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t   i     = (uintptr_t)pvUser;
    PRTCRITSECT pMine = &g_aCritSects[i];
    PRTCRITSECT pNext = &g_aCritSects[(i + 1) % g_cThreads];

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS, rcCheck);
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS); /* recursion */
    if (RT_SUCCESS(testWaitForCritSectToBeOwned(pNext)))
    {
        int rc;
        if (i != g_iDeadlockThread)
        {
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VINF_SUCCESS);
            RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
            if (RT_SUCCESS(rc))
                RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(pNext), VINF_SUCCESS);
        }
        else
        {
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_CRITSECT, 1));
            if (RT_SUCCESS(rc))
            {
                /* Declare all worker threads as the only allowed signallers. */
                RTSemEventMultiSetSignaller(g_hSemEvtMulti, g_ahThreads[0]);
                for (uint32_t iThread = 1; iThread < g_cThreads; iThread++)
                    RTSemEventMultiAddSignaller(g_hSemEvtMulti, g_ahThreads[iThread]);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventMultiReset(g_hSemEvtMulti), VINF_SUCCESS);
                RTTEST_CHECK_RC(g_hTest, RTSemEventMultiWait(g_hSemEvtMulti, TEST_SMALL_TIMEOUT), VERR_SEM_LV_DEADLOCK);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventMultiSignal(g_hSemEvtMulti), VINF_SUCCESS);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventMultiWait(g_hSemEvtMulti, TEST_SMALL_TIMEOUT), VINF_SUCCESS);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTSemEventMultiSetSignaller(g_hSemEvtMulti, NIL_RTTHREAD);
            }
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    }
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    return VINF_SUCCESS;
}
762
763
/** Runs the multiple release event semaphore deadlock test (see testDd7Thread). */
static void testDd7(uint32_t cThreads, uint32_t cSecs)
{
    testIt(cThreads, cSecs, false /*fLoops*/, testDd7Thread, "deadlock, event multi");
}
768
769
770static void testLo1(void)
771{
772 RTTestSub(g_hTest, "locking order basics");
773
774 /* Initialize the critsections, the first 4 has their own classes, the rest
775 use the same class and relies on the sub-class mechanism for ordering. */
776 for (unsigned i = 0; i < RT_ELEMENTS(g_ahClasses); i++)
777 {
778 if (i <= 3)
779 {
780 RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo1-%u", i), VINF_SUCCESS);
781 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectLO-Auto"), VINF_SUCCESS);
782 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 3);
783 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 2);
784 }
785 else
786 {
787 g_ahClasses[i] = RTLockValidatorClassForSrcPos(RT_SRC_POS, "testLo1-%u", i);
788 RTTEST_CHECK_RETV(g_hTest, g_ahClasses[i] != NIL_RTLOCKVALCLASS);
789 RTTEST_CHECK_RETV(g_hTest, i == 4 || g_ahClasses[i] == g_ahClasses[i - 1]);
790 if (i == 4)
791 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectLO-None"), VINF_SUCCESS);
792 else if (i == 5)
793 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_ANY, "RTCritSectLO-Any"), VINF_SUCCESS);
794 else
795 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_USER + i, "RTCritSectLO-User"), VINF_SUCCESS);
796
797 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 1 + (i - 4 + 1) * 2); /* released in cleanup. */
798 }
799 }
800
    /* Enter the first 4 critsects in ascending order and thereby defining
       this as a valid lock order. */
803 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
804 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
805 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
806 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
807
808 /* Now, leave and re-enter the critsects in a way that should break the
809 order and check that we get the appropriate response. */
810 int rc;
811 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
812 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[0]), VERR_SEM_LV_WRONG_ORDER);
813 if (RT_SUCCESS(rc))
814 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
815
816 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
817 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[1]), VERR_SEM_LV_WRONG_ORDER);
818 if (RT_SUCCESS(rc))
819 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
820
821 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
822 RTTEST_CHECK_RC(g_hTest, rc= RTCritSectEnter(&g_aCritSects[2]), VERR_SEM_LV_WRONG_ORDER);
823 if (RT_SUCCESS(rc))
824 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
825
826 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
827
828 /* Check that recursion isn't subject to order checks. */
829 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
830 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
831 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
832 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
833 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
834 if (RT_SUCCESS(rc))
835 {
836 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
837 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
838 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
839 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
840
841 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
842 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
843 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
844 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
845 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
846 }
847 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
848 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
849 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
850 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
851
852 /* Enable strict release order for class 2 and check that violations
853 are caught. */
854 RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);
855
856 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
857 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
858 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
859 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
860
861 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
862 if (RT_FAILURE(rc))
863 {
864 /* applies to recursions as well */
865 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
866 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
867 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
868 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
869 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
870 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
871 }
872 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
873 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
874 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
875 if (RT_FAILURE(rc))
876 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
877
878 /* Test that sub-class order works (4 = NONE, 5 = ANY, 6+ = USER). */
879 uint32_t cErrorsBefore = RTTestErrorCount(g_hTest);
880 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[7]), VINF_SUCCESS);
881
882 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[4]), VERR_SEM_LV_WRONG_ORDER);
883 if (RT_SUCCESS(rc))
884 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[4]), VINF_SUCCESS);
885
886 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
887 if (RT_SUCCESS(rc))
888 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
889
890 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[8]), VINF_SUCCESS);
891 if (RT_SUCCESS(rc))
892 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[8]), VINF_SUCCESS);
893
894 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[6]), VERR_SEM_LV_WRONG_ORDER);
895 if (RT_SUCCESS(rc))
896 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[6]), VINF_SUCCESS);
897
898 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[7]), VINF_SUCCESS);
899 if (RT_SUCCESS(rc))
900 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[7]), VINF_SUCCESS);
901 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[7]), VINF_SUCCESS);
902
903 /* Check that NONE trumps both ANY and USER. */
904 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[4]), VINF_SUCCESS);
905
906 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[5]), VERR_SEM_LV_WRONG_ORDER);
907 if (RT_SUCCESS(rc))
908 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
909
910 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[6]), VERR_SEM_LV_WRONG_ORDER);
911 if (RT_SUCCESS(rc))
912 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[6]), VINF_SUCCESS);
913
914 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[4]), VINF_SUCCESS);
915
916 /* Take all the locks using sub-classes. */
917 if (cErrorsBefore == RTTestErrorCount(g_hTest))
918 {
919 bool fSavedQuiet = RTLockValidatorSetQuiet(true);
920 for (uint32_t i = 6; i < RT_ELEMENTS(g_aCritSects); i++)
921 {
922 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[i]), VINF_SUCCESS);
923 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[4]), VERR_SEM_LV_WRONG_ORDER);
924 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
925 }
926 for (uint32_t i = 6; i < RT_ELEMENTS(g_aCritSects); i++)
927 {
928 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[i]), VINF_SUCCESS);
929 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
930 }
931 RTLockValidatorSetQuiet(fSavedQuiet);
932 }
933
934 /* Work up some hash statistics and trigger a violation to show them. */
935 for (uint32_t i = 0; i < 10240; i++)
936 {
937 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
938 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
939 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
940 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
941 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
942
943 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
944 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
945 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
946 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
947 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
948 }
949 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
950 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VERR_SEM_LV_WRONG_ORDER);
951 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
952
953 /* clean up */
954 //for (int i = RT_ELEMENTS(g_ahClasses) - 1; i >= 0; i--)
955 for (unsigned i = 0; i < RT_ELEMENTS(g_ahClasses); i++)
956 {
957 uint32_t c;
958 if (i <= 3)
959 RTTEST_CHECK_MSG(g_hTest, (c = RTLockValidatorClassRelease(g_ahClasses[i])) == 5 - i,
960 (g_hTest, "c=%u i=%u\n", c, i));
961 else
962 {
963 uint32_t cExpect = 1 + (RT_ELEMENTS(g_ahClasses) - i) * 2 - 1;
964 RTTEST_CHECK(g_hTest, (c = RTLockValidatorClassRelease(g_ahClasses[i])) == cExpect,
965 (g_hTest, "c=%u e=%u i=%u\n", c, cExpect, i));
966 }
967 g_ahClasses[i] = NIL_RTLOCKVALCLASS;
968 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectDelete(&g_aCritSects[i]), VINF_SUCCESS);
969 }
970}
971
972
/**
 * Tests lock order validation with critical sections, including strict
 * release order enforcement via RTLockValidatorClassEnforceStrictReleaseOrder.
 *
 * Uses g_aCritSects[0..3], each with its own autodidact class, so that the
 * validator learns the order 0 -> 1 -> 2 -> 3 on the first locking pass.
 */
static void testLo2(void)
{
    RTTestSub(g_hTest, "locking order, critsect");

    /* Initialize the critsects, each with a different autodidact class.  The
       retain/release return values double-check the class reference counting
       (create=1, critsect=+1, retain=+1 => 3; release => 2). */
    for (unsigned i = 0; i < 4; i++)
    {
        RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo2-%u", i), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectLO"), VINF_SUCCESS);
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 3);
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 2);
    }

    /* Check the sub-class API: the setter returns the previous sub-class. */
    RTTEST_CHECK(g_hTest, RTCritSectSetSubClass(&g_aCritSects[0], RTLOCKVAL_SUB_CLASS_ANY) == RTLOCKVAL_SUB_CLASS_NONE);
    RTTEST_CHECK(g_hTest, RTCritSectSetSubClass(&g_aCritSects[0], RTLOCKVAL_SUB_CLASS_NONE) == RTLOCKVAL_SUB_CLASS_ANY);

    /* Enter the first 4 critsects in ascending order and thereby defining
       this as a valid lock order. */
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);

    /* Now, leave and re-enter the critsects in a way that should break the
       order and check that we get the appropriate response.  (If the enter
       unexpectedly succeeds, leave again so the lock state stays balanced.) */
    int rc;
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[0]), VERR_SEM_LV_WRONG_ORDER);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);

    /* Check that recursion isn't subject to order checks. */
    RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);

    /* Enable strict release order for class 2 and check that violations
       are caught - including recursion. */
    RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS); /* start recursion */
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS); /* end recursion */
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);

    /* clean up: release the final class reference and delete the critsects. */
    for (int i = 4 - 1; i >= 0; i--)
    {
        RTTEST_CHECK(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 1);
        g_ahClasses[i] = NIL_RTLOCKVALCLASS;
        RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectDelete(&g_aCritSects[i]), VINF_SUCCESS);
    }
}
1031
1032
/**
 * Tests lock order validation with read/write semaphores, mixing read and
 * write acquisitions, and strict release order enforcement for two classes.
 *
 * Uses g_ahSemRWs[0..5], each with its own autodidact class, so that the
 * validator learns the order 0 -> 1 -> ... -> 5 on the first locking pass.
 */
static void testLo3(void)
{
    RTTestSub(g_hTest, "locking order, read-write");

    /* Initialize the r/w semaphores, each with a different autodidact class.
       The retain/release return values double-check the class reference
       counting (RTSemRW keeps two records per semaphore, hence 4/3). */
    for (unsigned i = 0; i < 6; i++)
    {
        RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo3-%u", i), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemRWCreateEx(&g_ahSemRWs[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "hSemRW-Lo3-%u", i), VINF_SUCCESS);
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 4);
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 3);
    }

    /* Check the sub-class API: the setter returns the previous sub-class. */
    RTTEST_CHECK(g_hTest, RTSemRWSetSubClass(g_ahSemRWs[0], RTLOCKVAL_SUB_CLASS_ANY) == RTLOCKVAL_SUB_CLASS_NONE);
    RTTEST_CHECK(g_hTest, RTSemRWSetSubClass(g_ahSemRWs[0], RTLOCKVAL_SUB_CLASS_NONE) == RTLOCKVAL_SUB_CLASS_ANY);

    /* Enter the six semaphores in ascending order (mixing read and write
       requests) and thereby defining this as a valid lock order. */
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[0], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestRead( g_ahSemRWs[1], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestRead( g_ahSemRWs[2], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[4], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[5], RT_INDEFINITE_WAIT), VINF_SUCCESS);

    /* Now, leave and re-enter the semaphores in a way that should break the
       order and check that we get the appropriate response.  (If an enter
       unexpectedly succeeds, release again so the lock state stays balanced.) */
    int rc;
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[0]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(g_ahSemRWs[0], RT_INDEFINITE_WAIT), VERR_SEM_LV_WRONG_ORDER);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[0]), VINF_SUCCESS);

    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(g_ahSemRWs[1]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestRead(g_ahSemRWs[1], RT_INDEFINITE_WAIT), VERR_SEM_LV_WRONG_ORDER);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(g_ahSemRWs[1]), VINF_SUCCESS);

    /* Check that recursion isn't subject to order checks.  The read count /
       write recursion getters verify the recursion was fully unwound. */
    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestRead(g_ahSemRWs[2], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(g_ahSemRWs[2]), VINF_SUCCESS);
    RTTEST_CHECK(g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 1);

    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(g_ahSemRWs[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VINF_SUCCESS);
    RTTEST_CHECK(g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 1);

    /* Enable strict release order for class 2 and 3, then check that violations
       are caught - including recursion. */
    RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[3], true), VINF_SUCCESS);

    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestRead( g_ahSemRWs[2], RT_INDEFINITE_WAIT), VINF_SUCCESS); /* start recursion */
    RTTEST_CHECK(   g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 2);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK(   g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 2);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestRead( g_ahSemRWs[4], RT_INDEFINITE_WAIT), VINF_SUCCESS); /* (mixed) */

    /* Releasing 2 or 3 now violates the strict (LIFO) release order; the
       counters must be unchanged by the rejected releases. */
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK(   g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 2);
    RTTEST_CHECK(   g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 2);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[4]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VINF_SUCCESS);
    RTTEST_CHECK(   g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 1);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[2]), VINF_SUCCESS); /* end recursion */
    RTTEST_CHECK(   g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 1);

    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK(g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 1);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK(g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 1);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[5]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[4]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[2]), VINF_SUCCESS);

    /* clean up: release the final class references and destroy the sems. */
    for (int i = 6 - 1; i >= 0; i--)
    {
        uint32_t c;
        RTTEST_CHECK_MSG(g_hTest, (c = RTLockValidatorClassRelease(g_ahClasses[i])) == 2, (g_hTest, "c=%u i=%u\n", c, i));
        g_ahClasses[i] = NIL_RTLOCKVALCLASS;
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemRWDestroy(g_ahSemRWs[i]), VINF_SUCCESS);
        g_ahSemRWs[i] = NIL_RTSEMRW;
    }
}
1123
1124
/**
 * Tests lock order validation with mutex semaphores, including strict
 * release order enforcement for class 2.
 *
 * Uses g_ahSemMtxes[0..3], each with its own autodidact class, so that the
 * validator learns the order 0 -> 1 -> 2 -> 3 on the first locking pass.
 */
static void testLo4(void)
{
    RTTestSub(g_hTest, "locking order, mutex");

    /* Initialize the mutexes, each with a different autodidact class.  The
       retain/release return values double-check the class reference counting
       (create=1, mutex=+1, retain=+1 => 3; release => 2). */
    for (unsigned i = 0; i < 4; i++)
    {
        RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo4-%u", i), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemMutexCreateEx(&g_ahSemMtxes[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTSemMutexLo4-%u", i), VINF_SUCCESS);
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 3);
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 2);
    }

    /* Check the sub-class API: the setter returns the previous sub-class. */
    RTTEST_CHECK(g_hTest, RTSemMutexSetSubClass(g_ahSemMtxes[0], RTLOCKVAL_SUB_CLASS_ANY) == RTLOCKVAL_SUB_CLASS_NONE);
    RTTEST_CHECK(g_hTest, RTSemMutexSetSubClass(g_ahSemMtxes[0], RTLOCKVAL_SUB_CLASS_NONE) == RTLOCKVAL_SUB_CLASS_ANY);

    /* Enter the four mutexes in ascending order and thereby defining this
       as a valid lock order. */
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[0], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[1], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[2], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);

    /* Now, leave and re-enter the mutexes in a way that should break the
       order and check that we get the appropriate response.  (If the request
       unexpectedly succeeds, release again so the lock state stays balanced.) */
    int rc;
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[0]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(g_ahSemMtxes[0], RT_INDEFINITE_WAIT), VERR_SEM_LV_WRONG_ORDER);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[0]), VINF_SUCCESS);

    /* Check that recursion isn't subject to order checks. */
    RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(g_ahSemMtxes[1], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[1]), VINF_SUCCESS);

    /* Enable strict release order for class 2 and check that violations
       are caught - including recursion. */
    RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);

    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[2], RT_INDEFINITE_WAIT), VINF_SUCCESS); /* start recursion */
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[2]), VINF_SUCCESS); /* end recursion */

    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[1]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[2]), VINF_SUCCESS);

    /* clean up: release the final class reference and destroy the mutexes. */
    for (int i = 4 - 1; i >= 0; i--)
    {
        RTTEST_CHECK(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 1);
        g_ahClasses[i] = NIL_RTLOCKVALCLASS;
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemMutexDestroy(g_ahSemMtxes[i]), VINF_SUCCESS);
    }
}
1185
1186
1187
1188
1189static const char *testCheckIfLockValidationIsCompiledIn(void)
1190{
1191 RTCRITSECT CritSect;
1192 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectInit(&CritSect), "");
1193 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectEnter(&CritSect), "");
1194 bool fRet = CritSect.pValidatorRec
1195 && CritSect.pValidatorRec->hThread == RTThreadSelf();
1196 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectLeave(&CritSect), "");
1197 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectDelete(&CritSect), "");
1198 if (!fRet)
1199 return "Lock validation is not enabled for critical sections";
1200
1201 /* deadlock detection for RTSemRW */
1202 RTSEMRW hSemRW;
1203 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWCreateEx(&hSemRW, 0 /*fFlags*/, NIL_RTLOCKVALCLASS,
1204 RTLOCKVAL_SUB_CLASS_NONE, "RTSemRW-1"), false);
1205 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWRequestRead(hSemRW, 50), "");
1206 int rc = RTSemRWRequestWrite(hSemRW, 1);
1207 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), "");
1208 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWReleaseRead(hSemRW), "");
1209 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWDestroy(hSemRW), "");
1210 if (rc != VERR_SEM_LV_ILLEGAL_UPGRADE)
1211 return "Deadlock detection is not enabled for the read/write semaphores";
1212
1213 /* lock order for RTSemRW */
1214 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWCreateEx(&hSemRW, 0 /*fFlags*/,
1215 RTLockValidatorClassCreateUnique(RT_SRC_POS, NULL),
1216 RTLOCKVAL_SUB_CLASS_NONE, "RTSemRW-2"), "");
1217 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWRequestRead(hSemRW, 50), "");
1218 rc = RTSemRWRequestWrite(hSemRW, 1);
1219 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), "");
1220 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWReleaseRead(hSemRW), "");
1221 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWDestroy(hSemRW), "");
1222 if (rc != VERR_SEM_LV_WRONG_ORDER)
1223 {
1224 RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "%Rrc\n", rc);
1225 return "Lock order validation is not enabled for the read/write semaphores";
1226 }
1227
1228 /* lock order for RTSemMutex */
1229 RTSEMMUTEX hSemMtx1;
1230 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexCreateEx(&hSemMtx1, 0 /*fFlags*/,
1231 RTLockValidatorClassCreateUnique(RT_SRC_POS, NULL),
1232 RTLOCKVAL_SUB_CLASS_NONE, "RTSemMtx-1"), "");
1233 RTSEMMUTEX hSemMtx2;
1234 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexCreateEx(&hSemMtx2, 0 /*fFlags*/,
1235 RTLockValidatorClassCreateUnique(RT_SRC_POS, NULL),
1236 RTLOCKVAL_SUB_CLASS_NONE, "RTSemMtx-2"), "");
1237 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRequest(hSemMtx1, 50), "");
1238 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRequest(hSemMtx2, 50), "");
1239 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRelease(hSemMtx2), "");
1240 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRelease(hSemMtx1), "");
1241
1242 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRequest(hSemMtx2, 50), "");
1243 rc = RTSemMutexRequest(hSemMtx1, 50);
1244 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), "");
1245 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRelease(hSemMtx2), "");
1246 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexDestroy(hSemMtx2), ""); hSemMtx2 = NIL_RTSEMMUTEX;
1247 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexDestroy(hSemMtx1), ""); hSemMtx1 = NIL_RTSEMMUTEX;
1248 if (rc != VERR_SEM_LV_WRONG_ORDER)
1249 return "Lock order validation is not enabled for the mutex semaphores";
1250
1251 /* signaller checks on event sems. */
1252 RTSEMEVENT hSemEvt;
1253 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventCreate(&hSemEvt), "");
1254 RTSemEventSetSignaller(hSemEvt, RTThreadSelf());
1255 RTSemEventSetSignaller(hSemEvt, NIL_RTTHREAD);
1256 rc = RTSemEventSignal(hSemEvt);
1257 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), "");
1258 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventDestroy(hSemEvt), "");
1259 if (rc != VERR_SEM_LV_NOT_SIGNALLER)
1260 return "Signalling checks are not enabled for the event semaphores";
1261
1262 /* signaller checks on multiple release event sems. */
1263 RTSEMEVENTMULTI hSemEvtMulti;
1264 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventMultiCreate(&hSemEvtMulti), "");
1265 RTSemEventMultiSetSignaller(hSemEvtMulti, RTThreadSelf());
1266 RTSemEventMultiSetSignaller(hSemEvtMulti, NIL_RTTHREAD);
1267 rc = RTSemEventMultiSignal(hSemEvtMulti);
1268 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), "");
1269 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventMultiDestroy(hSemEvtMulti), "");
1270 if (rc != VERR_SEM_LV_NOT_SIGNALLER)
1271 return "Signalling checks are not enabled for the multiple release event semaphores";
1272
1273 /* we're good */
1274 return NULL;
1275}
1276
1277
1278int main()
1279{
1280 /*
1281 * Init.
1282 */
1283 int rc = RTTestInitAndCreate("tstRTLockValidator", &g_hTest);
1284 if (rc)
1285 return rc;
1286 RTTestBanner(g_hTest);
1287
1288 RTLockValidatorSetEnabled(true);
1289 RTLockValidatorSetMayPanic(false);
1290 RTLockValidatorSetQuiet(true);
1291 const char *pszWhyDisabled = testCheckIfLockValidationIsCompiledIn();
1292 if (pszWhyDisabled)
1293 return RTTestErrorCount(g_hTest) > 0
1294 ? RTTestSummaryAndDestroy(g_hTest)
1295 : RTTestSkipAndDestroy(g_hTest, pszWhyDisabled);
1296 RTLockValidatorSetQuiet(false);
1297
1298 bool fTestDd = true;
1299 bool fTestLo = true;
1300
1301 /*
1302 * Some initial tests with verbose output (all single pass).
1303 */
1304 if (fTestDd)
1305 {
1306 testDd1(3, 0);
1307 testDd2(1, 0);
1308 testDd2(3, 0);
1309 testDd5(3, 0);
1310 testDd6(3, 0);
1311 testDd7(3, 0);
1312 }
1313 if (fTestLo)
1314 {
1315 testLo1();
1316 testLo2();
1317 testLo3();
1318 testLo4();
1319 }
1320
1321
1322 /*
1323 * If successful, perform more thorough testing without noisy output.
1324 */
1325 if (RTTestErrorCount(g_hTest) == 0)
1326 {
1327 RTLockValidatorSetQuiet(true);
1328
1329 if (fTestDd)
1330 {
1331 testDd1( 2, SECS_SIMPLE_TEST);
1332 testDd1( 3, SECS_SIMPLE_TEST);
1333 testDd1( 7, SECS_SIMPLE_TEST);
1334 testDd1(10, SECS_SIMPLE_TEST);
1335 testDd1(15, SECS_SIMPLE_TEST);
1336 testDd1(30, SECS_SIMPLE_TEST);
1337
1338 testDd2( 1, SECS_SIMPLE_TEST);
1339 testDd2( 2, SECS_SIMPLE_TEST);
1340 testDd2( 3, SECS_SIMPLE_TEST);
1341 testDd2( 7, SECS_SIMPLE_TEST);
1342 testDd2(10, SECS_SIMPLE_TEST);
1343 testDd2(15, SECS_SIMPLE_TEST);
1344 testDd2(30, SECS_SIMPLE_TEST);
1345
1346 testDd3( 2, SECS_SIMPLE_TEST);
1347 testDd3(10, SECS_SIMPLE_TEST);
1348
1349 testDd4( 2, SECS_RACE_TEST);
1350 testDd4( 6, SECS_RACE_TEST);
1351 testDd4(10, SECS_RACE_TEST);
1352 testDd4(30, SECS_RACE_TEST);
1353
1354 testDd5( 2, SECS_RACE_TEST);
1355 testDd5( 3, SECS_RACE_TEST);
1356 testDd5( 7, SECS_RACE_TEST);
1357 testDd5(10, SECS_RACE_TEST);
1358 testDd5(15, SECS_RACE_TEST);
1359 testDd5(30, SECS_RACE_TEST);
1360
1361 testDd6( 2, SECS_SIMPLE_TEST);
1362 testDd6( 3, SECS_SIMPLE_TEST);
1363 testDd6( 7, SECS_SIMPLE_TEST);
1364 testDd6(10, SECS_SIMPLE_TEST);
1365 testDd6(15, SECS_SIMPLE_TEST);
1366 testDd6(30, SECS_SIMPLE_TEST);
1367
1368 testDd7( 2, SECS_SIMPLE_TEST);
1369 testDd7( 3, SECS_SIMPLE_TEST);
1370 testDd7( 7, SECS_SIMPLE_TEST);
1371 testDd7(10, SECS_SIMPLE_TEST);
1372 testDd7(15, SECS_SIMPLE_TEST);
1373 testDd7(30, SECS_SIMPLE_TEST);
1374 }
1375 }
1376
1377 return RTTestSummaryAndDestroy(g_hTest);
1378}
1379
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette