VirtualBox

source: vbox/trunk/src/VBox/Runtime/testcase/tstRTLockValidator.cpp@ 102792

Last change on this file since 102792 was 98103, checked in by vboxsync, 22 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 60.7 KB
Line 
1/* $Id: tstRTLockValidator.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
2/** @file
3 * IPRT Testcase - RTLockValidator.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#include <iprt/lockvalidator.h>
42
43#include <iprt/asm.h> /* for return addresses */
44#include <iprt/critsect.h>
45#include <iprt/err.h>
46#include <iprt/semaphore.h>
47#include <iprt/test.h>
48#include <iprt/thread.h>
49#include <iprt/time.h>
50
51
/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Seconds to run the simple (single deadlock per pass) tests.  (Presumably
 *  passed as cSecs by the testcase driver - not visible in this chunk.) */
#define SECS_SIMPLE_TEST    1
/** Seconds to run the racing tests.  (Presumably used by the driver - not
 *  visible in this chunk.) */
#define SECS_RACE_TEST      3
/** Timeout (ms) for waits that are expected to complete quickly. */
#define TEST_SMALL_TIMEOUT  ( 10*1000)
/** Timeout (ms) used when waiting for all threads of a pass to terminate. */
#define TEST_LARGE_TIMEOUT  ( 60*1000)
/** One hour timeout (ms) substituted for the sleeps when g_fDoNotSpin is set,
 *  giving a developer peace to single step in the debugger. */
#define TEST_DEBUG_TIMEOUT  (3600*1000)
60
61
/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** The testcase handle. */
static RTTEST g_hTest;
/** Flip this in the debugger to get some peace to single step wild code. */
bool volatile g_fDoNotSpin = false;

/** Set when the main thread wishes to terminate the test. */
bool volatile g_fShutdown = false;
/** The number of threads. */
static uint32_t g_cThreads;
/** Index of the thread expected to provoke (and detect) the deadlock in the
 *  non-racing tests; rotated by testIt each pass. */
static uint32_t g_iDeadlockThread;
/** Thread handles of the current pass; NIL_RTTHREAD for unused/reaped slots. */
static RTTHREAD g_ahThreads[32];
/** Lock validator classes (used by the locking order tests). */
static RTLOCKVALCLASS g_ahClasses[32];
/** Critical sections, one per thread (testIt initializes cThreads of them). */
static RTCRITSECT g_aCritSects[32];
/** Read-write semaphores, one per thread. */
static RTSEMRW g_ahSemRWs[32];
/** Mutex semaphores, one per thread. */
static RTSEMMUTEX g_ahSemMtxes[32];
/** Event semaphore used by the event deadlock test (testDd6). */
static RTSEMEVENT g_hSemEvt;
/** Multiple release event semaphore used by testDd7. */
static RTSEMEVENTMULTI g_hSemEvtMulti;

/** Multiple release event semaphore that is signalled by the main thread after
 * it has started all the threads. */
static RTSEMEVENTMULTI g_hThreadsStartedEvt;

/** The number of threads that have called testThreadBlocking */
static uint32_t volatile g_cThreadsBlocking;
/** Multiple release event semaphore that is signalled by the last thread to
 * call testThreadBlocking. testWaitForAllOtherThreadsToSleep waits on this. */
static RTSEMEVENTMULTI g_hThreadsBlockingEvt;

/** When to stop testing. */
static uint64_t g_NanoTSStop;
/** The number of deadlocks (detected by the validator in the race tests). */
static uint32_t volatile g_cDeadlocks;
/** The number of loops (iterations performed by the race test threads). */
static uint32_t volatile g_cLoops;
99
100
101/**
102 * Spin until the callback stops returning VERR_TRY_AGAIN.
103 *
104 * @returns Callback result. VERR_TIMEOUT if too much time elapses.
105 * @param pfnCallback Callback for checking the state.
106 * @param pvWhat Callback parameter.
107 */
108static int testWaitForSomethingToBeOwned(int (*pfnCallback)(void *), void *pvWhat)
109{
110 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
111 RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiWait(g_hThreadsStartedEvt, TEST_SMALL_TIMEOUT));
112
113 uint64_t u64StartMS = RTTimeMilliTS();
114 for (unsigned iLoop = 0; ; iLoop++)
115 {
116 RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
117
118 int rc = pfnCallback(pvWhat);
119 if (rc != VERR_TRY_AGAIN/* && !g_fDoNotSpin*/)
120 {
121 RTTEST_CHECK_RC_OK(g_hTest, rc);
122 return rc;
123 }
124
125 uint64_t cMsElapsed = RTTimeMilliTS() - u64StartMS;
126 if (!g_fDoNotSpin)
127 RTTEST_CHECK_RET(g_hTest, cMsElapsed <= TEST_SMALL_TIMEOUT, VERR_TIMEOUT);
128
129 RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
130 RTThreadSleep(/*g_fDoNotSpin ? TEST_DEBUG_TIMEOUT :*/ iLoop > 256 ? 1 : 0);
131 }
132}
133
134
135static int testCheckIfCritSectIsOwned(void *pvWhat)
136{
137 PRTCRITSECT pCritSect = (PRTCRITSECT)pvWhat;
138 if (!RTCritSectIsInitialized(pCritSect))
139 return VERR_SEM_DESTROYED;
140 if (RTCritSectIsOwned(pCritSect))
141 return VINF_SUCCESS;
142 return VERR_TRY_AGAIN;
143}
144
145
146static int testWaitForCritSectToBeOwned(PRTCRITSECT pCritSect)
147{
148 return testWaitForSomethingToBeOwned(testCheckIfCritSectIsOwned, pCritSect);
149}
150
151
152static int testCheckIfSemRWIsOwned(void *pvWhat)
153{
154 RTSEMRW hSemRW = (RTSEMRW)pvWhat;
155 if (RTSemRWGetWriteRecursion(hSemRW) > 0)
156 return VINF_SUCCESS;
157 if (RTSemRWGetReadCount(hSemRW) > 0)
158 return VINF_SUCCESS;
159 return VERR_TRY_AGAIN;
160}
161
162static int testWaitForSemRWToBeOwned(RTSEMRW hSemRW)
163{
164 return testWaitForSomethingToBeOwned(testCheckIfSemRWIsOwned, hSemRW);
165}
166
167
168static int testCheckIfSemMutexIsOwned(void *pvWhat)
169{
170 RTSEMMUTEX hSemRW = (RTSEMMUTEX)pvWhat;
171 if (RTSemMutexIsOwned(hSemRW))
172 return VINF_SUCCESS;
173 return VERR_TRY_AGAIN;
174}
175
176static int testWaitForSemMutexToBeOwned(RTSEMMUTEX hSemMutex)
177{
178 return testWaitForSomethingToBeOwned(testCheckIfSemMutexIsOwned, hSemMutex);
179}
180
181
182/**
183 * For reducing spin in testWaitForAllOtherThreadsToSleep.
184 */
185static void testThreadBlocking(void)
186{
187 if (ASMAtomicIncU32(&g_cThreadsBlocking) == g_cThreads)
188 RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiSignal(g_hThreadsBlockingEvt));
189}
190
191
/**
 * Waits for all the other threads to enter sleeping states.
 *
 * @returns VINF_SUCCESS on success, VERR_INTERNAL_ERROR on failure.
 * @param   enmDesiredState     The desired thread sleep state.
 * @param   cWaitOn             The distance to the lock they'll be waiting on,
 *                              the lock type is derived from the desired state.
 *                              UINT32_MAX means no special lock.
 */
static int testWaitForAllOtherThreadsToSleep(RTTHREADSTATE enmDesiredState, uint32_t cWaitOn)
{
    /* Check in ourselves and wait until every thread has done so, to cut down
       on the polling below. */
    testThreadBlocking();
    RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiWait(g_hThreadsBlockingEvt, TEST_SMALL_TIMEOUT));

    RTTHREAD hThreadSelf = RTThreadSelf();
    for (uint32_t iOuterLoop = 0; ; iOuterLoop++)
    {
        uint32_t cMissing  = 0; /* threads not yet created / already reaped */
        uint32_t cWaitedOn = 0; /* polls spent waiting for some thread */
        for (uint32_t i = 0; i < g_cThreads; i++)
        {
            RTTHREAD hThread = g_ahThreads[i];
            if (hThread == NIL_RTTHREAD)
                cMissing++;
            else if (hThread != hThreadSelf)
            {
                /*
                 * Figure out which lock to wait for.
                 */
                void *pvLock = NULL;
                if (cWaitOn != UINT32_MAX)
                {
                    uint32_t j = (i + cWaitOn) % g_cThreads;
                    switch (enmDesiredState)
                    {
                        case RTTHREADSTATE_CRITSECT:    pvLock = &g_aCritSects[j]; break;
                        case RTTHREADSTATE_RW_WRITE:
                        case RTTHREADSTATE_RW_READ:     pvLock = g_ahSemRWs[j]; break;
                        case RTTHREADSTATE_MUTEX:       pvLock = g_ahSemMtxes[j]; break;
                        default: break;
                    }
                }

                /*
                 * Wait for this thread.
                 */
                for (unsigned iLoop = 0; ; iLoop++)
                {
                    RTTHREADSTATE enmState = RTThreadGetReallySleeping(hThread);
                    if (RTTHREAD_IS_SLEEPING(enmState))
                    {
                        /* Done with this thread once it sleeps in the desired
                           state, on the expected lock (if any), outside the
                           validator, and has really stopped executing. */
                        if (   enmState == enmDesiredState
                            && (   !pvLock
                                || (   pvLock == RTLockValidatorQueryBlocking(hThread)
                                    && !RTLockValidatorIsBlockedThreadInValidator(hThread) )
                               )
                            && RTThreadGetNativeState(hThread) != RTTHREADNATIVESTATE_RUNNING
                           )
                            break;
                    }
                    else if (   enmState != RTTHREADSTATE_RUNNING
                             && enmState != RTTHREADSTATE_INITIALIZING)
                        return VERR_INTERNAL_ERROR; /* sleeping in an unexpected state */
                    RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
                    RTThreadSleep(g_fDoNotSpin ? TEST_DEBUG_TIMEOUT : iOuterLoop + iLoop > 256 ? 1 : 0);
                    RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
                    cWaitedOn++;
                }
            }
            RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
        }

        /* Re-scan until a pass finds everyone asleep without any waiting. */
        if (!cMissing && !cWaitedOn)
            break;
        RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
        RTThreadSleep(g_fDoNotSpin ? TEST_DEBUG_TIMEOUT : iOuterLoop > 256 ? 1 : 0);
        RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
    }

    RTThreadSleep(0); /* fudge factor */
    RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
    return VINF_SUCCESS;
}
276
277
278/**
279 * Worker that starts the threads.
280 *
281 * @returns Same as RTThreadCreate.
282 * @param cThreads The number of threads to start.
283 * @param pfnThread Thread function.
284 */
285static int testStartThreads(uint32_t cThreads, PFNRTTHREAD pfnThread)
286{
287 RTSemEventMultiReset(g_hThreadsStartedEvt);
288
289 for (uint32_t i = 0; i < RT_ELEMENTS(g_ahThreads); i++)
290 g_ahThreads[i] = NIL_RTTHREAD;
291
292 int rc = VINF_SUCCESS;
293 for (uint32_t i = 0; i < cThreads; i++)
294 {
295 rc = RTThreadCreateF(&g_ahThreads[i], pfnThread, (void *)(uintptr_t)i, 0,
296 RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "thread-%02u", i);
297 RTTEST_CHECK_RC_OK(g_hTest, rc);
298 if (RT_FAILURE(rc))
299 break;
300 }
301
302 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventMultiSignal(g_hThreadsStartedEvt), rcCheck);
303 return rc;
304}
305
306
307/**
308 * Worker that waits for the threads to complete.
309 *
310 * @param cMillies How long to wait for each.
311 * @param fStopOnError Whether to stop on error and heed the thread
312 * return status.
313 */
314static void testWaitForThreads(uint32_t cMillies, bool fStopOnError)
315{
316 uint32_t i = RT_ELEMENTS(g_ahThreads);
317 while (i-- > 0)
318 if (g_ahThreads[i] != NIL_RTTHREAD)
319 {
320 int rcThread;
321 int rc2;
322 RTTEST_CHECK_RC_OK(g_hTest, rc2 = RTThreadWait(g_ahThreads[i], cMillies, &rcThread));
323 if (RT_SUCCESS(rc2))
324 g_ahThreads[i] = NIL_RTTHREAD;
325 if (fStopOnError && (RT_FAILURE(rc2) || RT_FAILURE(rcThread)))
326 return;
327 }
328}
329
330
/**
 * Common worker for the deadlock tests.
 *
 * Initializes the global locks and event semaphores, runs @a cThreads
 * instances of @a pfnThread, and repeats the pass until @a cSecs have elapsed
 * (unless @a fLoops is set, in which case the workers loop internally and only
 * a single pass is made), then tears everything down and reports.
 *
 * @param   cThreads    Number of worker threads to start.
 * @param   cSecs       Number of seconds to keep testing; 0 means single pass.
 * @param   fLoops      Set if the workers loop internally and update
 *                      g_cLoops / g_cDeadlocks (the race tests).
 * @param   pfnThread   The thread worker doing the actual test.
 * @param   pszName     The sub-test name.
 */
static void testIt(uint32_t cThreads, uint32_t cSecs, bool fLoops, PFNRTTHREAD pfnThread, const char *pszName)
{
    /*
     * Init test.
     */
    if (cSecs > 0)
        RTTestSubF(g_hTest, "%s, %u threads, %u secs", pszName, cThreads, cSecs);
    else
        RTTestSubF(g_hTest, "%s, %u threads, single pass", pszName, cThreads);

    RTTEST_CHECK_RETV(g_hTest, RT_ELEMENTS(g_ahThreads) >= cThreads);
    RTTEST_CHECK_RETV(g_hTest, RT_ELEMENTS(g_aCritSects) >= cThreads);

    g_cThreads = cThreads;
    g_fShutdown = false;

    /* One critsect, RW semaphore and mutex per thread. */
    for (uint32_t i = 0; i < cThreads; i++)
    {
        RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0 /*fFlags*/, NIL_RTLOCKVALCLASS,
                                                       RTLOCKVAL_SUB_CLASS_ANY, "RTCritSect"), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemRWCreateEx(&g_ahSemRWs[i], 0 /*fFlags*/, NIL_RTLOCKVALCLASS,
                                                      RTLOCKVAL_SUB_CLASS_ANY, "RTSemRW"), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemMutexCreateEx(&g_ahSemMtxes[i], 0 /*fFlags*/, NIL_RTLOCKVALCLASS,
                                                         RTLOCKVAL_SUB_CLASS_ANY, "RTSemMutex"), VINF_SUCCESS);
    }
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventCreate(&g_hSemEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hSemEvtMulti), VINF_SUCCESS);
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hThreadsStartedEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hThreadsBlockingEvt), VINF_SUCCESS);

    /*
     * The test loop.
     */
    uint32_t cPasses    = 0;
    uint32_t cLoops     = 0;
    uint32_t cDeadlocks = 0;
    uint32_t cErrors    = RTTestErrorCount(g_hTest);
    uint64_t uStartNS   = RTTimeNanoTS();
    g_NanoTSStop        = uStartNS + cSecs * UINT64_C(1000000000);
    do
    {
        /* Rotate which thread provokes the deadlock each pass. */
        g_iDeadlockThread  = (cThreads - 1 + cPasses) % cThreads;
        g_cLoops           = 0;
        g_cDeadlocks       = 0;
        g_cThreadsBlocking = 0;
        RTTEST_CHECK_RC(g_hTest, RTSemEventMultiReset(g_hThreadsBlockingEvt), VINF_SUCCESS);

        int rc = testStartThreads(cThreads, pfnThread);
        if (RT_SUCCESS(rc))
        {
            testWaitForThreads(TEST_LARGE_TIMEOUT + cSecs*1000, true);
            if (g_fDoNotSpin && RTTestErrorCount(g_hTest) != cErrors)
                testWaitForThreads(TEST_DEBUG_TIMEOUT, true); /* extra time for debugging */
        }

        RTTEST_CHECK(g_hTest, !fLoops || g_cLoops > 0);
        cLoops += g_cLoops;
        RTTEST_CHECK(g_hTest, !fLoops || g_cDeadlocks > 0);
        cDeadlocks += g_cDeadlocks;
        cPasses++;
    } while (   RTTestErrorCount(g_hTest) == cErrors
             && !fLoops
             && RTTimeNanoTS() < g_NanoTSStop);

    /*
     * Cleanup.
     */
    ASMAtomicWriteBool(&g_fShutdown, true);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiSignal(g_hThreadsBlockingEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiSignal(g_hThreadsStartedEvt), VINF_SUCCESS);
    RTThreadSleep(RTTestErrorCount(g_hTest) == cErrors ? 0 : 50);

    for (uint32_t i = 0; i < cThreads; i++)
    {
        RTTEST_CHECK_RC(g_hTest, RTCritSectDelete(&g_aCritSects[i]), VINF_SUCCESS);
        RTTEST_CHECK_RC(g_hTest, RTSemRWDestroy(g_ahSemRWs[i]), VINF_SUCCESS);
        RTTEST_CHECK_RC(g_hTest, RTSemMutexDestroy(g_ahSemMtxes[i]), VINF_SUCCESS);
    }
    RTTEST_CHECK_RC(g_hTest, RTSemEventDestroy(g_hSemEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hSemEvtMulti), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hThreadsStartedEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hThreadsBlockingEvt), VINF_SUCCESS);

    /* Last chance for stragglers to be reaped. */
    testWaitForThreads(TEST_SMALL_TIMEOUT, false);

    /*
     * Print results if applicable.
     */
    if (cSecs)
    {
        if (fLoops)
            RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "cLoops=%u cDeadlocks=%u (%u%%)\n",
                         cLoops, cDeadlocks, cLoops ? cDeadlocks * 100 / cLoops : 0);
        else
            RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "cPasses=%u\n", cPasses);
    }
}
428
429
/**
 * Thread worker for testDd1: classic deadlock cycle over critical sections.
 *
 * Thread i enters critsect i (recursively for even i), then tries to enter
 * critsect (i + 1) % g_cThreads.  The designated deadlock thread waits until
 * all the others are asleep and expects the validator to return
 * VERR_SEM_LV_DEADLOCK from the final enter.
 */
static DECLCALLBACK(int) testDd1Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t   i     = (uintptr_t)pvUser;
    PRTCRITSECT pMine = &g_aCritSects[i];
    PRTCRITSECT pNext = &g_aCritSects[(i + 1) % g_cThreads];
    RT_NOREF_PV(ThreadSelf);

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS, rcCheck);
    if (!(i & 1))
        RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS); /* recursive entry for even threads */
    if (RT_SUCCESS(testWaitForCritSectToBeOwned(pNext)))
    {
        int rc;
        if (i != g_iDeadlockThread)
        {
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VINF_SUCCESS);
        }
        else
        {
            /* The deadlock thread closes the cycle once everybody else sleeps. */
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_CRITSECT, 1));
            if (RT_SUCCESS(rc))
                RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VERR_SEM_LV_DEADLOCK);
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
        if (RT_SUCCESS(rc))
            RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(pNext), VINF_SUCCESS);
    }
    if (!(i & 1))
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    return VINF_SUCCESS;
}
463
464
465static void testDd1(uint32_t cThreads, uint32_t cSecs)
466{
467 testIt(cThreads, cSecs, false, testDd1Thread, "deadlock, critsect");
468}
469
470
/**
 * Thread worker for testDd2: deadlock cycle over read-write semaphores.
 *
 * Odd threads take their own semaphore for writing (doubly when i % 4 == 3),
 * even threads for reading.  Everybody then requests the next thread's
 * semaphore for writing; the designated deadlock thread waits for the rest to
 * block and expects VERR_SEM_LV_DEADLOCK (or VERR_SEM_LV_ILLEGAL_UPGRADE when
 * running with a single thread).
 */
static DECLCALLBACK(int) testDd2Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t i     = (uintptr_t)pvUser;
    RTSEMRW   hMine = g_ahSemRWs[i];
    RTSEMRW   hNext = g_ahSemRWs[(i + 1) % g_cThreads];
    int       rc;
    RT_NOREF_PV(ThreadSelf);

    if (i & 1)
    {
        RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestWrite(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
        if ((i & 3) == 3)
            RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS); /* recursion */
    }
    else
        RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestRead(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
    if (RT_SUCCESS(testWaitForSemRWToBeOwned(hNext)))
    {
        if (i != g_iDeadlockThread)
        {
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(hNext, RT_INDEFINITE_WAIT), VINF_SUCCESS);
        }
        else
        {
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_RW_WRITE, 1));
            if (RT_SUCCESS(rc))
            {
                /* Single thread: hNext == hMine, so this is a read->write upgrade, not a deadlock. */
                if (g_cThreads > 1)
                    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(hNext, RT_INDEFINITE_WAIT), VERR_SEM_LV_DEADLOCK);
                else
                    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(hNext, RT_INDEFINITE_WAIT), VERR_SEM_LV_ILLEGAL_UPGRADE);
            }
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
        if (RT_SUCCESS(rc))
            RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hNext), VINF_SUCCESS);
    }
    if (i & 1)
    {
        if ((i & 3) == 3)
            RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hMine), VINF_SUCCESS);
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hMine), VINF_SUCCESS);
    }
    else
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(hMine), VINF_SUCCESS);
    RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    return VINF_SUCCESS;
}
520
521
522static void testDd2(uint32_t cThreads, uint32_t cSecs)
523{
524 testIt(cThreads, cSecs, false, testDd2Thread, "deadlock, read-write");
525}
526
527
/**
 * Thread worker for testDd3: read-write deadlock race.
 *
 * Same lock cycle as testDd2, but instead of a designated deadlock thread
 * every thread repeatedly requests the next thread's semaphore for writing
 * with a timeout until g_NanoTSStop, counting iterations in g_cLoops and
 * validator-detected deadlocks in g_cDeadlocks.
 */
static DECLCALLBACK(int) testDd3Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t i     = (uintptr_t)pvUser;
    RTSEMRW   hMine = g_ahSemRWs[i];
    RTSEMRW   hNext = g_ahSemRWs[(i + 1) % g_cThreads];
    int       rc;
    RT_NOREF_PV(ThreadSelf);

    if (i & 1)
        RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestWrite(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
    else
        RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestRead(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
    if (RT_SUCCESS(testWaitForSemRWToBeOwned(hNext)))
    {
        do
        {
            rc = RTSemRWRequestWrite(hNext, TEST_SMALL_TIMEOUT);
            /* Success, deadlock and illegal upgrade are all expected outcomes. */
            if (rc != VINF_SUCCESS && rc != VERR_SEM_LV_DEADLOCK && rc != VERR_SEM_LV_ILLEGAL_UPGRADE)
            {
                RTTestFailed(g_hTest, "#%u: RTSemRWRequestWrite -> %Rrc\n", i, rc);
                break;
            }
            if (RT_SUCCESS(rc))
            {
                RTTEST_CHECK_RC(g_hTest, rc = RTSemRWReleaseWrite(hNext), VINF_SUCCESS);
                if (RT_FAILURE(rc))
                    break;
            }
            else
                ASMAtomicIncU32(&g_cDeadlocks);
            ASMAtomicIncU32(&g_cLoops);
        } while (RTTimeNanoTS() < g_NanoTSStop);
    }
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hMine), VINF_SUCCESS);
    else
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(hMine), VINF_SUCCESS);
    RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    return VINF_SUCCESS;
}
568
569
570static void testDd3(uint32_t cThreads, uint32_t cSecs)
571{
572 testIt(cThreads, cSecs, true, testDd3Thread, "deadlock, read-write race");
573}
574
575
576static DECLCALLBACK(int) testDd4Thread(RTTHREAD ThreadSelf, void *pvUser)
577{
578 uintptr_t i = (uintptr_t)pvUser;
579 RTSEMRW hMine = g_ahSemRWs[i];
580 RTSEMRW hNext = g_ahSemRWs[(i + 1) % g_cThreads];
581 RT_NOREF_PV(ThreadSelf);
582
583 do
584 {
585 int rc1 = (i & 1 ? RTSemRWRequestWrite : RTSemRWRequestRead)(hMine, TEST_SMALL_TIMEOUT); /* ugly ;-) */
586 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
587 if (rc1 != VINF_SUCCESS && rc1 != VERR_SEM_LV_DEADLOCK && rc1 != VERR_SEM_LV_ILLEGAL_UPGRADE)
588 {
589 RTTestFailed(g_hTest, "#%u: RTSemRWRequest%s(hMine,) -> %Rrc\n", i, i & 1 ? "Write" : "read", rc1);
590 break;
591 }
592 if (RT_SUCCESS(rc1))
593 {
594 for (unsigned iInner = 0; iInner < 4; iInner++)
595 {
596 int rc2 = RTSemRWRequestWrite(hNext, TEST_SMALL_TIMEOUT);
597 if (rc2 != VINF_SUCCESS && rc2 != VERR_SEM_LV_DEADLOCK && rc2 != VERR_SEM_LV_ILLEGAL_UPGRADE)
598 {
599 RTTestFailed(g_hTest, "#%u: RTSemRWRequestWrite -> %Rrc\n", i, rc2);
600 break;
601 }
602 if (RT_SUCCESS(rc2))
603 {
604 RTTEST_CHECK_RC(g_hTest, rc2 = RTSemRWReleaseWrite(hNext), VINF_SUCCESS);
605 if (RT_FAILURE(rc2))
606 break;
607 }
608 else
609 ASMAtomicIncU32(&g_cDeadlocks);
610 ASMAtomicIncU32(&g_cLoops);
611 }
612
613 RTTEST_CHECK_RC(g_hTest, rc1 = (i & 1 ? RTSemRWReleaseWrite : RTSemRWReleaseRead)(hMine), VINF_SUCCESS);
614 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
615 if (RT_FAILURE(rc1))
616 break;
617 }
618 else
619 ASMAtomicIncU32(&g_cDeadlocks);
620 ASMAtomicIncU32(&g_cLoops);
621 } while (RTTimeNanoTS() < g_NanoTSStop);
622
623 return VINF_SUCCESS;
624}
625
626
627static void testDd4(uint32_t cThreads, uint32_t cSecs)
628{
629 testIt(cThreads, cSecs, true, testDd4Thread, "deadlock, read-write race v2");
630}
631
632
/**
 * Thread worker for testDd5: deadlock cycle over mutex semaphores.
 *
 * Thread i takes mutex i (recursively for odd i) and then requests mutex
 * (i + 1) % g_cThreads.  The designated deadlock thread waits for the others
 * to block and expects VERR_SEM_LV_DEADLOCK.
 */
static DECLCALLBACK(int) testDd5Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t  i     = (uintptr_t)pvUser;
    RTSEMMUTEX hMine = g_ahSemMtxes[i];
    RTSEMMUTEX hNext = g_ahSemMtxes[(i + 1) % g_cThreads];
    RT_NOREF_PV(ThreadSelf);

    RTTEST_CHECK_RC_RET(g_hTest, RTSemMutexRequest(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS); /* recursive entry */
    if (RT_SUCCESS(testWaitForSemMutexToBeOwned(hNext)))
    {
        int rc;
        if (i != g_iDeadlockThread)
        {
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(hNext, RT_INDEFINITE_WAIT), VINF_SUCCESS);
        }
        else
        {
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_MUTEX, 1));
            if (RT_SUCCESS(rc))
                RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(hNext, RT_INDEFINITE_WAIT), VERR_SEM_LV_DEADLOCK);
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
        if (RT_SUCCESS(rc))
            RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRelease(hNext), VINF_SUCCESS);
    }
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(hMine), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(hMine), VINF_SUCCESS);
    return VINF_SUCCESS;
}
666
667
668static void testDd5(uint32_t cThreads, uint32_t cSecs)
669{
670 testIt(cThreads, cSecs, false, testDd5Thread, "deadlock, mutex");
671}
672
673
/**
 * Thread worker for testDd6: deadlock through event semaphore signallers.
 *
 * Builds the usual critsect cycle; the deadlock thread then registers all the
 * test threads as signallers of g_hSemEvt and waits on it.  Since every
 * potential signaller is blocked in the lock cycle the validator must return
 * VERR_SEM_LV_DEADLOCK.  After the thread signals the event itself, a second
 * wait must succeed normally.
 */
static DECLCALLBACK(int) testDd6Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t   i     = (uintptr_t)pvUser;
    PRTCRITSECT pMine = &g_aCritSects[i];
    PRTCRITSECT pNext = &g_aCritSects[(i + 1) % g_cThreads];
    RT_NOREF_PV(ThreadSelf);

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS, rcCheck);
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS); /* recursive entry for odd threads */
    if (RT_SUCCESS(testWaitForCritSectToBeOwned(pNext)))
    {
        int rc;
        if (i != g_iDeadlockThread)
        {
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VINF_SUCCESS);
            RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
            if (RT_SUCCESS(rc))
                RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(pNext), VINF_SUCCESS);
        }
        else
        {
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_CRITSECT, 1));
            if (RT_SUCCESS(rc))
            {
                /* Declare all threads as the event's signallers, then wait:
                   everybody who could signal is asleep => deadlock. */
                RTSemEventSetSignaller(g_hSemEvt, g_ahThreads[0]);
                for (uint32_t iThread = 1; iThread < g_cThreads; iThread++)
                    RTSemEventAddSignaller(g_hSemEvt, g_ahThreads[iThread]);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventWait(g_hSemEvt, TEST_SMALL_TIMEOUT), VERR_SEM_LV_DEADLOCK);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventSignal(g_hSemEvt), VINF_SUCCESS);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventWait(g_hSemEvt, TEST_SMALL_TIMEOUT), VINF_SUCCESS);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTSemEventSetSignaller(g_hSemEvt, NIL_RTTHREAD); /* reset signaller list */
            }
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    }
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    return VINF_SUCCESS;
}
720
721
722static void testDd6(uint32_t cThreads, uint32_t cSecs)
723{
724 testIt(cThreads, cSecs, false, testDd6Thread, "deadlock, event");
725}
726
727
/**
 * Thread worker for testDd7: deadlock through multiple-release event semaphore
 * signallers.
 *
 * Same scheme as testDd6, but using g_hSemEvtMulti: the deadlock thread
 * registers all test threads as signallers, resets the event and waits -
 * everybody who could signal is blocked in the critsect cycle, so the
 * validator must return VERR_SEM_LV_DEADLOCK.  After signalling, a second
 * wait must succeed.
 */
static DECLCALLBACK(int) testDd7Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t   i     = (uintptr_t)pvUser;
    PRTCRITSECT pMine = &g_aCritSects[i];
    PRTCRITSECT pNext = &g_aCritSects[(i + 1) % g_cThreads];
    RT_NOREF_PV(ThreadSelf);

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS, rcCheck);
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS); /* recursive entry for odd threads */
    if (RT_SUCCESS(testWaitForCritSectToBeOwned(pNext)))
    {
        int rc;
        if (i != g_iDeadlockThread)
        {
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VINF_SUCCESS);
            RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
            if (RT_SUCCESS(rc))
                RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(pNext), VINF_SUCCESS);
        }
        else
        {
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_CRITSECT, 1));
            if (RT_SUCCESS(rc))
            {
                RTSemEventMultiSetSignaller(g_hSemEvtMulti, g_ahThreads[0]);
                for (uint32_t iThread = 1; iThread < g_cThreads; iThread++)
                    RTSemEventMultiAddSignaller(g_hSemEvtMulti, g_ahThreads[iThread]);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventMultiReset(g_hSemEvtMulti), VINF_SUCCESS);
                RTTEST_CHECK_RC(g_hTest, RTSemEventMultiWait(g_hSemEvtMulti, TEST_SMALL_TIMEOUT), VERR_SEM_LV_DEADLOCK);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventMultiSignal(g_hSemEvtMulti), VINF_SUCCESS);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventMultiWait(g_hSemEvtMulti, TEST_SMALL_TIMEOUT), VINF_SUCCESS);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTSemEventMultiSetSignaller(g_hSemEvtMulti, NIL_RTTHREAD); /* reset signaller list */
            }
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    }
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    return VINF_SUCCESS;
}
775
776
777static void testDd7(uint32_t cThreads, uint32_t cSecs)
778{
779 testIt(cThreads, cSecs, false, testDd7Thread, "deadlock, event multi");
780}
781
782
783static void testLo1(void)
784{
785 RTTestSub(g_hTest, "locking order basics");
786
787 /* Initialize the critsections, the first 4 has their own classes, the rest
788 use the same class and relies on the sub-class mechanism for ordering. */
789 for (unsigned i = 0; i < RT_ELEMENTS(g_ahClasses); i++)
790 {
791 if (i <= 3)
792 {
793 RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo1-%u", i), VINF_SUCCESS);
794 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectLO-Auto"), VINF_SUCCESS);
795 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 3);
796 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 2);
797 }
798 else
799 {
800 g_ahClasses[i] = RTLockValidatorClassForSrcPos(RT_SRC_POS, "testLo1-%u", i);
801 RTTEST_CHECK_RETV(g_hTest, g_ahClasses[i] != NIL_RTLOCKVALCLASS);
802 RTTEST_CHECK_RETV(g_hTest, i == 4 || g_ahClasses[i] == g_ahClasses[i - 1]);
803 if (i == 4)
804 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectLO-None"), VINF_SUCCESS);
805 else if (i == 5)
806 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_ANY, "RTCritSectLO-Any"), VINF_SUCCESS);
807 else
808 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_USER + i, "RTCritSectLO-User"), VINF_SUCCESS);
809
810 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 1 + (i - 4 + 1) * 2); /* released in cleanup. */
811 }
812 }
813
814 /* Enter the first 4 critsects in ascending order and thereby defining
815 this as a valid lock order. */
816 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
817 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
818 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
819 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
820
821 /* Now, leave and re-enter the critsects in a way that should break the
822 order and check that we get the appropriate response. */
823 int rc;
824 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
825 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[0]), VERR_SEM_LV_WRONG_ORDER);
826 if (RT_SUCCESS(rc))
827 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
828
829 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
830 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[1]), VERR_SEM_LV_WRONG_ORDER);
831 if (RT_SUCCESS(rc))
832 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
833
834 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
835 RTTEST_CHECK_RC(g_hTest, rc= RTCritSectEnter(&g_aCritSects[2]), VERR_SEM_LV_WRONG_ORDER);
836 if (RT_SUCCESS(rc))
837 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
838
839 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
840
841 /* Check that recursion isn't subject to order checks. */
842 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
843 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
844 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
845 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
846 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
847 if (RT_SUCCESS(rc))
848 {
849 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
850 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
851 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
852 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
853
854 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
855 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
856 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
857 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
858 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
859 }
860 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
861 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
862 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
863 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
864
865 /* Enable strict release order for class 2 and check that violations
866 are caught. */
867 RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);
868
869 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
870 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
871 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
872 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
873
874 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
875 if (RT_FAILURE(rc))
876 {
877 /* applies to recursions as well */
878 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
879 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
880 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
881 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
882 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
883 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
884 }
885 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
886 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
887 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
888 if (RT_FAILURE(rc))
889 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
890
891 /* Test that sub-class order works (4 = NONE, 5 = ANY, 6+ = USER). */
892 uint32_t cErrorsBefore = RTTestErrorCount(g_hTest);
893 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[7]), VINF_SUCCESS);
894
895 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[4]), VERR_SEM_LV_WRONG_ORDER);
896 if (RT_SUCCESS(rc))
897 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[4]), VINF_SUCCESS);
898
899 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
900 if (RT_SUCCESS(rc))
901 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
902
903 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[8]), VINF_SUCCESS);
904 if (RT_SUCCESS(rc))
905 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[8]), VINF_SUCCESS);
906
907 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[6]), VERR_SEM_LV_WRONG_ORDER);
908 if (RT_SUCCESS(rc))
909 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[6]), VINF_SUCCESS);
910
911 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[7]), VINF_SUCCESS);
912 if (RT_SUCCESS(rc))
913 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[7]), VINF_SUCCESS);
914 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[7]), VINF_SUCCESS);
915
916 /* Check that NONE trumps both ANY and USER. */
917 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[4]), VINF_SUCCESS);
918
919 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[5]), VERR_SEM_LV_WRONG_ORDER);
920 if (RT_SUCCESS(rc))
921 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
922
923 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[6]), VERR_SEM_LV_WRONG_ORDER);
924 if (RT_SUCCESS(rc))
925 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[6]), VINF_SUCCESS);
926
927 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[4]), VINF_SUCCESS);
928
929 /* Take all the locks using sub-classes. */
930 if (cErrorsBefore == RTTestErrorCount(g_hTest))
931 {
932 bool fSavedQuiet = RTLockValidatorSetQuiet(true);
933 for (uint32_t i = 6; i < RT_ELEMENTS(g_aCritSects); i++)
934 {
935 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[i]), VINF_SUCCESS);
936 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[4]), VERR_SEM_LV_WRONG_ORDER);
937 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
938 }
939 for (uint32_t i = 6; i < RT_ELEMENTS(g_aCritSects); i++)
940 {
941 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[i]), VINF_SUCCESS);
942 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
943 }
944 RTLockValidatorSetQuiet(fSavedQuiet);
945 }
946
947 /* Work up some hash statistics and trigger a violation to show them. */
948 for (uint32_t i = 0; i < 10240; i++)
949 {
950 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
951 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
952 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
953 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
954 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
955
956 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
957 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
958 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
959 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
960 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
961 }
962 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
963 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VERR_SEM_LV_WRONG_ORDER);
964 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
965
966 /* clean up */
967 //for (int i = RT_ELEMENTS(g_ahClasses) - 1; i >= 0; i--)
968 for (unsigned i = 0; i < RT_ELEMENTS(g_ahClasses); i++)
969 {
970 uint32_t c;
971 if (i <= 3)
972 RTTEST_CHECK_MSG(g_hTest, (c = RTLockValidatorClassRelease(g_ahClasses[i])) == 5 - i,
973 (g_hTest, "c=%u i=%u\n", c, i));
974 else
975 {
976 uint32_t cExpect = 1 + (RT_ELEMENTS(g_ahClasses) - i) * 2 - 1;
977 RTTEST_CHECK_MSG(g_hTest, (c = RTLockValidatorClassRelease(g_ahClasses[i])) == cExpect,
978 (g_hTest, "c=%u e=%u i=%u\n", c, cExpect, i));
979 }
980 g_ahClasses[i] = NIL_RTLOCKVALCLASS;
981 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectDelete(&g_aCritSects[i]), VINF_SUCCESS);
982 }
983}
984
985
/**
 * Lock order validation on critical sections (single thread).
 *
 * Defines an ascending lock order over four autodidact classes, then checks
 * that out-of-order re-acquisition is flagged, that recursive entry is exempt
 * from order checking, and that strict release order is enforced once enabled
 * for a class.
 */
static void testLo2(void)
{
    RTTestSub(g_hTest, "locking order, critsect");

    /* Initialize the critsection with all different classes */
    for (unsigned i = 0; i < 4; i++)
    {
        RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo2-%u", i), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectLO"), VINF_SUCCESS);
        /* Reference count sanity check (presumably create + critsect record
           + this retain = 3; back to 2 after the release — confirm against
           RTLockValidatorClassCreate/Retain semantics). */
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 3);
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 2);
    }

    /* Check the sub-class API: the setter returns the previous sub-class. */
    RTTEST_CHECK(g_hTest, RTCritSectSetSubClass(&g_aCritSects[0], RTLOCKVAL_SUB_CLASS_ANY) == RTLOCKVAL_SUB_CLASS_NONE);
    RTTEST_CHECK(g_hTest, RTCritSectSetSubClass(&g_aCritSects[0], RTLOCKVAL_SUB_CLASS_NONE) == RTLOCKVAL_SUB_CLASS_ANY);

    /* Enter the first 4 critsects in ascending order and thereby defining
       this as a valid lock order. */
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);

    /* Now, leave and re-enter the critsects in a way that should break the
       order and check that we get the appropriate response. */
    int rc;
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[0]), VERR_SEM_LV_WRONG_ORDER);
    if (RT_SUCCESS(rc)) /* non-strict builds may let the enter succeed; undo it */
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);

    /* Check that recursion isn't subject to order checks. */
    RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);

    /* Enable strict release order for class 2 and check that violations
       are caught - including recursion. */
    RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS); /* start recursion */
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS); /* end recursion */
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);

    /* clean up */
    for (int i = 4 - 1; i >= 0; i--)
    {
        RTTEST_CHECK(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 1);
        g_ahClasses[i] = NIL_RTLOCKVALCLASS;
        RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectDelete(&g_aCritSects[i]), VINF_SUCCESS);
    }
}
1044
1045
/**
 * Lock order validation on read-write semaphores (single thread).
 *
 * Defines an ascending lock order across six semaphores using a mix of read
 * and write acquisitions, then checks order violations, recursion exemption,
 * and strict release order enforcement (enabled for classes 2 and 3).
 */
static void testLo3(void)
{
    RTTestSub(g_hTest, "locking order, read-write");

    /* Initialize the read-write semaphores with all different classes. */
    for (unsigned i = 0; i < 6; i++)
    {
        RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo3-%u", i), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemRWCreateEx(&g_ahSemRWs[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "hSemRW-Lo3-%u", i), VINF_SUCCESS);
        /* Reference count sanity check (presumably create + the RW sem's two
           validator records + this retain = 4; back to 3 after releasing). */
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 4);
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 3);
    }

    /* Check the sub-class API: the setter returns the previous sub-class. */
    RTTEST_CHECK(g_hTest, RTSemRWSetSubClass(g_ahSemRWs[0], RTLOCKVAL_SUB_CLASS_ANY) == RTLOCKVAL_SUB_CLASS_NONE);
    RTTEST_CHECK(g_hTest, RTSemRWSetSubClass(g_ahSemRWs[0], RTLOCKVAL_SUB_CLASS_NONE) == RTLOCKVAL_SUB_CLASS_ANY);

    /* Enter the six semaphores in ascending order (mixing read and write
       requests) and thereby defining this as a valid lock order. */
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[0], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestRead( g_ahSemRWs[1], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestRead( g_ahSemRWs[2], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[4], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[5], RT_INDEFINITE_WAIT), VINF_SUCCESS);

    /* Now, leave and re-enter the semaphores in a way that should break the
       order and check that we get the appropriate response. */
    int rc;
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[0]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(g_ahSemRWs[0], RT_INDEFINITE_WAIT), VERR_SEM_LV_WRONG_ORDER);
    if (RT_SUCCESS(rc)) /* non-strict builds may let the request succeed; undo it */
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[0]), VINF_SUCCESS);

    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(g_ahSemRWs[1]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestRead(g_ahSemRWs[1], RT_INDEFINITE_WAIT), VERR_SEM_LV_WRONG_ORDER);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(g_ahSemRWs[1]), VINF_SUCCESS);

    /* Check that recursion isn't subject to order checks. */
    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestRead(g_ahSemRWs[2], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(g_ahSemRWs[2]), VINF_SUCCESS);
    RTTEST_CHECK(g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 1);

    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(g_ahSemRWs[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VINF_SUCCESS);
    RTTEST_CHECK(g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 1);

    /* Enable strict release order for class 2 and 3, then check that violations
       are caught - including recursion. */
    RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[3], true), VINF_SUCCESS);

    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestRead( g_ahSemRWs[2], RT_INDEFINITE_WAIT), VINF_SUCCESS); /* start recursion */
    RTTEST_CHECK(   g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 2);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK(   g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 2);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestRead( g_ahSemRWs[4], RT_INDEFINITE_WAIT), VINF_SUCCESS); /* (mixed) */

    /* Releasing sems 2 and 3 here violates strict order; their recursion
       counts must remain untouched by the rejected releases. */
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK(   g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 2);
    RTTEST_CHECK(   g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 2);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[4]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VINF_SUCCESS);
    RTTEST_CHECK(   g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 1);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[2]), VINF_SUCCESS); /* end recursion */
    RTTEST_CHECK(   g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 1);

    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK(g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 1);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK(g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 1);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[5]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[4]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[2]), VINF_SUCCESS);

    /* clean up */
    for (int i = 6 - 1; i >= 0; i--)
    {
        uint32_t c;
        RTTEST_CHECK_MSG(g_hTest, (c = RTLockValidatorClassRelease(g_ahClasses[i])) == 2, (g_hTest, "c=%u i=%u\n", c, i));
        g_ahClasses[i] = NIL_RTLOCKVALCLASS;
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemRWDestroy(g_ahSemRWs[i]), VINF_SUCCESS);
        g_ahSemRWs[i] = NIL_RTSEMRW;
    }
}
1136
1137
/**
 * Lock order validation on mutex semaphores (single thread).
 *
 * Defines an ascending lock order over four mutexes, then checks that
 * out-of-order re-acquisition is flagged, that recursion is exempt from
 * order checking, and that strict release order is enforced for class 2.
 */
static void testLo4(void)
{
    RTTestSub(g_hTest, "locking order, mutex");

    /* Initialize the mutex semaphores with all different classes. */
    for (unsigned i = 0; i < 4; i++)
    {
        RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo4-%u", i), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemMutexCreateEx(&g_ahSemMtxes[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTSemMutexLo4-%u", i), VINF_SUCCESS);
        /* Reference count sanity check (presumably create + mutex record
           + this retain = 3; back to 2 after the release). */
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 3);
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 2);
    }

    /* Check the sub-class API: the setter returns the previous sub-class. */
    RTTEST_CHECK(g_hTest, RTSemMutexSetSubClass(g_ahSemMtxes[0], RTLOCKVAL_SUB_CLASS_ANY) == RTLOCKVAL_SUB_CLASS_NONE);
    RTTEST_CHECK(g_hTest, RTSemMutexSetSubClass(g_ahSemMtxes[0], RTLOCKVAL_SUB_CLASS_NONE) == RTLOCKVAL_SUB_CLASS_ANY);

    /* Enter the four mutexes in ascending order and thereby defining
       this as a valid lock order. */
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[0], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[1], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[2], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);

    /* Now, leave and re-enter the mutexes in a way that should break the
       order and check that we get the appropriate response. */
    int rc;
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[0]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(g_ahSemMtxes[0], RT_INDEFINITE_WAIT), VERR_SEM_LV_WRONG_ORDER);
    if (RT_SUCCESS(rc)) /* non-strict builds may let the request succeed; undo it */
        RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[0]), VINF_SUCCESS);

    /* Check that recursion isn't subject to order checks. */
    RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(g_ahSemMtxes[1], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[1]), VINF_SUCCESS);

    /* Enable strict release order for class 2 and check that violations
       are caught - including recursion. */
    RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);

    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[2], RT_INDEFINITE_WAIT), VINF_SUCCESS); /* start recursion */
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[2]), VINF_SUCCESS); /* end recursion */

    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[1]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[2]), VINF_SUCCESS);

    /* clean up */
    for (int i = 4 - 1; i >= 0; i--)
    {
        RTTEST_CHECK(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 1);
        g_ahClasses[i] = NIL_RTLOCKVALCLASS;
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemMutexDestroy(g_ahSemMtxes[i]), VINF_SUCCESS);
    }
}
1198
1199
1200
1201
1202static const char *testCheckIfLockValidationIsCompiledIn(void)
1203{
1204 RTCRITSECT CritSect;
1205 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectInit(&CritSect), "");
1206 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectEnter(&CritSect), "");
1207 bool fRet = CritSect.pValidatorRec
1208 && CritSect.pValidatorRec->hThread == RTThreadSelf();
1209 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectLeave(&CritSect), "");
1210 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectDelete(&CritSect), "");
1211 if (!fRet)
1212 return "Lock validation is not enabled for critical sections";
1213
1214 /* deadlock detection for RTSemRW */
1215 RTSEMRW hSemRW;
1216 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWCreateEx(&hSemRW, 0 /*fFlags*/, NIL_RTLOCKVALCLASS,
1217 RTLOCKVAL_SUB_CLASS_NONE, "RTSemRW-1"), NULL);
1218 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWRequestRead(hSemRW, 50), "");
1219 int rc = RTSemRWRequestWrite(hSemRW, 1);
1220 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), "");
1221 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWReleaseRead(hSemRW), "");
1222 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWDestroy(hSemRW), "");
1223 if (rc != VERR_SEM_LV_ILLEGAL_UPGRADE)
1224 return "Deadlock detection is not enabled for the read/write semaphores";
1225
1226 /* lock order for RTSemRW */
1227 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWCreateEx(&hSemRW, 0 /*fFlags*/,
1228 RTLockValidatorClassCreateUnique(RT_SRC_POS, NULL),
1229 RTLOCKVAL_SUB_CLASS_NONE, "RTSemRW-2"), "");
1230 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWRequestRead(hSemRW, 50), "");
1231 rc = RTSemRWRequestWrite(hSemRW, 1);
1232 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), "");
1233 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWReleaseRead(hSemRW), "");
1234 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWDestroy(hSemRW), "");
1235 if (rc != VERR_SEM_LV_WRONG_ORDER)
1236 {
1237 RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "%Rrc\n", rc);
1238 return "Lock order validation is not enabled for the read/write semaphores";
1239 }
1240
1241 /* lock order for RTSemMutex */
1242 RTSEMMUTEX hSemMtx1;
1243 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexCreateEx(&hSemMtx1, 0 /*fFlags*/,
1244 RTLockValidatorClassCreateUnique(RT_SRC_POS, NULL),
1245 RTLOCKVAL_SUB_CLASS_NONE, "RTSemMtx-1"), "");
1246 RTSEMMUTEX hSemMtx2;
1247 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexCreateEx(&hSemMtx2, 0 /*fFlags*/,
1248 RTLockValidatorClassCreateUnique(RT_SRC_POS, NULL),
1249 RTLOCKVAL_SUB_CLASS_NONE, "RTSemMtx-2"), "");
1250 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRequest(hSemMtx1, 50), "");
1251 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRequest(hSemMtx2, 50), "");
1252 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRelease(hSemMtx2), "");
1253 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRelease(hSemMtx1), "");
1254
1255 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRequest(hSemMtx2, 50), "");
1256 rc = RTSemMutexRequest(hSemMtx1, 50);
1257 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), "");
1258 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRelease(hSemMtx2), "");
1259 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexDestroy(hSemMtx2), ""); hSemMtx2 = NIL_RTSEMMUTEX;
1260 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexDestroy(hSemMtx1), ""); hSemMtx1 = NIL_RTSEMMUTEX;
1261 if (rc != VERR_SEM_LV_WRONG_ORDER)
1262 return "Lock order validation is not enabled for the mutex semaphores";
1263
1264 /* signaller checks on event sems. */
1265 RTSEMEVENT hSemEvt;
1266 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventCreate(&hSemEvt), "");
1267 RTSemEventSetSignaller(hSemEvt, RTThreadSelf());
1268 RTSemEventSetSignaller(hSemEvt, NIL_RTTHREAD);
1269 rc = RTSemEventSignal(hSemEvt);
1270 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), "");
1271 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventDestroy(hSemEvt), "");
1272 if (rc != VERR_SEM_LV_NOT_SIGNALLER)
1273 return "Signalling checks are not enabled for the event semaphores";
1274
1275 /* signaller checks on multiple release event sems. */
1276 RTSEMEVENTMULTI hSemEvtMulti;
1277 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventMultiCreate(&hSemEvtMulti), "");
1278 RTSemEventMultiSetSignaller(hSemEvtMulti, RTThreadSelf());
1279 RTSemEventMultiSetSignaller(hSemEvtMulti, NIL_RTTHREAD);
1280 rc = RTSemEventMultiSignal(hSemEvtMulti);
1281 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), "");
1282 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventMultiDestroy(hSemEvtMulti), "");
1283 if (rc != VERR_SEM_LV_NOT_SIGNALLER)
1284 return "Signalling checks are not enabled for the multiple release event semaphores";
1285
1286 /* we're good */
1287 return NULL;
1288}
1289
1290
1291int main()
1292{
1293 /*
1294 * Init.
1295 */
1296 int rc = RTTestInitAndCreate("tstRTLockValidator", &g_hTest);
1297 if (rc)
1298 return rc;
1299 RTTestBanner(g_hTest);
1300
1301 RTLockValidatorSetEnabled(true);
1302 RTLockValidatorSetMayPanic(false);
1303 RTLockValidatorSetQuiet(true);
1304 const char *pszWhyDisabled = testCheckIfLockValidationIsCompiledIn();
1305 if (pszWhyDisabled)
1306 return RTTestErrorCount(g_hTest) > 0
1307 ? RTTestSummaryAndDestroy(g_hTest)
1308 : RTTestSkipAndDestroy(g_hTest, pszWhyDisabled);
1309 RTLockValidatorSetQuiet(false);
1310
1311 bool fTestDd = true;
1312 bool fTestLo = true;
1313
1314 /*
1315 * Some initial tests with verbose output (all single pass).
1316 */
1317 if (fTestDd)
1318 {
1319 testDd1(3, 0);
1320 testDd2(1, 0);
1321 testDd2(3, 0);
1322 testDd5(3, 0);
1323 testDd6(3, 0);
1324 testDd7(3, 0);
1325 }
1326 if (fTestLo)
1327 {
1328 testLo1();
1329 testLo2();
1330 testLo3();
1331 testLo4();
1332 }
1333
1334
1335 /*
1336 * If successful, perform more thorough testing without noisy output.
1337 */
1338 if (RTTestErrorCount(g_hTest) == 0)
1339 {
1340 RTLockValidatorSetQuiet(true);
1341
1342 if (fTestDd)
1343 {
1344 testDd1( 2, SECS_SIMPLE_TEST);
1345 testDd1( 3, SECS_SIMPLE_TEST);
1346 testDd1( 7, SECS_SIMPLE_TEST);
1347 testDd1(10, SECS_SIMPLE_TEST);
1348 testDd1(15, SECS_SIMPLE_TEST);
1349 testDd1(30, SECS_SIMPLE_TEST);
1350
1351 testDd2( 1, SECS_SIMPLE_TEST);
1352 testDd2( 2, SECS_SIMPLE_TEST);
1353 testDd2( 3, SECS_SIMPLE_TEST);
1354 testDd2( 7, SECS_SIMPLE_TEST);
1355 testDd2(10, SECS_SIMPLE_TEST);
1356 testDd2(15, SECS_SIMPLE_TEST);
1357 testDd2(30, SECS_SIMPLE_TEST);
1358
1359 testDd3( 2, SECS_SIMPLE_TEST);
1360 testDd3(10, SECS_SIMPLE_TEST);
1361
1362 testDd4( 2, SECS_RACE_TEST);
1363 testDd4( 6, SECS_RACE_TEST);
1364 testDd4(10, SECS_RACE_TEST);
1365 testDd4(30, SECS_RACE_TEST);
1366
1367 testDd5( 2, SECS_RACE_TEST);
1368 testDd5( 3, SECS_RACE_TEST);
1369 testDd5( 7, SECS_RACE_TEST);
1370 testDd5(10, SECS_RACE_TEST);
1371 testDd5(15, SECS_RACE_TEST);
1372 testDd5(30, SECS_RACE_TEST);
1373
1374 testDd6( 2, SECS_SIMPLE_TEST);
1375 testDd6( 3, SECS_SIMPLE_TEST);
1376 testDd6( 7, SECS_SIMPLE_TEST);
1377 testDd6(10, SECS_SIMPLE_TEST);
1378 testDd6(15, SECS_SIMPLE_TEST);
1379 testDd6(30, SECS_SIMPLE_TEST);
1380
1381 testDd7( 2, SECS_SIMPLE_TEST);
1382 testDd7( 3, SECS_SIMPLE_TEST);
1383 testDd7( 7, SECS_SIMPLE_TEST);
1384 testDd7(10, SECS_SIMPLE_TEST);
1385 testDd7(15, SECS_SIMPLE_TEST);
1386 testDd7(30, SECS_SIMPLE_TEST);
1387 }
1388 }
1389
1390 return RTTestSummaryAndDestroy(g_hTest);
1391}
1392
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette