VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/critsect-generic.cpp @ 59036

Last change on this file since 59036 was 59036, checked in by vboxsync, 9 years ago:

IPRT,VMMR0: Added trace points to both the IPRT critical section types.
/* $Id: critsect-generic.cpp 59036 2015-12-07 17:49:30Z vboxsync $ */
/** @file
 * IPRT - Critical Section, Generic.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define RTCRITSECT_WITHOUT_REMAPPING
#include <iprt/critsect.h>
#include "internal/iprt.h"

#include <iprt/semaphore.h>
#include <iprt/thread.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/err.h>
#include "internal/thread.h"
#include "internal/strict.h"

/* Two issues here: (1) the tracepoint generator uses IPRT itself, and (2) there
   can only be one .d file per module. */
#ifdef IPRT_WITH_DTRACE
# include IPRT_DTRACE_INCLUDE
# define IPRT_CRITSECT_ENTERED  RT_CONCAT(IPRT_WITH_DTRACE,IPRT_CRITSECT_ENTERED)
# define IPRT_CRITSECT_LEAVING  RT_CONCAT(IPRT_WITH_DTRACE,IPRT_CRITSECT_LEAVING)
# define IPRT_CRITSECT_BUSY     RT_CONCAT(IPRT_WITH_DTRACE,IPRT_CRITSECT_BUSY)
# define IPRT_CRITSECT_WAITING  RT_CONCAT(IPRT_WITH_DTRACE,IPRT_CRITSECT_WAITING)
#else
# define IPRT_CRITSECT_ENTERED(a_pvCritSect, a_pszName, a_cLockers, a_cNestings)           do {} while (0)
# define IPRT_CRITSECT_LEAVING(a_pvCritSect, a_pszName, a_cLockers, a_cNestings)           do {} while (0)
# define IPRT_CRITSECT_BUSY(   a_pvCritSect, a_pszName, a_cLockers, a_pvNativeOwnerThread) do {} while (0)
# define IPRT_CRITSECT_WAITING(a_pvCritSect, a_pszName, a_cLockers, a_pvNativeOwnerThread) do {} while (0)
#endif
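
/*
 * Editorial note (illustrative, not part of the original file): RT_CONCAT
 * pastes the expanded value of IPRT_WITH_DTRACE onto the probe name.  So a
 * module built with, say, -DIPRT_WITH_DTRACE=MYMOD_ (hypothetical prefix)
 * would have IPRT_CRITSECT_ENTERED(...) expand to
 * MYMOD_IPRT_CRITSECT_ENTERED(...), i.e. the probe generated from that
 * module's single .d file, which is what the comment above alludes to.
 */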



RTDECL(int) RTCritSectInit(PRTCRITSECT pCritSect)
{
    return RTCritSectInitEx(pCritSect, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTCritSect");
}
RT_EXPORT_SYMBOL(RTCritSectInit);


RTDECL(int) RTCritSectInitEx(PRTCRITSECT pCritSect, uint32_t fFlags, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                             const char *pszNameFmt, ...)
{
    AssertReturn(!(fFlags & ~(RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_BOOTSTRAP_HACK | RTCRITSECT_FLAGS_NOP)),
                 VERR_INVALID_PARAMETER);

    /*
     * Initialize the structure.
     */
    pCritSect->u32Magic = RTCRITSECT_MAGIC;
#ifdef IN_RING0
    pCritSect->fFlags = fFlags | RTCRITSECT_FLAGS_RING0;
#else
    pCritSect->fFlags = fFlags & ~RTCRITSECT_FLAGS_RING0;
#endif
    pCritSect->cNestings = 0;
    pCritSect->cLockers = -1;
    pCritSect->NativeThreadOwner = NIL_RTNATIVETHREAD;
    pCritSect->pValidatorRec = NULL;
    int rc = VINF_SUCCESS;
#ifdef RTCRITSECT_STRICT
    if (!(fFlags & (RTCRITSECT_FLAGS_BOOTSTRAP_HACK | RTCRITSECT_FLAGS_NOP)))
    {
        if (!pszNameFmt)
        {
            static uint32_t volatile s_iCritSectAnon = 0;
            rc = RTLockValidatorRecExclCreate(&pCritSect->pValidatorRec, hClass, uSubClass, pCritSect,
                                              !(fFlags & RTCRITSECT_FLAGS_NO_LOCK_VAL),
                                              "RTCritSect-%u", ASMAtomicIncU32(&s_iCritSectAnon) - 1);
        }
        else
        {
            va_list va;
            va_start(va, pszNameFmt);
            rc = RTLockValidatorRecExclCreateV(&pCritSect->pValidatorRec, hClass, uSubClass, pCritSect,
                                               !(fFlags & RTCRITSECT_FLAGS_NO_LOCK_VAL), pszNameFmt, va);
            va_end(va);
        }
    }
#endif
    if (RT_SUCCESS(rc))
    {
#ifdef IN_RING0
        rc = RTSemEventCreate(&pCritSect->EventSem);

#else
        rc = RTSemEventCreateEx(&pCritSect->EventSem,
                                fFlags & RTCRITSECT_FLAGS_BOOTSTRAP_HACK
                                ? RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK
                                : RTSEMEVENT_FLAGS_NO_LOCK_VAL,
                                NIL_RTLOCKVALCLASS,
                                NULL);
#endif
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
#ifdef RTCRITSECT_STRICT
        RTLockValidatorRecExclDestroy(&pCritSect->pValidatorRec);
#endif
    }

    AssertRC(rc);
    pCritSect->EventSem = NULL;
    pCritSect->u32Magic = (uint32_t)rc;
    return rc;
}
RT_EXPORT_SYMBOL(RTCritSectInitEx);


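/*
 * Editorial usage sketch (not part of the original file): how a caller might
 * create a named critical section with the extended initializer.  The flag
 * choice and the name format are illustrative assumptions, not taken from
 * this file.
 *
 *     RTCRITSECT CritSect;
 *     int rc = RTCritSectInitEx(&CritSect, RTCRITSECT_FLAGS_NO_NESTING,
 *                               NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE,
 *                               "MyCritSect-%u", 0);
 *     if (RT_FAILURE(rc))
 *         return rc;
 */

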
RTDECL(uint32_t) RTCritSectSetSubClass(PRTCRITSECT pCritSect, uint32_t uSubClass)
{
# ifdef RTCRITSECT_STRICT
    AssertPtrReturn(pCritSect, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(!(pCritSect->fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
    return RTLockValidatorRecExclSetSubClass(pCritSect->pValidatorRec, uSubClass);
# else
    return RTLOCKVAL_SUB_CLASS_INVALID;
# endif
}


DECL_FORCE_INLINE(int) rtCritSectTryEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    /*AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);*/
#ifdef IN_RING0
    Assert(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0));
#endif

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Try to take the lock. (cLockers is -1 when the section is free.)
     */
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();
    if (!ASMAtomicCmpXchgS32(&pCritSect->cLockers, 0, -1))
    {
        /*
         * Somebody owns it already (or soon will). Perhaps it's us?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                int rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                    return rc9;
#endif
                int32_t cLockers = ASMAtomicIncS32(&pCritSect->cLockers); NOREF(cLockers);
                pCritSect->cNestings++;
                IPRT_CRITSECT_ENTERED(pCritSect, NULL, cLockers, pCritSect->cNestings);
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
            return VERR_SEM_NESTED;
        }
        IPRT_CRITSECT_BUSY(pCritSect, NULL, pCritSect->cLockers, (void *)pCritSect->NativeThreadOwner);
        return VERR_SEM_BUSY;
    }

    /*
     * First time.
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
#endif
    IPRT_CRITSECT_ENTERED(pCritSect, NULL, 0, 1);

    return VINF_SUCCESS;
}


RTDECL(int) RTCritSectTryEnter(PRTCRITSECT pCritSect)
{
#ifndef RTCRITSECT_STRICT
    return rtCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtCritSectTryEnter(pCritSect, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTCritSectTryEnter);


RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtCritSectTryEnter(pCritSect, &SrcPos);
}
RT_EXPORT_SYMBOL(RTCritSectTryEnterDebug);

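/*
 * Editorial usage sketch (not part of the original file): the non-blocking
 * acquisition pattern.  VERR_SEM_BUSY means another thread currently owns
 * the section; the caller decides whether to retry, fall back to the
 * blocking RTCritSectEnter, or give up.
 *
 *     int rc = RTCritSectTryEnter(&CritSect);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... touch the protected state ...
 *         RTCritSectLeave(&CritSect);
 *     }
 *     else
 *         Assert(rc == VERR_SEM_BUSY); // contended, retry or back off
 */
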

DECL_FORCE_INLINE(int) rtCritSectEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertPtr(pCritSect);
    AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);
#ifdef IN_RING0
    Assert(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0));
#endif

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Who is calling, and is the locking order right?
     */
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();
#ifdef RTCRITSECT_STRICT
    RTTHREAD hThreadSelf = pCritSect->pValidatorRec
                         ? RTThreadSelfAutoAdopt()
                         : RTThreadSelf();
    int rc9;
    if (pCritSect->pValidatorRec) /* (bootstrap) */
    {
        rc9 = RTLockValidatorRecExclCheckOrder(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Increment the waiter counter.
     * This becomes 0 when the section is free.
     */
    int32_t cLockers = ASMAtomicIncS32(&pCritSect->cLockers);
    if (cLockers > 0)
    {
        /*
         * Nested?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                {
                    ASMAtomicDecS32(&pCritSect->cLockers);
                    return rc9;
                }
#endif
                pCritSect->cNestings++;
                IPRT_CRITSECT_ENTERED(pCritSect, NULL, cLockers, pCritSect->cNestings);
                return VINF_SUCCESS;
            }

            AssertBreakpoint(); /* don't do normal assertion here, the logger uses this code too. */
            ASMAtomicDecS32(&pCritSect->cLockers);
            return VERR_SEM_NESTED;
        }

        /*
         * Wait for the current owner to release it.
         */
        IPRT_CRITSECT_WAITING(pCritSect, NULL, cLockers, (void *)pCritSect->NativeThreadOwner);
#ifndef RTCRITSECT_STRICT
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        for (;;)
        {
#ifdef RTCRITSECT_STRICT
            rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, false);
            if (RT_FAILURE(rc9))
            {
                ASMAtomicDecS32(&pCritSect->cLockers);
                return rc9;
            }
#elif defined(IN_RING3)
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, false);
#endif
            int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
#ifdef IN_RING3
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
#endif

            if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
                return VERR_SEM_DESTROYED;
            if (rc == VINF_SUCCESS)
                break;
            AssertMsg(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
        }
        AssertMsg(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD, ("pCritSect->NativeThreadOwner=%p\n", pCritSect->NativeThreadOwner));
    }

    /*
     * First time.
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, true);
#endif
    IPRT_CRITSECT_ENTERED(pCritSect, NULL, 0, 1);

    return VINF_SUCCESS;
}


RTDECL(int) RTCritSectEnter(PRTCRITSECT pCritSect)
{
#ifndef RTCRITSECT_STRICT
    return rtCritSectEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtCritSectEnter(pCritSect, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTCritSectEnter);


RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtCritSectEnter(pCritSect, &SrcPos);
}
RT_EXPORT_SYMBOL(RTCritSectEnterDebug);


RTDECL(int) RTCritSectLeave(PRTCRITSECT pCritSect)
{
    /*
     * Assert sanity and check for NOP.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
#ifdef IN_RING0
    Assert(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0));
#endif
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Assert ownership and so on.
     */
    Assert(pCritSect->cNestings > 0);
    Assert(pCritSect->cLockers >= 0);
    Assert(pCritSect->NativeThreadOwner == RTThreadNativeSelf());

#ifdef RTCRITSECT_STRICT
    int rc9 = RTLockValidatorRecExclReleaseOwner(pCritSect->pValidatorRec, pCritSect->cNestings == 1);
    if (RT_FAILURE(rc9))
        return rc9;
#endif

    /*
     * Decrement the nesting count; when it reaches 0 we release the critsect.
     */
    uint32_t cNestings = --pCritSect->cNestings;
    IPRT_CRITSECT_LEAVING(pCritSect, NULL, ASMAtomicUoReadS32(&pCritSect->cLockers) - 1, cNestings);
    if (cNestings > 0)
        ASMAtomicDecS32(&pCritSect->cLockers);
    else
    {
        /*
         * Clear the owner.
         * Decrement the waiter count; if it is still >= 0 somebody is waiting and we must wake one up.
         */
        ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicDecS32(&pCritSect->cLockers) >= 0)
        {
            int rc = RTSemEventSignal(pCritSect->EventSem);
            AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
        }
    }
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTCritSectLeave);


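/*
 * Editorial usage sketch (not part of the original file): the blocking
 * enter/leave pair, including a legal recursive entry (allowed by default
 * unless RTCRITSECT_FLAGS_NO_NESTING was given at init time).  Every
 * successful enter must be balanced by exactly one leave.
 *
 *     RTCritSectEnter(&CritSect);
 *     RTCritSectEnter(&CritSect);     // nesting: cNestings becomes 2
 *     // ... protected work ...
 *     RTCritSectLeave(&CritSect);     // cNestings back to 1, still owned
 *     RTCritSectLeave(&CritSect);     // released, a waiter (if any) is woken
 */
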

#ifdef IN_RING3

static int rtCritSectEnterMultiple(size_t cCritSects, PRTCRITSECT *papCritSects, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(cCritSects > 0);
    AssertPtr(papCritSects);

    /*
     * Try to get them all.
     */
    int rc = VERR_INVALID_PARAMETER;
    size_t i;
    for (i = 0; i < cCritSects; i++)
    {
        rc = rtCritSectTryEnter(papCritSects[i], pSrcPos);
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_SUCCESS(rc))
        return rc;

    /*
     * The retry loop.
     */
    for (unsigned cTries = 0; ; cTries++)
    {
        /*
         * We've failed; release any locks we already acquired ('i' is the index of the lock that failed).
         */
        size_t j = i;
        while (j-- > 0)
        {
            int rc2 = RTCritSectLeave(papCritSects[j]);
            AssertRC(rc2);
        }
        if (rc != VERR_SEM_BUSY)
            return rc;

        /*
         * Try to prevent any theoretical synchronous races with other threads.
         */
        Assert(cTries < 1000000);
        if (cTries > 10000)
            RTThreadSleep(cTries % 3);

        /*
         * Wait on the one we failed to get.
         */
        rc = rtCritSectEnter(papCritSects[i], pSrcPos);
        if (RT_FAILURE(rc))
            return rc;

        /*
         * Try to take the others.
         */
        for (j = 0; j < cCritSects; j++)
        {
            if (j != i)
            {
                rc = rtCritSectTryEnter(papCritSects[j], pSrcPos);
                if (RT_FAILURE(rc))
                    break;
            }
        }
        if (RT_SUCCESS(rc))
            return rc;

        /*
         * We failed.
         */
        if (i > j)
        {
            int rc2 = RTCritSectLeave(papCritSects[i]);
            AssertRC(rc2);
        }
        i = j;
    }
}


RTDECL(int) RTCritSectEnterMultiple(size_t cCritSects, PRTCRITSECT *papCritSects)
{
#ifndef RTCRITSECT_STRICT
    return rtCritSectEnterMultiple(cCritSects, papCritSects, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtCritSectEnterMultiple(cCritSects, papCritSects, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTCritSectEnterMultiple);


RTDECL(int) RTCritSectEnterMultipleDebug(size_t cCritSects, PRTCRITSECT *papCritSects, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtCritSectEnterMultiple(cCritSects, papCritSects, &SrcPos);
}
RT_EXPORT_SYMBOL(RTCritSectEnterMultipleDebug);



RTDECL(int) RTCritSectLeaveMultiple(size_t cCritSects, PRTCRITSECT *papCritSects)
{
    int rc = VINF_SUCCESS;
    for (size_t i = 0; i < cCritSects; i++)
    {
        int rc2 = RTCritSectLeave(papCritSects[i]);
        if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
            rc = rc2;
    }
    return rc;
}
RT_EXPORT_SYMBOL(RTCritSectLeaveMultiple);

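/*
 * Editorial usage sketch (not part of the original file): taking two sections
 * together.  RTCritSectEnterMultiple only returns success once all of them
 * are held; internally it backs off and releases everything before blocking,
 * which avoids the hold-and-wait deadlocks a naive enter-one-by-one loop
 * could cause.
 *
 *     PRTCRITSECT apCritSects[2] = { &CritSectA, &CritSectB };
 *     int rc = RTCritSectEnterMultiple(RT_ELEMENTS(apCritSects), apCritSects);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... work that needs both sections ...
 *         RTCritSectLeaveMultiple(RT_ELEMENTS(apCritSects), apCritSects);
 *     }
 */
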
#endif /* IN_RING3 */



RTDECL(int) RTCritSectDelete(PRTCRITSECT pCritSect)
{
    /*
     * Assert free waiters and so on.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->cNestings == 0);
    Assert(pCritSect->cLockers == -1);
    Assert(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD);
#ifdef IN_RING0
    Assert(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0));
#endif

    /*
     * Invalidate the structure and free the mutex.
     * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
     */
    ASMAtomicWriteU32(&pCritSect->u32Magic, ~RTCRITSECT_MAGIC);
    pCritSect->fFlags = 0;
    pCritSect->cNestings = 0;
    pCritSect->NativeThreadOwner = NIL_RTNATIVETHREAD;
    RTSEMEVENT EventSem = pCritSect->EventSem;
    pCritSect->EventSem = NIL_RTSEMEVENT;

    while (pCritSect->cLockers-- >= 0)
        RTSemEventSignal(EventSem);
    ASMAtomicWriteS32(&pCritSect->cLockers, -1);
    int rc = RTSemEventDestroy(EventSem);
    AssertRC(rc);

#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclDestroy(&pCritSect->pValidatorRec);
#endif

    return rc;
}
RT_EXPORT_SYMBOL(RTCritSectDelete);

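/*
 * Editorial usage sketch (not part of the original file): teardown.  The
 * asserts in RTCritSectDelete expect the section to be idle, i.e. no owner,
 * no nesting and no waiters, so the final leave must happen before delete.
 *
 *     RTCritSectLeave(&CritSect);     // last leave, nobody owns it now
 *     int rc = RTCritSectDelete(&CritSect);
 *     AssertRC(rc);
 */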