VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/critsect-generic.cpp@74460

Last change on this file since 74460 was 74373, checked in by vboxsync, 6 years ago

IPRT:critsect: Corrected RTCritSectEnterMultipleDebug signature again, prototype was wrong it turns out.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 18.4 KB
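For orientation, here is a minimal usage sketch of the API implemented in this file. It is an illustration only, not part of the source; the Example* functions and g_Example* globals are invented names, while the RTCritSect* calls and headers are the ones defined or included below.

#include <iprt/critsect.h>
#include <iprt/err.h>

static RTCRITSECT g_ExampleCritSect;        /* invented example name */
static uint32_t   g_cExampleCounter = 0;    /* shared state guarded by the critsect */

int ExampleInit(void)
{
    /* Create the critical section (see RTCritSectInit below). */
    return RTCritSectInit(&g_ExampleCritSect);
}

void ExampleIncrement(void)
{
    /* Enter, touch the shared state, leave. */
    int rc = RTCritSectEnter(&g_ExampleCritSect);
    if (RT_SUCCESS(rc))
    {
        g_cExampleCounter++;
        RTCritSectLeave(&g_ExampleCritSect);
    }
}

void ExampleTerm(void)
{
    /* Destroy the critical section once no thread uses it anymore. */
    RTCritSectDelete(&g_ExampleCritSect);
}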
/* $Id: critsect-generic.cpp 74373 2018-09-19 15:09:27Z vboxsync $ */
/** @file
 * IPRT - Critical Section, Generic.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define RTCRITSECT_WITHOUT_REMAPPING
#include <iprt/critsect.h>
#include "internal/iprt.h"

#include <iprt/semaphore.h>
#include <iprt/thread.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/err.h>
#include "internal/thread.h"
#include "internal/strict.h"

/* Two issues here, (1) the tracepoint generator uses IPRT, and (2) only one .d
   file per module. */
#ifdef IPRT_WITH_DTRACE
# include IPRT_DTRACE_INCLUDE
# ifdef IPRT_DTRACE_PREFIX
#  define IPRT_CRITSECT_ENTERED RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECT_ENTERED)
#  define IPRT_CRITSECT_LEAVING RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECT_LEAVING)
#  define IPRT_CRITSECT_BUSY    RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECT_BUSY)
#  define IPRT_CRITSECT_WAITING RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECT_WAITING)
# endif
#else
# define IPRT_CRITSECT_ENTERED(a_pvCritSect, a_pszName, a_cLockers, a_cNestings)           do {} while (0)
# define IPRT_CRITSECT_LEAVING(a_pvCritSect, a_pszName, a_cLockers, a_cNestings)           do {} while (0)
# define IPRT_CRITSECT_BUSY(   a_pvCritSect, a_pszName, a_cLockers, a_pvNativeOwnerThread) do {} while (0)
# define IPRT_CRITSECT_WAITING(a_pvCritSect, a_pszName, a_cLockers, a_pvNativeOwnerThread) do {} while (0)
#endif



RTDECL(int) RTCritSectInit(PRTCRITSECT pCritSect)
{
    return RTCritSectInitEx(pCritSect, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTCritSect");
}
RT_EXPORT_SYMBOL(RTCritSectInit);


RTDECL(int) RTCritSectInitEx(PRTCRITSECT pCritSect, uint32_t fFlags, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                             const char *pszNameFmt, ...)
{
    AssertReturn(!(fFlags & ~(RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_BOOTSTRAP_HACK | RTCRITSECT_FLAGS_NOP)),
                 VERR_INVALID_PARAMETER);
    RT_NOREF_PV(hClass); RT_NOREF_PV(uSubClass); RT_NOREF_PV(pszNameFmt);

    /*
     * Initialize the structure.
     */
    pCritSect->u32Magic = RTCRITSECT_MAGIC;
#ifdef IN_RING0
    pCritSect->fFlags = fFlags | RTCRITSECT_FLAGS_RING0;
#else
    pCritSect->fFlags = fFlags & ~RTCRITSECT_FLAGS_RING0;
#endif
    pCritSect->cNestings = 0;
    pCritSect->cLockers = -1;
    pCritSect->NativeThreadOwner = NIL_RTNATIVETHREAD;
    pCritSect->pValidatorRec = NULL;
    int rc = VINF_SUCCESS;
#ifdef RTCRITSECT_STRICT
    if (!(fFlags & (RTCRITSECT_FLAGS_BOOTSTRAP_HACK | RTCRITSECT_FLAGS_NOP)))
    {
        if (!pszNameFmt)
        {
            static uint32_t volatile s_iCritSectAnon = 0;
            rc = RTLockValidatorRecExclCreate(&pCritSect->pValidatorRec, hClass, uSubClass, pCritSect,
                                              !(fFlags & RTCRITSECT_FLAGS_NO_LOCK_VAL),
                                              "RTCritSect-%u", ASMAtomicIncU32(&s_iCritSectAnon) - 1);
        }
        else
        {
            va_list va;
            va_start(va, pszNameFmt);
            rc = RTLockValidatorRecExclCreateV(&pCritSect->pValidatorRec, hClass, uSubClass, pCritSect,
                                               !(fFlags & RTCRITSECT_FLAGS_NO_LOCK_VAL), pszNameFmt, va);
            va_end(va);
        }
    }
#endif
    if (RT_SUCCESS(rc))
    {
#ifdef IN_RING0
        rc = RTSemEventCreate(&pCritSect->EventSem);

#else
        rc = RTSemEventCreateEx(&pCritSect->EventSem,
                                fFlags & RTCRITSECT_FLAGS_BOOTSTRAP_HACK
                                ? RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK
                                : RTSEMEVENT_FLAGS_NO_LOCK_VAL,
                                NIL_RTLOCKVALCLASS,
                                NULL);
#endif
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
#ifdef RTCRITSECT_STRICT
        RTLockValidatorRecExclDestroy(&pCritSect->pValidatorRec);
#endif
    }

    AssertRC(rc);
    pCritSect->EventSem = NULL;
    pCritSect->u32Magic = (uint32_t)rc;
    return rc;
}
RT_EXPORT_SYMBOL(RTCritSectInitEx);


RTDECL(uint32_t) RTCritSectSetSubClass(PRTCRITSECT pCritSect, uint32_t uSubClass)
{
# ifdef RTCRITSECT_STRICT
    AssertPtrReturn(pCritSect, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(!(pCritSect->fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
    return RTLockValidatorRecExclSetSubClass(pCritSect->pValidatorRec, uSubClass);
# else
    RT_NOREF_PV(pCritSect); RT_NOREF_PV(uSubClass);
    return RTLOCKVAL_SUB_CLASS_INVALID;
# endif
}


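/**
 * Common worker for RTCritSectTryEnter and RTCritSectTryEnterDebug.
 *
 * Tries to enter the critical section without blocking, returning
 * VERR_SEM_BUSY if another thread owns it.
 *
 * @returns IPRT status code.
 * @param   pCritSect   The critical section.
 * @param   pSrcPos     The source position of the caller (lock validator).
 */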
DECL_FORCE_INLINE(int) rtCritSectTryEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    /*AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);*/
#ifdef IN_RING0
    Assert(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0));
#endif
    RT_NOREF_PV(pSrcPos);

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Try to take the lock. (cLockers is -1 if it's free.)
     */
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();
    if (!ASMAtomicCmpXchgS32(&pCritSect->cLockers, 0, -1))
    {
        /*
         * Somebody owns it (or will soon). Perhaps it's us?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                int rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                    return rc9;
#endif
                int32_t cLockers = ASMAtomicIncS32(&pCritSect->cLockers); NOREF(cLockers);
                pCritSect->cNestings++;
                IPRT_CRITSECT_ENTERED(pCritSect, NULL, cLockers, pCritSect->cNestings);
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
            return VERR_SEM_NESTED;
        }
        IPRT_CRITSECT_BUSY(pCritSect, NULL, pCritSect->cLockers, (void *)pCritSect->NativeThreadOwner);
        return VERR_SEM_BUSY;
    }

    /*
     * First time.
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
#endif
    IPRT_CRITSECT_ENTERED(pCritSect, NULL, 0, 1);

    return VINF_SUCCESS;
}


RTDECL(int) RTCritSectTryEnter(PRTCRITSECT pCritSect)
{
#ifndef RTCRITSECT_STRICT
    return rtCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtCritSectTryEnter(pCritSect, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTCritSectTryEnter);


RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtCritSectTryEnter(pCritSect, &SrcPos);
}
RT_EXPORT_SYMBOL(RTCritSectTryEnterDebug);


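/**
 * Common worker for RTCritSectEnter and RTCritSectEnterDebug.
 *
 * Enters the critical section, blocking on the event semaphore until the
 * current owner releases it.
 *
 * @returns IPRT status code.
 * @param   pCritSect   The critical section.
 * @param   pSrcPos     The source position of the caller (lock validator).
 */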
DECL_FORCE_INLINE(int) rtCritSectEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertPtr(pCritSect);
    AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);
#ifdef IN_RING0
    Assert(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0));
#endif
    RT_NOREF_PV(pSrcPos);

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Who is calling and is the lock order right?
     */
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();
#ifdef RTCRITSECT_STRICT
    RTTHREAD hThreadSelf = pCritSect->pValidatorRec
                         ? RTThreadSelfAutoAdopt()
                         : RTThreadSelf();
    int rc9;
    if (pCritSect->pValidatorRec) /* (bootstrap) */
    {
        rc9 = RTLockValidatorRecExclCheckOrder(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Increment the waiter counter.
     * This becomes 0 when the section is free.
     */
    int32_t cLockers = ASMAtomicIncS32(&pCritSect->cLockers);
    if (cLockers > 0)
    {
        /*
         * Nested?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                {
                    ASMAtomicDecS32(&pCritSect->cLockers);
                    return rc9;
                }
#endif
                pCritSect->cNestings++;
                IPRT_CRITSECT_ENTERED(pCritSect, NULL, cLockers, pCritSect->cNestings);
                return VINF_SUCCESS;
            }

            AssertBreakpoint(); /* don't do normal assertion here, the logger uses this code too. */
            ASMAtomicDecS32(&pCritSect->cLockers);
            return VERR_SEM_NESTED;
        }

        /*
         * Wait for the current owner to release it.
         */
        IPRT_CRITSECT_WAITING(pCritSect, NULL, cLockers, (void *)pCritSect->NativeThreadOwner);
#if !defined(RTCRITSECT_STRICT) && defined(IN_RING3)
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        for (;;)
        {
#ifdef RTCRITSECT_STRICT
            rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, false);
            if (RT_FAILURE(rc9))
            {
                ASMAtomicDecS32(&pCritSect->cLockers);
                return rc9;
            }
#elif defined(IN_RING3)
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, false);
#endif
            int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
#ifdef IN_RING3
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
#endif

            if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
                return VERR_SEM_DESTROYED;
            if (rc == VINF_SUCCESS)
                break;
            AssertMsg(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
        }
        AssertMsg(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD, ("pCritSect->NativeThreadOwner=%p\n", pCritSect->NativeThreadOwner));
    }

    /*
     * First time.
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, true);
#endif
    IPRT_CRITSECT_ENTERED(pCritSect, NULL, 0, 1);

    return VINF_SUCCESS;
}


RTDECL(int) RTCritSectEnter(PRTCRITSECT pCritSect)
{
#ifndef RTCRITSECT_STRICT
    return rtCritSectEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtCritSectEnter(pCritSect, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTCritSectEnter);


RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtCritSectEnter(pCritSect, &SrcPos);
}
RT_EXPORT_SYMBOL(RTCritSectEnterDebug);


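/**
 * Leaves the critical section, waking one waiter when the final nesting is
 * released.
 *
 * @returns IPRT status code.
 * @param   pCritSect   The critical section.
 */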
RTDECL(int) RTCritSectLeave(PRTCRITSECT pCritSect)
{
    /*
     * Assert sanity and check for NOP.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
#ifdef IN_RING0
    Assert(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0));
#endif
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Assert ownership and so on.
     */
    Assert(pCritSect->cNestings > 0);
    Assert(pCritSect->cLockers >= 0);
    Assert(pCritSect->NativeThreadOwner == RTThreadNativeSelf());

#ifdef RTCRITSECT_STRICT
    int rc9 = RTLockValidatorRecExclReleaseOwner(pCritSect->pValidatorRec, pCritSect->cNestings == 1);
    if (RT_FAILURE(rc9))
        return rc9;
#endif

    /*
     * Decrement the nesting count; when it reaches zero we release the critsect.
     */
    uint32_t cNestings = --pCritSect->cNestings;
    IPRT_CRITSECT_LEAVING(pCritSect, NULL, ASMAtomicUoReadS32(&pCritSect->cLockers) - 1, cNestings);
    if (cNestings > 0)
        ASMAtomicDecS32(&pCritSect->cLockers);
    else
    {
        /*
         * Set the owner to NIL.
         * Decrement waiters; if the result is >= 0 we have to wake up one of them.
         */
        ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicDecS32(&pCritSect->cLockers) >= 0)
        {
            int rc = RTSemEventSignal(pCritSect->EventSem);
            AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
        }
    }
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTCritSectLeave);



#ifdef IN_RING3

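/**
 * Worker for RTCritSectEnterMultiple and RTCritSectEnterMultipleDebug.
 *
 * Acquires all the critical sections in the array, releasing everything and
 * retrying (blocking on the one that was busy) until all of them are owned.
 *
 * @returns IPRT status code.
 * @param   cCritSects      Number of critical sections in the array.
 * @param   papCritSects    Array of critical section pointers.
 * @param   pSrcPos         The source position of the caller (lock validator).
 */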
static int rtCritSectEnterMultiple(size_t cCritSects, PRTCRITSECT *papCritSects, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(cCritSects > 0);
    AssertPtr(papCritSects);

    /*
     * Try to get them all.
     */
    int rc = VERR_INVALID_PARAMETER;
    size_t i;
    for (i = 0; i < cCritSects; i++)
    {
        rc = rtCritSectTryEnter(papCritSects[i], pSrcPos);
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_SUCCESS(rc))
        return rc;

    /*
     * The retry loop.
     */
    for (unsigned cTries = 0; ; cTries++)
    {
        /*
         * We've failed, so release any locks we might have gotten. ('i' is the lock that failed, btw.)
         */
        size_t j = i;
        while (j-- > 0)
        {
            int rc2 = RTCritSectLeave(papCritSects[j]);
            AssertRC(rc2);
        }
        if (rc != VERR_SEM_BUSY)
            return rc;

        /*
         * Try to prevent any theoretical synchronous races with other threads.
         */
        Assert(cTries < 1000000);
        if (cTries > 10000)
            RTThreadSleep(cTries % 3);

        /*
         * Wait on the one we failed to get.
         */
        rc = rtCritSectEnter(papCritSects[i], pSrcPos);
        if (RT_FAILURE(rc))
            return rc;

        /*
         * Try to take the others.
         */
        for (j = 0; j < cCritSects; j++)
        {
            if (j != i)
            {
                rc = rtCritSectTryEnter(papCritSects[j], pSrcPos);
                if (RT_FAILURE(rc))
                    break;
            }
        }
        if (RT_SUCCESS(rc))
            return rc;

        /*
         * We failed.
         */
        if (i > j)
        {
            int rc2 = RTCritSectLeave(papCritSects[i]);
            AssertRC(rc2);
        }
        i = j;
    }
}


RTDECL(int) RTCritSectEnterMultiple(size_t cCritSects, PRTCRITSECT *papCritSects)
{
#ifndef RTCRITSECT_STRICT
    return rtCritSectEnterMultiple(cCritSects, papCritSects, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtCritSectEnterMultiple(cCritSects, papCritSects, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTCritSectEnterMultiple);


RTDECL(int) RTCritSectEnterMultipleDebug(size_t cCritSects, PRTCRITSECT *papCritSects, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtCritSectEnterMultiple(cCritSects, papCritSects, &SrcPos);
}
RT_EXPORT_SYMBOL(RTCritSectEnterMultipleDebug);



RTDECL(int) RTCritSectLeaveMultiple(size_t cCritSects, PRTCRITSECT *papCritSects)
{
    int rc = VINF_SUCCESS;
    for (size_t i = 0; i < cCritSects; i++)
    {
        int rc2 = RTCritSectLeave(papCritSects[i]);
        if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
            rc = rc2;
    }
    return rc;
}
RT_EXPORT_SYMBOL(RTCritSectLeaveMultiple);

#endif /* IN_RING3 */



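/**
 * Deletes the critical section, invalidating the structure and waking up any
 * waiters before destroying the event semaphore.
 *
 * @returns IPRT status code.
 * @param   pCritSect   The critical section to delete.
 */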
RTDECL(int) RTCritSectDelete(PRTCRITSECT pCritSect)
{
    /*
     * Assert free waiters and so on.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->cNestings == 0);
    Assert(pCritSect->cLockers == -1);
    Assert(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD);
#ifdef IN_RING0
    Assert(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pCritSect->fFlags & RTCRITSECT_FLAGS_RING0));
#endif

    /*
     * Invalidate the structure and free the event semaphore.
     * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
     */
    ASMAtomicWriteU32(&pCritSect->u32Magic, ~RTCRITSECT_MAGIC);
    pCritSect->fFlags = 0;
    pCritSect->cNestings = 0;
    pCritSect->NativeThreadOwner = NIL_RTNATIVETHREAD;
    RTSEMEVENT EventSem = pCritSect->EventSem;
    pCritSect->EventSem = NIL_RTSEMEVENT;

    while (pCritSect->cLockers-- >= 0)
        RTSemEventSignal(EventSem);
    ASMAtomicWriteS32(&pCritSect->cLockers, -1);
    int rc = RTSemEventDestroy(EventSem);
    AssertRC(rc);

#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclDestroy(&pCritSect->pValidatorRec);
#endif

    return rc;
}
RT_EXPORT_SYMBOL(RTCritSectDelete);
