VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/critsect-generic.cpp @ 35761

Last change on this file since 35761 was 33269, checked in by vboxsync, 14 years ago

IPRT: A quick replacement of the RTMemPage* and RTMemExec* APIs on posix. (Turned out to be a bit more work than expected because of the electric fence heap and init dependencies.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 15.0 KB
/* $Id: critsect-generic.cpp 33269 2010-10-20 15:42:28Z vboxsync $ */
/** @file
 * IPRT - Critical Section, Generic.
 */

/*
 * Copyright (C) 2006-2009 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include <iprt/critsect.h>
#include "internal/iprt.h"

#include <iprt/semaphore.h>
#include <iprt/thread.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/err.h>
#include "internal/thread.h"
#include "internal/strict.h"

#undef RTCritSectInit
RTDECL(int) RTCritSectInit(PRTCRITSECT pCritSect)
{
    return RTCritSectInitEx(pCritSect, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTCritSect");
}
RT_EXPORT_SYMBOL(RTCritSectInit);

RTDECL(int) RTCritSectInitEx(PRTCRITSECT pCritSect, uint32_t fFlags, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                             const char *pszNameFmt, ...)
{
    AssertReturn(!(fFlags & ~(RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_BOOTSTRAP_HACK)),
                 VERR_INVALID_PARAMETER);

    /*
     * Initialize the structure.
     */
    pCritSect->u32Magic = RTCRITSECT_MAGIC;
    pCritSect->fFlags = fFlags;
    pCritSect->cNestings = 0;
    pCritSect->cLockers = -1;
    pCritSect->NativeThreadOwner = NIL_RTNATIVETHREAD;
    pCritSect->pValidatorRec = NULL;
    int rc = VINF_SUCCESS;
#ifdef RTCRITSECT_STRICT
    if (!(fFlags & RTCRITSECT_FLAGS_BOOTSTRAP_HACK))
    {
        if (!pszNameFmt)
        {
            static uint32_t volatile s_iCritSectAnon = 0;
            rc = RTLockValidatorRecExclCreate(&pCritSect->pValidatorRec, hClass, uSubClass, pCritSect,
                                              !(fFlags & RTCRITSECT_FLAGS_NO_LOCK_VAL),
                                              "RTCritSect-%u", ASMAtomicIncU32(&s_iCritSectAnon) - 1);
        }
        else
        {
            va_list va;
            va_start(va, pszNameFmt);
            rc = RTLockValidatorRecExclCreateV(&pCritSect->pValidatorRec, hClass, uSubClass, pCritSect,
                                               !(fFlags & RTCRITSECT_FLAGS_NO_LOCK_VAL), pszNameFmt, va);
            va_end(va);
        }
    }
#endif
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventCreateEx(&pCritSect->EventSem,
                                fFlags & RTCRITSECT_FLAGS_BOOTSTRAP_HACK
                                ? RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK
                                : RTSEMEVENT_FLAGS_NO_LOCK_VAL,
                                NIL_RTLOCKVALCLASS,
                                NULL);
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        RTLockValidatorRecExclDestroy(&pCritSect->pValidatorRec);
    }

    AssertRC(rc);
    pCritSect->EventSem = NULL;
    pCritSect->u32Magic = (uint32_t)rc;
    return rc;
}
RT_EXPORT_SYMBOL(RTCritSectInitEx);

RTDECL(uint32_t) RTCritSectSetSubClass(PRTCRITSECT pCritSect, uint32_t uSubClass)
{
#ifdef RTCRITSECT_STRICT
    AssertPtrReturn(pCritSect, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
    return RTLockValidatorRecExclSetSubClass(pCritSect->pValidatorRec, uSubClass);
#else
    return RTLOCKVAL_SUB_CLASS_INVALID;
#endif
}

DECL_FORCE_INLINE(int) rtCritSectTryEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();

    /*
     * Try to take the lock (cLockers is -1 when the section is free).
     */
    if (!ASMAtomicCmpXchgS32(&pCritSect->cLockers, 0, -1))
    {
        /*
         * Somebody else owns it (or soon will). Perhaps it's us?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                int rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                    return rc9;
#endif
                ASMAtomicIncS32(&pCritSect->cLockers);
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
            return VERR_SEM_NESTED;
        }
        return VERR_SEM_BUSY;
    }

    /*
     * First time.
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}

#undef RTCritSectTryEnter
RTDECL(int) RTCritSectTryEnter(PRTCRITSECT pCritSect)
{
#ifndef RTCRITSECT_STRICT
    return rtCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtCritSectTryEnter(pCritSect, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTCritSectTryEnter);


RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtCritSectTryEnter(pCritSect, &SrcPos);
}
RT_EXPORT_SYMBOL(RTCritSectTryEnterDebug);

DECL_FORCE_INLINE(int) rtCritSectEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();

    /* If the critical section has already been destroyed, then inform the caller. */
    if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
        return VERR_SEM_DESTROYED;

#ifdef RTCRITSECT_STRICT
    RTTHREAD hThreadSelf = pCritSect->pValidatorRec
                         ? RTThreadSelfAutoAdopt()
                         : RTThreadSelf();
    int rc9;
    if (pCritSect->pValidatorRec) /* (bootstrap) */
    {
        rc9 = RTLockValidatorRecExclCheckOrder(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Increment the waiter counter; the result is 0 if the section was free.
     */
    if (ASMAtomicIncS32(&pCritSect->cLockers) > 0)
    {
        /*
         * Nested?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                {
                    ASMAtomicDecS32(&pCritSect->cLockers);
                    return rc9;
                }
#endif
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }

            AssertBreakpoint(); /* don't do normal assertion here, the logger uses this code too. */
            ASMAtomicDecS32(&pCritSect->cLockers);
            return VERR_SEM_NESTED;
        }

        /*
         * Wait for the current owner to release it.
         */
#ifndef RTCRITSECT_STRICT
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        for (;;)
        {
#ifdef RTCRITSECT_STRICT
            rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, false);
            if (RT_FAILURE(rc9))
            {
                ASMAtomicDecS32(&pCritSect->cLockers);
                return rc9;
            }
#else
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, false);
#endif
            int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);

            if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
                return VERR_SEM_DESTROYED;
            if (rc == VINF_SUCCESS)
                break;
            AssertMsg(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
        }
        AssertMsg(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD, ("pCritSect->NativeThreadOwner=%p\n", pCritSect->NativeThreadOwner));
    }

    /*
     * First time.
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}

#undef RTCritSectEnter
RTDECL(int) RTCritSectEnter(PRTCRITSECT pCritSect)
{
#ifndef RTCRITSECT_STRICT
    return rtCritSectEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtCritSectEnter(pCritSect, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTCritSectEnter);


RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtCritSectEnter(pCritSect, &SrcPos);
}
RT_EXPORT_SYMBOL(RTCritSectEnterDebug);

RTDECL(int) RTCritSectLeave(PRTCRITSECT pCritSect)
{
    /*
     * Assert ownership and so on.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->cNestings > 0);
    Assert(pCritSect->cLockers >= 0);
    Assert(pCritSect->NativeThreadOwner == RTThreadNativeSelf());

#ifdef RTCRITSECT_STRICT
    int rc9 = RTLockValidatorRecExclReleaseOwner(pCritSect->pValidatorRec, pCritSect->cNestings == 1);
    if (RT_FAILURE(rc9))
        return rc9;
#endif

    /*
     * Decrement the nesting count; once it reaches zero we actually release the critsect.
     */
    pCritSect->cNestings--;
    if (pCritSect->cNestings > 0)
        ASMAtomicDecS32(&pCritSect->cLockers);
    else
    {
        /*
         * Clear the owner and decrement the waiter count;
         * if the count is still >= 0 somebody is waiting and we have to wake one of them up.
         */
        ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicDecS32(&pCritSect->cLockers) >= 0)
        {
            int rc = RTSemEventSignal(pCritSect->EventSem);
            AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
        }
    }
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTCritSectLeave);


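/**
 * Worker for RTCritSectEnterMultiple and RTCritSectEnterMultipleDebug.
 *
 * Tries to enter every section in the array; on contention it backs out of
 * the ones it did get, blocks on the one it failed to get, and then retries,
 * so it cannot deadlock against another caller taking the same set in a
 * different order.
 */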
static int rtCritSectEnterMultiple(size_t cCritSects, PRTCRITSECT *papCritSects, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(cCritSects > 0);
    AssertPtr(papCritSects);

    /*
     * Try to get them all.
     */
    int rc = VERR_INVALID_PARAMETER;
    size_t i;
    for (i = 0; i < cCritSects; i++)
    {
        rc = rtCritSectTryEnter(papCritSects[i], pSrcPos);
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_SUCCESS(rc))
        return rc;

    /*
     * The retry loop.
     */
    for (unsigned cTries = 0; ; cTries++)
    {
        /*
         * We've failed, so release any locks we did get. ('i' is the index of the lock that failed.)
         */
        size_t j = i;
        while (j-- > 0)
        {
            int rc2 = RTCritSectLeave(papCritSects[j]);
            AssertRC(rc2);
        }
        if (rc != VERR_SEM_BUSY)
            return rc;

        /*
         * Try to prevent any theoretical synchronous races with other threads.
         */
        Assert(cTries < 1000000);
        if (cTries > 10000)
            RTThreadSleep(cTries % 3);

        /*
         * Wait on the one we failed to get.
         */
        rc = rtCritSectEnter(papCritSects[i], pSrcPos);
        if (RT_FAILURE(rc))
            return rc;

        /*
         * Try to take the others.
         */
        for (j = 0; j < cCritSects; j++)
        {
            if (j != i)
            {
                rc = rtCritSectTryEnter(papCritSects[j], pSrcPos);
                if (RT_FAILURE(rc))
                    break;
            }
        }
        if (RT_SUCCESS(rc))
            return rc;

        /*
         * We failed.
         */
        if (i > j)
        {
            int rc2 = RTCritSectLeave(papCritSects[i]);
            AssertRC(rc2);
        }
        i = j;
    }
}

#undef RTCritSectEnterMultiple
RTDECL(int) RTCritSectEnterMultiple(size_t cCritSects, PRTCRITSECT *papCritSects)
{
#ifndef RTCRITSECT_STRICT
    return rtCritSectEnterMultiple(cCritSects, papCritSects, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtCritSectEnterMultiple(cCritSects, papCritSects, &SrcPos);
#endif
}
RT_EXPORT_SYMBOL(RTCritSectEnterMultiple);


RTDECL(int) RTCritSectEnterMultipleDebug(size_t cCritSects, PRTCRITSECT *papCritSects, RTUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtCritSectEnterMultiple(cCritSects, papCritSects, &SrcPos);
}
RT_EXPORT_SYMBOL(RTCritSectEnterMultipleDebug);

RTDECL(int) RTCritSectLeaveMultiple(size_t cCritSects, PRTCRITSECT *papCritSects)
{
    int rc = VINF_SUCCESS;
    for (size_t i = 0; i < cCritSects; i++)
    {
        int rc2 = RTCritSectLeave(papCritSects[i]);
        if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
            rc = rc2;
    }
    return rc;
}
RT_EXPORT_SYMBOL(RTCritSectLeaveMultiple);

RTDECL(int) RTCritSectDelete(PRTCRITSECT pCritSect)
{
    /*
     * Assert that there are no waiters and so on.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->cNestings == 0);
    Assert(pCritSect->cLockers == -1);
    Assert(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD);

    /*
     * Invalidate the structure and free the mutex.
     * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
     */
    ASMAtomicWriteU32(&pCritSect->u32Magic, ~RTCRITSECT_MAGIC);
    pCritSect->fFlags = 0;
    pCritSect->cNestings = 0;
    pCritSect->NativeThreadOwner = NIL_RTNATIVETHREAD;
    RTSEMEVENT EventSem = pCritSect->EventSem;
    pCritSect->EventSem = NIL_RTSEMEVENT;

    while (pCritSect->cLockers-- >= 0)
        RTSemEventSignal(EventSem);
    ASMAtomicWriteS32(&pCritSect->cLockers, -1);
    int rc = RTSemEventDestroy(EventSem);
    AssertRC(rc);

    RTLockValidatorRecExclDestroy(&pCritSect->pValidatorRec);

    return rc;
}
RT_EXPORT_SYMBOL(RTCritSectDelete);

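For orientation, here is a minimal usage sketch of the public API implemented above: initialize the section once, bracket accesses to shared state with enter/leave, and delete it at shutdown. Only the RTCritSect* calls and headers shown in this file are assumed; the g_CritSect variable and the example* functions are illustrative names, not part of IPRT.

#include <iprt/critsect.h>
#include <iprt/err.h>

/* Illustrative shared state guarded by one critical section. */
static RTCRITSECT g_CritSect;
static unsigned   g_cCounter = 0;

/* Call once before any other thread may touch g_cCounter. */
int exampleInit(void)
{
    return RTCritSectInit(&g_CritSect);
}

/* Typical worker: enter, modify the shared state, leave. */
void exampleWorker(void)
{
    int rc = RTCritSectEnter(&g_CritSect);
    if (RT_SUCCESS(rc))
    {
        g_cCounter++;
        RTCritSectLeave(&g_CritSect);
    }
}

/* Call during shutdown, when no thread can be inside or waiting on the section. */
void exampleTerm(void)
{
    RTCritSectDelete(&g_CritSect);
}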