VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/critsect-generic.cpp@5938

Last change on this file since 5938 was 4071, checked in by vboxsync, 17 years ago

Biggest check-in ever. New source code headers for all (C) innotek files.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 13.8 KB
/* $Id: critsect-generic.cpp 4071 2007-08-07 17:07:59Z vboxsync $ */
/** @file
 * innotek Portable Runtime - Critical Section, Generic.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include <iprt/critsect.h>
#include <iprt/semaphore.h>
#include <iprt/thread.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/err.h>
#include "internal/thread.h"

/** @def RTCRITSECT_STRICT
 * Define this to enable deadlock detection.
 *
 * @remark  This won't work safely on L4 since we have to traverse the AVL tree
 *          in order to get the RT thread structure there and this tree is
 *          protected by a critsect at the moment.
 */
#if !defined(RTCRITSECT_STRICT) && defined(RT_STRICT) && !defined(RT_OS_L4)
# define RTCRITSECT_STRICT
#endif

/* In strict mode these are redefined as macros, so undefine them now for the implementation. */
#undef RTCritSectEnter
#undef RTCritSectTryEnter
#undef RTCritSectEnterMultiple
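
/* A rough illustration (an assumption about iprt/critsect.h, not something this file
 * defines) of the strict-mode remapping that makes the #undefs above necessary: the
 * header maps the plain names onto the *Debug variants so every caller passes its
 * source position along, e.g.
 *
 *     #define RTCritSectEnter(pCritSect) \
 *         RTCritSectEnterDebug(pCritSect, __FILE__, __LINE__, 0)
 */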


/**
 * Initialize a critical section.
 */
RTDECL(int) RTCritSectInit(PRTCRITSECT pCritSect)
{
    return RTCritSectInitEx(pCritSect, 0);
}


/**
 * Initialize a critical section.
 *
 * @returns iprt status code.
 * @param   pCritSect   Pointer to the critical section structure.
 * @param   fFlags      Flags, any combination of the RTCRITSECT_FLAGS \#defines.
 */
RTDECL(int) RTCritSectInitEx(PRTCRITSECT pCritSect, uint32_t fFlags)
{
    /*
     * Initialize the structure members.
     */
    pCritSect->u32Magic             = RTCRITSECT_MAGIC;
    pCritSect->fFlags               = fFlags;
    pCritSect->cNestings            = 0;
    pCritSect->cLockers             = -1;
    pCritSect->NativeThreadOwner    = NIL_RTNATIVETHREAD;
    pCritSect->Strict.ThreadOwner   = NIL_RTTHREAD;
    pCritSect->Strict.pszEnterFile  = NULL;
    pCritSect->Strict.u32EnterLine  = 0;
    pCritSect->Strict.uEnterId      = 0;
    int rc = RTSemEventCreate(&pCritSect->EventSem);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    AssertRC(rc);
    pCritSect->EventSem = NULL;
    pCritSect->u32Magic = (uint32_t)rc;
    return rc;
}
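
/* A minimal usage sketch of the init/enter/leave/delete lifecycle implemented in
 * this file (illustrative only; the variable name and error handling are invented
 * for the example):
 *
 *     RTCRITSECT CritSect;
 *     int rc = RTCritSectInit(&CritSect);
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTCritSectEnter(&CritSect);
 *         // ... touch the data the section protects ...
 *         RTCritSectLeave(&CritSect);
 *         RTCritSectDelete(&CritSect);
 *     }
 */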


/**
 * Enter multiple critical sections.
 *
 * This function will enter ALL the specified critical sections before returning.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_SEM_NESTED if a nested enter is attempted on a no-nesting section. (Asserted.)
 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 * @param   cCritSects      Number of critical sections in the array.
 * @param   papCritSects    Array of critical section pointers.
 *
 * @remark  Please note that this function will not necessarily come out favourable in a
 *          fight with other threads which are using the normal RTCritSectEnter() function.
 *          Therefore, avoid having to enter multiple critical sections!
 */
RTDECL(int) RTCritSectEnterMultiple(unsigned cCritSects, PRTCRITSECT *papCritSects)
#ifdef RTCRITSECT_STRICT
{
    return RTCritSectEnterMultipleDebug(cCritSects, papCritSects, __FILE__, __LINE__, 0);
}
RTDECL(int) RTCritSectEnterMultipleDebug(unsigned cCritSects, PRTCRITSECT *papCritSects, const char *pszFile, unsigned uLine, RTUINTPTR uId)
#endif /* RTCRITSECT_STRICT */
{
    Assert(cCritSects > 0);
    Assert(VALID_PTR(papCritSects));

    /*
     * Try to get them all.
     */
    int rc = VERR_INVALID_PARAMETER;
    unsigned i;
    for (i = 0; i < cCritSects; i++)
    {
#ifdef RTCRITSECT_STRICT
        rc = RTCritSectTryEnterDebug(papCritSects[i], pszFile, uLine, uId);
#else
        rc = RTCritSectTryEnter(papCritSects[i]);
#endif
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_SUCCESS(rc))
        return rc;

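    /*
     * At least one section was busy.  The retry strategy below avoids deadlocking
     * against threads that take the same sections in a different order: drop
     * everything we hold, block on the one section we could not get, then retry
     * the rest with non-blocking attempts.
     */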
    /*
     * The retry loop.
     */
    for (unsigned cTries = 0; ; cTries++)
    {
        /*
         * We've failed; release any locks we might have gotten. ('i' is the index of the one that failed.)
         */
        unsigned j = i;
        while (j-- > 0)
        {
            int rc2 = RTCritSectLeave(papCritSects[j]);
            AssertRC(rc2);
        }
        if (rc != VERR_SEM_BUSY)
            return rc;

        /*
         * Try to prevent any theoretical synchronous races with other threads.
         */
        Assert(cTries < 1000000);
        if (cTries > 10000)
            RTThreadSleep(cTries % 3);

        /*
         * Wait on the one we failed to get.
         */
#ifdef RTCRITSECT_STRICT
        rc = RTCritSectEnterDebug(papCritSects[i], pszFile, uLine, uId);
#else
        rc = RTCritSectEnter(papCritSects[i]);
#endif
        if (RT_FAILURE(rc))
            return rc;

        /*
         * Try to take the others.
         */
        for (j = 0; j < cCritSects; j++)
        {
            if (j != i)
            {
#ifdef RTCRITSECT_STRICT
                rc = RTCritSectTryEnterDebug(papCritSects[j], pszFile, uLine, uId);
#else
                rc = RTCritSectTryEnter(papCritSects[j]);
#endif
                if (RT_FAILURE(rc))
                    break;
            }
        }
        if (RT_SUCCESS(rc))
            return rc;

        /*
         * We failed.  If the section we blocked on ('i') won't be released by the
         * cleanup pass at the top of the loop (its index is above the new failure
         * point), release it here before going around again.
         */
        if (i > j)
        {
            int rc2 = RTCritSectLeave(papCritSects[i]);
            AssertRC(rc2);
        }
        i = j;
    }
}
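
/* A sketch of how the multiple-enter/leave pair is meant to be used (illustrative
 * only; the array and section variables are invented for the example):
 *
 *     PRTCRITSECT apCritSects[2] = { &CritSectA, &CritSectB };
 *     int rc = RTCritSectEnterMultiple(RT_ELEMENTS(apCritSects), apCritSects);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... work that needs both sections held ...
 *         RTCritSectLeaveMultiple(RT_ELEMENTS(apCritSects), apCritSects);
 *     }
 */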


/**
 * Try to enter a critical section.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_SEM_BUSY if the critsect is owned by another thread.
 * @returns VERR_SEM_NESTED if a nested enter is attempted on a no-nesting section. (Asserted.)
 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 * @param   pCritSect   The critical section.
 */
RTDECL(int) RTCritSectTryEnter(PRTCRITSECT pCritSect)
#ifdef RTCRITSECT_STRICT
{
    return RTCritSectTryEnterDebug(pCritSect, __FILE__, __LINE__, 0);
}
RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
#endif /* RTCRITSECT_STRICT */
{
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    RTNATIVETHREAD  NativeThreadSelf = RTThreadNativeSelf();
#ifdef RTCRITSECT_STRICT
    RTTHREAD        ThreadSelf = RTThreadSelf();
    if (ThreadSelf == NIL_RTTHREAD)
        RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &ThreadSelf);
#endif

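    /*
     * Note on the lock word: cLockers is -1 while the section is free; every
     * successful enter (including nested ones) adds one and every leave subtracts
     * one, so cLockers + 1 equals the number of outstanding enters (the owner's
     * nestings plus any waiting threads).
     */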
    /*
     * Try to take the lock. (cLockers is -1 if it's free)
     */
    if (!ASMAtomicCmpXchgS32(&pCritSect->cLockers, 0, -1))
    {
        /*
         * Somebody owns it (or will do so soon). Perhaps it's us?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
                ASMAtomicIncS32(&pCritSect->cLockers);
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
            return VERR_SEM_NESTED;
        }
        return VERR_SEM_BUSY;
    }

    /*
     * First time.
     */
    pCritSect->cNestings = 1;
    ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    pCritSect->Strict.pszEnterFile = pszFile;
    pCritSect->Strict.u32EnterLine = uLine;
    pCritSect->Strict.uEnterId     = uId;
    ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, (RTUINTPTR)ThreadSelf); /* screw gcc and its pedantic warnings. */
#endif

    return VINF_SUCCESS;
}
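
/* A sketch of the non-blocking pattern this function enables (illustrative only;
 * the section variable is invented for the example):
 *
 *     if (RT_SUCCESS(RTCritSectTryEnter(&CritSect)))
 *     {
 *         // ... fast path while holding the section ...
 *         RTCritSectLeave(&CritSect);
 *     }
 *     else
 *     {
 *         // VERR_SEM_BUSY: another thread owns it; take the slow path instead.
 *     }
 */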


/**
 * Enter a critical section.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_SEM_NESTED if a nested enter is attempted on a no-nesting section. (Asserted.)
 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 * @param   pCritSect   The critical section.
 */
RTDECL(int) RTCritSectEnter(PRTCRITSECT pCritSect)
#ifdef RTCRITSECT_STRICT
{
    return RTCritSectEnterDebug(pCritSect, __FILE__, __LINE__, 0);
}
RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
#endif /* RTCRITSECT_STRICT */
{
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    RTNATIVETHREAD  NativeThreadSelf = RTThreadNativeSelf();
#ifdef RTCRITSECT_STRICT
    RTTHREAD        ThreadSelf = RTThreadSelf();
    if (ThreadSelf == NIL_RTTHREAD)
        RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &ThreadSelf);
#endif

    /* If the critical section has already been destroyed, inform the caller. */
    if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
        return VERR_SEM_DESTROYED;

    /*
     * Increment the lock counter.  If the section was free (-1) the result is
     * zero and we now own it; a positive result means it is owned already,
     * possibly by ourselves.
     */
    if (ASMAtomicIncS32(&pCritSect->cLockers) > 0)
    {
        /*
         * Nested?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }
            else
            {
                AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
                ASMAtomicDecS32(&pCritSect->cLockers);
                return VERR_SEM_NESTED;
            }
        }

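        /*
         * Another thread owns the section: block on the event semaphore, which the
         * owner signals once in RTCritSectLeave when it releases the section.
         * Interruptions and timeouts are simply retried, while a changed magic
         * value means the section was deleted while we were waiting.
         */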
        for (;;)
        {
#ifdef RTCRITSECT_STRICT
            rtThreadBlocking(ThreadSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, pszFile, uLine, uId);
#endif
            int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
#ifdef RTCRITSECT_STRICT
            rtThreadUnblocked(ThreadSelf, RTTHREADSTATE_CRITSECT);
#endif
            if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
                return VERR_SEM_DESTROYED;
            if (rc == VINF_SUCCESS)
                break;
            AssertMsg(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED, ("rc=%Vrc\n", rc));
        }
        AssertMsg(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD, ("pCritSect->NativeThreadOwner=%p\n", pCritSect->NativeThreadOwner));
    }

    /*
     * First time.
     */
    pCritSect->cNestings = 1;
    ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    pCritSect->Strict.pszEnterFile = pszFile;
    pCritSect->Strict.u32EnterLine = uLine;
    pCritSect->Strict.uEnterId     = uId;
    ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, (RTUINTPTR)ThreadSelf); /* screw gcc and its pedantic warnings. */
#endif

    return VINF_SUCCESS;
}


/**
 * Leave a critical section.
 *
 * @returns VINF_SUCCESS.
 * @param   pCritSect   The critical section.
 */
RTDECL(int) RTCritSectLeave(PRTCRITSECT pCritSect)
{
    /*
     * Assert ownership and so on.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->cNestings > 0);
    Assert(pCritSect->cLockers >= 0);
    Assert(pCritSect->NativeThreadOwner == RTThreadNativeSelf());

    /*
     * Decrement the nesting count; only when it reaches zero do we actually release the critsect.
     */
    pCritSect->cNestings--;
    if (pCritSect->cNestings > 0)
        ASMAtomicDecS32(&pCritSect->cLockers);
    else
    {
        /*
         * Clear the owner, then decrement the lock counter.
         * If the result is >= 0 somebody is waiting and we have to wake one of them up.
         */
#ifdef RTCRITSECT_STRICT
        ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, NIL_RTTHREAD);
#endif
        ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicDecS32(&pCritSect->cLockers) >= 0)
        {
            int rc = RTSemEventSignal(pCritSect->EventSem);
            AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Vrc\n", rc));
        }
    }
    return VINF_SUCCESS;
}


/**
 * Leave multiple critical sections.
 *
 * @returns VINF_SUCCESS.
 * @param   cCritSects      Number of critical sections in the array.
 * @param   papCritSects    Array of critical section pointers.
 */
RTDECL(int) RTCritSectLeaveMultiple(unsigned cCritSects, PRTCRITSECT *papCritSects)
{
    int rc = VINF_SUCCESS;
    for (unsigned i = 0; i < cCritSects; i++)
    {
        int rc2 = RTCritSectLeave(papCritSects[i]);
        if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
            rc = rc2;
    }
    return rc;
}


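/*
 * Non-strict builds: the *Debug variants are kept as plain wrappers so that code
 * compiled with RTCRITSECT_STRICT defined still links and runs against this
 * runtime; the extra source-position arguments are simply ignored here.
 */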
#ifndef RTCRITSECT_STRICT
RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
{
    return RTCritSectEnter(pCritSect);
}

RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
{
    return RTCritSectTryEnter(pCritSect);
}

RTDECL(int) RTCritSectEnterMultipleDebug(unsigned cCritSects, PRTCRITSECT *papCritSects, const char *pszFile, unsigned uLine, RTUINTPTR uId)
{
    return RTCritSectEnterMultiple(cCritSects, papCritSects);
}
#endif /* !RTCRITSECT_STRICT */


/**
 * Deletes a critical section.
 *
 * @returns VINF_SUCCESS.
 * @param   pCritSect   The critical section.
 */
RTDECL(int) RTCritSectDelete(PRTCRITSECT pCritSect)
{
    /*
     * Assert free waiters and so on.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->cNestings == 0);
    Assert(pCritSect->cLockers == -1);
    Assert(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD);

    /*
     * Invalidate the structure and destroy the event semaphore.
     * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
     */
    ASMAtomicXchgU32(&pCritSect->u32Magic, 0);
    pCritSect->fFlags            = 0;
    pCritSect->cNestings         = 0;
    pCritSect->NativeThreadOwner = NIL_RTNATIVETHREAD;
    RTSEMEVENT EventSem = pCritSect->EventSem;
    pCritSect->EventSem = NULL;
    while (pCritSect->cLockers-- >= 0)
        RTSemEventSignal(EventSem);
    ASMAtomicXchgS32(&pCritSect->cLockers, -1);
    int rc = RTSemEventDestroy(EventSem);
    AssertRC(rc);

    return rc;
}
