VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/critsectrw-generic.cpp@106401

Last change on this file since 106401 was 106061, checked in by vboxsync, 3 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 41.2 KB
1/* $Id: critsectrw-generic.cpp 106061 2024-09-16 14:03:52Z vboxsync $ */
2/** @file
3 * IPRT - Read/Write Critical Section, Generic.
4 */
5
6/*
7 * Copyright (C) 2009-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#define RTCRITSECTRW_WITHOUT_REMAPPING
42#define RTASSERT_QUIET
43#include <iprt/critsect.h>
44#include "internal/iprt.h"
45
46#include <iprt/asm.h>
47#include <iprt/assert.h>
48#include <iprt/err.h>
49#include <iprt/lockvalidator.h>
50#include <iprt/mem.h>
51#include <iprt/semaphore.h>
52#include <iprt/thread.h>
53
54#include "internal/magics.h"
55#include "internal/strict.h"
56
57/* Two issues here, (1) the tracepoint generator uses IPRT, and (2) only one .d
58 file per module. */
59#ifdef IPRT_WITH_DTRACE
60# include IPRT_DTRACE_INCLUDE
61# ifdef IPRT_DTRACE_PREFIX
62# define IPRT_CRITSECTRW_EXCL_ENTERED RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_ENTERED)
63# define IPRT_CRITSECTRW_EXCL_ENTERED_ENABLED RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_ENTERED_ENABLED)
64# define IPRT_CRITSECTRW_EXCL_LEAVING RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_LEAVING)
65# define IPRT_CRITSECTRW_EXCL_LEAVING_ENABLED RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_LEAVING_ENABLED)
66# define IPRT_CRITSECTRW_EXCL_BUSY RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_BUSY)
67# define IPRT_CRITSECTRW_EXCL_WAITING RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_WAITING)
68# define IPRT_CRITSECTRW_EXCL_ENTERED_SHARED RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_ENTERED_SHARED)
69# define IPRT_CRITSECTRW_EXCL_LEAVING_SHARED RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_LEAVING_SHARED)
70# define IPRT_CRITSECTRW_SHARED_ENTERED RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_SHARED_ENTERED)
71# define IPRT_CRITSECTRW_SHARED_LEAVING RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_SHARED_LEAVING)
72# define IPRT_CRITSECTRW_SHARED_BUSY RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_SHARED_BUSY)
73# define IPRT_CRITSECTRW_SHARED_WAITING RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_SHARED_WAITING)
74# endif
75#else
76# define IPRT_CRITSECTRW_EXCL_ENTERED(a_pvCritSect, a_pszName, a_cNestings, a_cWaitingReaders, a_cWriters) do {} while (0)
77# define IPRT_CRITSECTRW_EXCL_ENTERED_ENABLED() (false)
78# define IPRT_CRITSECTRW_EXCL_LEAVING(a_pvCritSect, a_pszName, a_cNestings, a_cWaitingReaders, a_cWriters) do {} while (0)
79# define IPRT_CRITSECTRW_EXCL_LEAVING_ENABLED() (false)
80# define IPRT_CRITSECTRW_EXCL_BUSY( a_pvCritSect, a_pszName, a_fWriteMode, a_cWaitingReaders, a_cReaders, cWriters, a_pvNativeOwnerThread) do {} while (0)
81# define IPRT_CRITSECTRW_EXCL_WAITING(a_pvCritSect, a_pszName, a_fWriteMode, a_cWaitingReaders, a_cReaders, cWriters, a_pvNativeOwnerThread) do {} while (0)
82# define IPRT_CRITSECTRW_EXCL_ENTERED_SHARED(a_pvCritSect, a_pszName, a_cNestings, a_cWaitingReaders, a_cWriters) do {} while (0)
83# define IPRT_CRITSECTRW_EXCL_LEAVING_SHARED(a_pvCritSect, a_pszName, a_cNestings, a_cWaitingReaders, a_cWriters) do {} while (0)
84# define IPRT_CRITSECTRW_SHARED_ENTERED(a_pvCritSect, a_pszName, a_cReaders, a_cWaitingWriters) do {} while (0)
85# define IPRT_CRITSECTRW_SHARED_LEAVING(a_pvCritSect, a_pszName, a_cReaders, a_cWaitingWriters) do {} while (0)
86# define IPRT_CRITSECTRW_SHARED_BUSY( a_pvCritSect, a_pszName, a_pvNativeOwnerThread, a_cWaitingReaders, a_cWriters) do {} while (0)
87# define IPRT_CRITSECTRW_SHARED_WAITING(a_pvCritSect, a_pszName, a_pvNativeOwnerThread, a_cWaitingReaders, a_cWriters) do {} while (0)
88#endif
89
90
91
92RTDECL(int) RTCritSectRwInit(PRTCRITSECTRW pThis)
93{
94 return RTCritSectRwInitEx(pThis, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectRw");
95}
96RT_EXPORT_SYMBOL(RTCritSectRwInit);
97
98
99RTDECL(int) RTCritSectRwInitNamed(PRTCRITSECTRW pThis, const char *pszName)
100{
101 return RTCritSectRwInitEx(pThis, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "%s", pszName);
102}
103RT_EXPORT_SYMBOL(RTCritSectRwInitNamed);
104
105
106RTDECL(int) RTCritSectRwInitEx(PRTCRITSECTRW pThis, uint32_t fFlags,
107 RTLOCKVALCLASS hClass, uint32_t uSubClass, const char *pszNameFmt, ...)
108{
109 int rc;
110 AssertReturn(!(fFlags & ~( RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_BOOTSTRAP_HACK
111 | RTCRITSECT_FLAGS_NOP )),
112 VERR_INVALID_PARAMETER);
113 RT_NOREF_PV(hClass); RT_NOREF_PV(uSubClass); RT_NOREF_PV(pszNameFmt);
114
115
116 /*
117 * Initialize the structure, allocate the lock validator stuff and sems.
118 */
119 pThis->u32Magic = RTCRITSECTRW_MAGIC_DEAD;
120 pThis->fNeedReset = false;
121#ifdef IN_RING0
122 pThis->fFlags = (uint16_t)(fFlags | RTCRITSECT_FLAGS_RING0);
123#else
124 pThis->fFlags = (uint16_t)(fFlags & ~RTCRITSECT_FLAGS_RING0);
125#endif
126 pThis->u.u128.s.Hi = 0;
127 pThis->u.u128.s.Lo = 0;
128 pThis->u.s.hNativeWriter = NIL_RTNATIVETHREAD;
129 AssertCompile(sizeof(pThis->u.u128) >= sizeof(pThis->u.s));
130 pThis->cWriterReads = 0;
131 pThis->cWriteRecursions = 0;
132 pThis->hEvtWrite = NIL_RTSEMEVENT;
133 pThis->hEvtRead = NIL_RTSEMEVENTMULTI;
134 pThis->pValidatorWrite = NULL;
135 pThis->pValidatorRead = NULL;
136
137#ifdef RTCRITSECTRW_STRICT
138 bool const fLVEnabled = !(fFlags & RTCRITSECT_FLAGS_NO_LOCK_VAL);
139 if (!pszNameFmt)
140 {
141 static uint32_t volatile s_iAnon = 0;
142 uint32_t i = ASMAtomicIncU32(&s_iAnon) - 1;
143 rc = RTLockValidatorRecExclCreate(&pThis->pValidatorWrite, hClass, uSubClass, pThis,
144 fLVEnabled, "RTCritSectRw-%u", i);
145 if (RT_SUCCESS(rc))
146 rc = RTLockValidatorRecSharedCreate(&pThis->pValidatorRead, hClass, uSubClass, pThis,
147 false /*fSignaller*/, fLVEnabled, "RTCritSectRw-%u", i);
148 }
149 else
150 {
151 va_list va;
152 va_start(va, pszNameFmt);
153 rc = RTLockValidatorRecExclCreateV(&pThis->pValidatorWrite, hClass, uSubClass, pThis,
154 fLVEnabled, pszNameFmt, va);
155 va_end(va);
156 if (RT_SUCCESS(rc))
157 {
158 va_start(va, pszNameFmt);
159 rc = RTLockValidatorRecSharedCreateV(&pThis->pValidatorRead, hClass, uSubClass, pThis,
160 false /*fSignaller*/, fLVEnabled, pszNameFmt, va);
161 va_end(va);
162 }
163 }
164 if (RT_SUCCESS(rc))
165 rc = RTLockValidatorRecMakeSiblings(&pThis->pValidatorWrite->Core, &pThis->pValidatorRead->Core);
166
167 if (RT_SUCCESS(rc))
168#endif
169 {
170 rc = RTSemEventMultiCreate(&pThis->hEvtRead);
171 if (RT_SUCCESS(rc))
172 {
173 rc = RTSemEventCreate(&pThis->hEvtWrite);
174 if (RT_SUCCESS(rc))
175 {
176 pThis->u32Magic = RTCRITSECTRW_MAGIC;
177 return VINF_SUCCESS;
178 }
179 RTSemEventMultiDestroy(pThis->hEvtRead);
180 }
181 }
182
183#ifdef RTCRITSECTRW_STRICT
184 RTLockValidatorRecSharedDestroy(&pThis->pValidatorRead);
185 RTLockValidatorRecExclDestroy(&pThis->pValidatorWrite);
186#endif
187 return rc;
188}
189RT_EXPORT_SYMBOL(RTCritSectRwInitEx);
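/*
 * Lifecycle sketch (illustrative only; the variable and name below are made
 * up, and a ring-3 caller with the default flags is assumed): initialize the
 * section once, use the enter/leave APIs further down, and tear it down with
 * RTCritSectRwDelete when no thread can touch it any more.
 *
 *      RTCRITSECTRW CritSect;
 *      int rc = RTCritSectRwInitNamed(&CritSect, "demo-rw");
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... paired RTCritSectRwEnterShared / RTCritSectRwEnterExcl calls ...
 *          RTCritSectRwDelete(&CritSect);
 *      }
 */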
190
191
192RTDECL(uint32_t) RTCritSectRwSetSubClass(PRTCRITSECTRW pThis, uint32_t uSubClass)
193{
194 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
195 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
196#ifdef IN_RING0
197 Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
198#else
199 Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
200#endif
201#ifdef RTCRITSECTRW_STRICT
202 AssertReturn(!(pThis->fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
203
204 RTLockValidatorRecSharedSetSubClass(pThis->pValidatorRead, uSubClass);
205 return RTLockValidatorRecExclSetSubClass(pThis->pValidatorWrite, uSubClass);
206#else
207 NOREF(uSubClass);
208 return RTLOCKVAL_SUB_CLASS_INVALID;
209#endif
210}
211RT_EXPORT_SYMBOL(RTCritSectRwSetSubClass);
212
213
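/**
 * Common worker for the shared (read) enter APIs.
 *
 * The 64-bit state word tracks the current direction together with the
 * reader, writer and waiting-reader counts.  When the section is already in
 * read mode the reader count is bumped with a single compare-and-swap; when
 * it is completely idle the direction is flipped to read mode with one
 * reader.  When a writer holds the section, the owning writer gets a
 * cWriterReads recursion, a try-only caller gets VERR_SEM_BUSY, and anyone
 * else queues up on hEvtRead until the direction flips back to read mode.
 *
 * @returns IPRT status code (VINF_SUCCESS, VERR_SEM_BUSY for try-only,
 *          VERR_SEM_DESTROYED, or a failure from waiting / lock validation).
 * @param   pThis       The read/write critical section.
 * @param   pSrcPos     Source position for lock validation, optional.
 * @param   fTryOnly    Whether to return VERR_SEM_BUSY instead of blocking.
 */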
214static int rtCritSectRwEnterShared(PRTCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos, bool fTryOnly)
215{
216 /*
217 * Validate input.
218 */
219 AssertPtr(pThis);
220 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
221#ifdef IN_RING0
222 Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
223#else
224 Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
225#endif
226 RT_NOREF_PV(pSrcPos);
227
228#ifdef RTCRITSECTRW_STRICT
229 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
230 if (!fTryOnly)
231 {
232 int rc9;
233 RTNATIVETHREAD hNativeWriter;
234 ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hNativeWriter);
235 if (hNativeWriter != NIL_RTNATIVETHREAD && hNativeWriter == RTThreadNativeSelf())
236 rc9 = RTLockValidatorRecExclCheckOrder(pThis->pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
237 else
238 rc9 = RTLockValidatorRecSharedCheckOrder(pThis->pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
239 if (RT_FAILURE(rc9))
240 return rc9;
241 }
242#endif
243
244 /*
245 * Get cracking...
246 */
247 uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
248 uint64_t u64OldState = u64State;
249
250 for (;;)
251 {
252 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
253 {
254 /* It flows in the right direction, try to follow it before it changes. */
255 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
256 c++;
257 Assert(c < RTCSRW_CNT_MASK / 2);
258 u64State &= ~RTCSRW_CNT_RD_MASK;
259 u64State |= c << RTCSRW_CNT_RD_SHIFT;
260 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
261 {
262#ifdef RTCRITSECTRW_STRICT
263 RTLockValidatorRecSharedAddOwner(pThis->pValidatorRead, hThreadSelf, pSrcPos);
264#endif
265 break;
266 }
267 }
268 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
269 {
270 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
271 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
272 u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
273 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
274 {
275 Assert(!pThis->fNeedReset);
276#ifdef RTCRITSECTRW_STRICT
277 RTLockValidatorRecSharedAddOwner(pThis->pValidatorRead, hThreadSelf, pSrcPos);
278#endif
279 break;
280 }
281 }
282 else
283 {
284 /* Is the writer perhaps doing a read recursion? */
285 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
286 RTNATIVETHREAD hNativeWriter;
287 ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hNativeWriter);
288 if (hNativeSelf == hNativeWriter)
289 {
290#ifdef RTCRITSECTRW_STRICT
291 int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->pValidatorWrite, &pThis->pValidatorRead->Core, pSrcPos);
292 if (RT_FAILURE(rc9))
293 return rc9;
294#endif
295 Assert(pThis->cWriterReads < UINT32_MAX / 2);
296 uint32_t const cReads = ASMAtomicIncU32(&pThis->cWriterReads); NOREF(cReads);
297 IPRT_CRITSECTRW_EXCL_ENTERED_SHARED(pThis, NULL,
298 cReads + pThis->cWriteRecursions,
299 (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
300 (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
301
302 return VINF_SUCCESS; /* don't break! */
303 }
304
305 /* If we're only trying, return already. */
306 if (fTryOnly)
307 {
308 IPRT_CRITSECTRW_SHARED_BUSY(pThis, NULL,
309 (void *)pThis->u.s.hNativeWriter,
310 (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
311 (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
312 return VERR_SEM_BUSY;
313 }
314
315 /* Add ourselves to the queue and wait for the direction to change. */
316 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
317 c++;
318 Assert(c < RTCSRW_CNT_MASK / 2);
319
320 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
321 cWait++;
322 Assert(cWait <= c);
323 Assert(cWait < RTCSRW_CNT_MASK / 2);
324
325 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
326 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
327
328 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
329 {
330 IPRT_CRITSECTRW_SHARED_WAITING(pThis, NULL,
331 (void *)pThis->u.s.hNativeWriter,
332 (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
333 (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
334 for (uint32_t iLoop = 0; ; iLoop++)
335 {
336 int rc;
337#ifdef RTCRITSECTRW_STRICT
338 rc = RTLockValidatorRecSharedCheckBlocking(pThis->pValidatorRead, hThreadSelf, pSrcPos, true,
339 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
340 if (RT_SUCCESS(rc))
341#elif defined(IN_RING3)
342 RTTHREAD hThreadSelf = RTThreadSelf();
343 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
344#endif
345 {
346 rc = RTSemEventMultiWait(pThis->hEvtRead, RT_INDEFINITE_WAIT);
347#ifdef IN_RING3
348 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
349#endif
350 if (pThis->u32Magic != RTCRITSECTRW_MAGIC)
351 return VERR_SEM_DESTROYED;
352 }
353 if (RT_FAILURE(rc))
354 {
355 /* Decrement the counts and return the error. */
356 for (;;)
357 {
358 u64OldState = u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
359 c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
360 c--;
361 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
362 cWait--;
363 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
364 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
365 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
366 break;
367 }
368 return rc;
369 }
370
371 Assert(pThis->fNeedReset);
372 u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
373 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
374 break;
375 AssertMsg(iLoop < 1, ("%u\n", iLoop));
376 }
377
378 /* Decrement the wait count and maybe reset the semaphore (if we're last). */
379 for (;;)
380 {
381 u64OldState = u64State;
382
383 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
384 Assert(cWait > 0);
385 cWait--;
386 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
387 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
388
389 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
390 {
391 if (cWait == 0)
392 {
393 if (ASMAtomicXchgBool(&pThis->fNeedReset, false))
394 {
395 int rc = RTSemEventMultiReset(pThis->hEvtRead);
396 AssertRCReturn(rc, rc);
397 }
398 }
399 break;
400 }
401 u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
402 }
403
404#ifdef RTCRITSECTRW_STRICT
405 RTLockValidatorRecSharedAddOwner(pThis->pValidatorRead, hThreadSelf, pSrcPos);
406#endif
407 break;
408 }
409 }
410
411 if (pThis->u32Magic != RTCRITSECTRW_MAGIC)
412 return VERR_SEM_DESTROYED;
413
414 ASMNopPause();
415 u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
416 u64OldState = u64State;
417 }
418
419 /* got it! */
420 Assert((ASMAtomicReadU64(&pThis->u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
421 IPRT_CRITSECTRW_SHARED_ENTERED(pThis, NULL,
422 (uint32_t)((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT),
423 (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
424 return VINF_SUCCESS;
425}
426
427
428RTDECL(int) RTCritSectRwEnterShared(PRTCRITSECTRW pThis)
429{
430#ifndef RTCRITSECTRW_STRICT
431 return rtCritSectRwEnterShared(pThis, NULL, false /*fTryOnly*/);
432#else
433 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
434 return rtCritSectRwEnterShared(pThis, &SrcPos, false /*fTryOnly*/);
435#endif
436}
437RT_EXPORT_SYMBOL(RTCritSectRwEnterShared);
438
439
440RTDECL(int) RTCritSectRwEnterSharedDebug(PRTCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
441{
442 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
443 return rtCritSectRwEnterShared(pThis, &SrcPos, false /*fTryOnly*/);
444}
445RT_EXPORT_SYMBOL(RTCritSectRwEnterSharedDebug);
446
447
448RTDECL(int) RTCritSectRwTryEnterShared(PRTCRITSECTRW pThis)
449{
450#ifndef RTCRITSECTRW_STRICT
451 return rtCritSectRwEnterShared(pThis, NULL, true /*fTryOnly*/);
452#else
453 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
454 return rtCritSectRwEnterShared(pThis, &SrcPos, true /*fTryOnly*/);
455#endif
456}
457RT_EXPORT_SYMBOL(RTCritSectRwTryEnterShared);
458
459
460RTDECL(int) RTCritSectRwTryEnterSharedDebug(PRTCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
461{
462 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
463 return rtCritSectRwEnterShared(pThis, &SrcPos, true /*fTryOnly*/);
464}
465RT_EXPORT_SYMBOL(RTCritSectRwTryEnterSharedDebug);
466
467
468
469RTDECL(int) RTCritSectRwLeaveShared(PRTCRITSECTRW pThis)
470{
471 /*
472 * Validate handle.
473 */
474 AssertPtr(pThis);
475 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
476#ifdef IN_RING0
477 Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
478#else
479 Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
480#endif
481
482 /*
483 * Check the direction and take action accordingly.
484 */
485 uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
486 uint64_t u64OldState = u64State;
487 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
488 {
489#ifdef RTCRITSECTRW_STRICT
490 int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->pValidatorRead, NIL_RTTHREAD);
491 if (RT_FAILURE(rc9))
492 return rc9;
493#endif
494 IPRT_CRITSECTRW_SHARED_LEAVING(pThis, NULL,
495 (uint32_t)((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT) - 1,
496 (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
497
498 for (;;)
499 {
500 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
501 AssertReturn(c > 0, VERR_NOT_OWNER);
502 c--;
503
504 if ( c > 0
505 || (u64State & RTCSRW_CNT_WR_MASK) == 0)
506 {
507 /* Don't change the direction. */
508 u64State &= ~RTCSRW_CNT_RD_MASK;
509 u64State |= c << RTCSRW_CNT_RD_SHIFT;
510 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
511 break;
512 }
513 else
514 {
515 /* Reverse the direction and signal the reader threads. */
516 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
517 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
518 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
519 {
520 int rc = RTSemEventSignal(pThis->hEvtWrite);
521 AssertRC(rc);
522 break;
523 }
524 }
525
526 ASMNopPause();
527 u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
528 u64OldState = u64State;
529 }
530 }
531 else
532 {
533 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
534 RTNATIVETHREAD hNativeWriter;
535 ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hNativeWriter);
536 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
537 AssertReturn(pThis->cWriterReads > 0, VERR_NOT_OWNER);
538#ifdef RTCRITSECTRW_STRICT
539 int rc = RTLockValidatorRecExclUnwindMixed(pThis->pValidatorWrite, &pThis->pValidatorRead->Core);
540 if (RT_FAILURE(rc))
541 return rc;
542#endif
543 uint32_t cReads = ASMAtomicDecU32(&pThis->cWriterReads); NOREF(cReads);
544 IPRT_CRITSECTRW_EXCL_LEAVING_SHARED(pThis, NULL,
545 cReads + pThis->cWriteRecursions,
546 (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
547 (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
548 }
549
550 return VINF_SUCCESS;
551}
552RT_EXPORT_SYMBOL(RTCritSectRwLeaveShared);
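/*
 * Reader-side sketch (illustrative only; CritSect stands for an initialized
 * RTCRITSECTRW): every successful shared enter must be paired with a
 * RTCritSectRwLeaveShared call, and the try variant returns VERR_SEM_BUSY
 * instead of blocking while the section is held in write mode.
 *
 *      int rc = RTCritSectRwTryEnterShared(&CritSect);
 *      if (rc == VERR_SEM_BUSY)
 *          rc = RTCritSectRwEnterShared(&CritSect); // block until readable
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... read the protected data ...
 *          RTCritSectRwLeaveShared(&CritSect);
 *      }
 */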
553
554
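/**
 * Common worker for the exclusive (write) enter APIs.
 *
 * A caller that already owns the section exclusively only gets its
 * cWriteRecursions count bumped.  Otherwise the writer count in the state
 * word is incremented and, once the direction is (or has been flipped to)
 * write mode, ownership is claimed by writing the native thread handle into
 * hNativeWriter; contending writers block on hEvtWrite, while try-only
 * callers undo the increment and return VERR_SEM_BUSY.
 *
 * @returns IPRT status code (VINF_SUCCESS, VERR_SEM_BUSY for try-only,
 *          VERR_SEM_DESTROYED, or a failure from waiting / lock validation).
 * @param   pThis       The read/write critical section.
 * @param   pSrcPos     Source position for lock validation, optional.
 * @param   fTryOnly    Whether to return VERR_SEM_BUSY instead of blocking.
 */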
555static int rtCritSectRwEnterExcl(PRTCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos, bool fTryOnly)
556{
557 /*
558 * Validate input.
559 */
560 AssertPtr(pThis);
561 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
562#ifdef IN_RING0
563 Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
564#else
565 Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
566#endif
567 RT_NOREF_PV(pSrcPos);
568
569#ifdef RTCRITSECTRW_STRICT
570 RTTHREAD hThreadSelf = NIL_RTTHREAD;
571 if (!fTryOnly)
572 {
573 hThreadSelf = RTThreadSelfAutoAdopt();
574 int rc9 = RTLockValidatorRecExclCheckOrder(pThis->pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
575 if (RT_FAILURE(rc9))
576 return rc9;
577 }
578#endif
579
580 /*
581 * Check if we're already the owner and just recursing.
582 */
583 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
584 RTNATIVETHREAD hNativeWriter;
585 ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hNativeWriter);
586 if (hNativeSelf == hNativeWriter)
587 {
588 Assert((ASMAtomicReadU64(&pThis->u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
589#ifdef RTCRITSECTRW_STRICT
590 int rc9 = RTLockValidatorRecExclRecursion(pThis->pValidatorWrite, pSrcPos);
591 if (RT_FAILURE(rc9))
592 return rc9;
593#endif
594 Assert(pThis->cWriteRecursions < UINT32_MAX / 2);
595 uint32_t cNestings = ASMAtomicIncU32(&pThis->cWriteRecursions); NOREF(cNestings);
596
597#ifdef IPRT_WITH_DTRACE
598 if (IPRT_CRITSECTRW_EXCL_ENTERED_ENABLED())
599 {
600 uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
601 IPRT_CRITSECTRW_EXCL_ENTERED(pThis, NULL, cNestings + pThis->cWriterReads,
602 (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
603 (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
604 }
605#endif
606 return VINF_SUCCESS;
607 }
608
609 /*
610 * Get cracking.
611 */
612 uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
613 uint64_t u64OldState = u64State;
614
615 for (;;)
616 {
617 if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
618 || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
619 {
620 /* It flows in the right direction, try to follow it before it changes. */
621 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
622 c++;
623 Assert(c < RTCSRW_CNT_MASK / 2);
624 u64State &= ~RTCSRW_CNT_WR_MASK;
625 u64State |= c << RTCSRW_CNT_WR_SHIFT;
626 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
627 break;
628 }
629 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
630 {
631 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
632 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
633 u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
634 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
635 break;
636 }
637 else if (fTryOnly)
638 /* Wrong direction and we're not supposed to wait, just return. */
639 return VERR_SEM_BUSY;
640 else
641 {
642 /* Add ourselves to the write count and break out to do the wait. */
643 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
644 c++;
645 Assert(c < RTCSRW_CNT_MASK / 2);
646 u64State &= ~RTCSRW_CNT_WR_MASK;
647 u64State |= c << RTCSRW_CNT_WR_SHIFT;
648 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
649 break;
650 }
651
652 if (pThis->u32Magic != RTCRITSECTRW_MAGIC)
653 return VERR_SEM_DESTROYED;
654
655 ASMNopPause();
656 u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
657 u64OldState = u64State;
658 }
659
660 /*
661 * If we're in write mode now try grab the ownership. Play fair if there
662 * are threads already waiting.
663 */
664 bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
665 && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
666 || fTryOnly);
667 if (fDone)
668 ASMAtomicCmpXchgHandle(&pThis->u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
669 if (!fDone)
670 {
671 /*
672 * If only trying, undo the above writer incrementation and return.
673 */
674 if (fTryOnly)
675 {
676 for (;;)
677 {
678 u64OldState = u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
679 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
680 c--;
681 u64State &= ~RTCSRW_CNT_WR_MASK;
682 u64State |= c << RTCSRW_CNT_WR_SHIFT;
683 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
684 break;
685 }
686 IPRT_CRITSECTRW_EXCL_BUSY(pThis, NULL,
687 (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT) /*fWrite*/,
688 (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
689 (uint32_t)((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT),
690 (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT),
691 (void *)pThis->u.s.hNativeWriter);
692 return VERR_SEM_BUSY;
693 }
694
695 /*
696 * Wait for our turn.
697 */
698 IPRT_CRITSECTRW_EXCL_WAITING(pThis, NULL,
699 (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT) /*fWrite*/,
700 (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
701 (uint32_t)((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT),
702 (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT),
703 (void *)pThis->u.s.hNativeWriter);
704 for (uint32_t iLoop = 0; ; iLoop++)
705 {
706 int rc;
707#ifdef RTCRITSECTRW_STRICT
708 if (hThreadSelf == NIL_RTTHREAD)
709 hThreadSelf = RTThreadSelfAutoAdopt();
710 rc = RTLockValidatorRecExclCheckBlocking(pThis->pValidatorWrite, hThreadSelf, pSrcPos, true,
711 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
712 if (RT_SUCCESS(rc))
713#elif defined(IN_RING3)
714 RTTHREAD hThreadSelf = RTThreadSelf();
715 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
716#endif
717 {
718 rc = RTSemEventWait(pThis->hEvtWrite, RT_INDEFINITE_WAIT);
719#ifdef IN_RING3
720 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
721#endif
722 if (pThis->u32Magic != RTCRITSECTRW_MAGIC)
723 return VERR_SEM_DESTROYED;
724 }
725 if (RT_FAILURE(rc))
726 {
727 /* Decrement the counts and return the error. */
728 for (;;)
729 {
730 u64OldState = u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
731 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
732 c--;
733 u64State &= ~RTCSRW_CNT_WR_MASK;
734 u64State |= c << RTCSRW_CNT_WR_SHIFT;
735 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
736 break;
737 }
738 return rc;
739 }
740
741 u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
742 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
743 {
744 ASMAtomicCmpXchgHandle(&pThis->u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
745 if (fDone)
746 break;
747 }
748 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
749 }
750 }
751
752 /*
753 * Got it!
754 */
755 Assert((ASMAtomicReadU64(&pThis->u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
756 ASMAtomicWriteU32(&pThis->cWriteRecursions, 1);
757 Assert(pThis->cWriterReads == 0);
758#ifdef RTCRITSECTRW_STRICT
759 RTLockValidatorRecExclSetOwner(pThis->pValidatorWrite, hThreadSelf, pSrcPos, true);
760#endif
761 IPRT_CRITSECTRW_EXCL_ENTERED(pThis, NULL, 1,
762 (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
763 (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
764
765 return VINF_SUCCESS;
766}
767
768
769RTDECL(int) RTCritSectRwEnterExcl(PRTCRITSECTRW pThis)
770{
771#ifndef RTCRITSECTRW_STRICT
772 return rtCritSectRwEnterExcl(pThis, NULL, false /*fTryOnly*/);
773#else
774 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
775 return rtCritSectRwEnterExcl(pThis, &SrcPos, false /*fTryOnly*/);
776#endif
777}
778RT_EXPORT_SYMBOL(RTCritSectRwEnterExcl);
779
780
781RTDECL(int) RTCritSectRwEnterExclDebug(PRTCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
782{
783 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
784 return rtCritSectRwEnterExcl(pThis, &SrcPos, false /*fTryOnly*/);
785}
786RT_EXPORT_SYMBOL(RTCritSectRwEnterExclDebug);
787
788
789RTDECL(int) RTCritSectRwTryEnterExcl(PRTCRITSECTRW pThis)
790{
791#ifndef RTCRITSECTRW_STRICT
792 return rtCritSectRwEnterExcl(pThis, NULL, true /*fTryOnly*/);
793#else
794 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
795 return rtCritSectRwEnterExcl(pThis, &SrcPos, true /*fTryOnly*/);
796#endif
797}
798RT_EXPORT_SYMBOL(RTCritSectRwTryEnterExcl);
799
800
801RTDECL(int) RTCritSectRwTryEnterExclDebug(PRTCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
802{
803 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
804 return rtCritSectRwEnterExcl(pThis, &SrcPos, true /*fTryOnly*/);
805}
806RT_EXPORT_SYMBOL(RTCritSectRwTryEnterExclDebug);
807
808
809RTDECL(int) RTCritSectRwLeaveExcl(PRTCRITSECTRW pThis)
810{
811 /*
812 * Validate handle.
813 */
814 AssertPtr(pThis);
815 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
816#ifdef IN_RING0
817 Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
818#else
819 Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
820#endif
821
822 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
823 RTNATIVETHREAD hNativeWriter;
824 ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hNativeWriter);
825 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
826
827 /*
828 * Unwind a recursion.
829 */
830 if (pThis->cWriteRecursions == 1)
831 {
832 AssertReturn(pThis->cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
833#ifdef RTCRITSECTRW_STRICT
834 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->pValidatorWrite, true);
835 if (RT_FAILURE(rc9))
836 return rc9;
837#endif
838 /*
839 * Update the state.
840 */
841 ASMAtomicWriteU32(&pThis->cWriteRecursions, 0);
842 ASMAtomicWriteHandle(&pThis->u.s.hNativeWriter, NIL_RTNATIVETHREAD);
843
844 uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
845 IPRT_CRITSECTRW_EXCL_LEAVING(pThis, NULL, 0,
846 (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
847 (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
848
849 for (;;)
850 {
851 uint64_t u64OldState = u64State;
852
853 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
854 Assert(c > 0);
855 c--;
856
857 if ( c > 0
858 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
859 {
860 /* Don't change the direction, wake up the next writer if any. */
861 u64State &= ~RTCSRW_CNT_WR_MASK;
862 u64State |= c << RTCSRW_CNT_WR_SHIFT;
863 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
864 {
865 if (c > 0)
866 {
867 int rc = RTSemEventSignal(pThis->hEvtWrite);
868 AssertRC(rc);
869 }
870 break;
871 }
872 }
873 else
874 {
875 /* Reverse the direction and signal the reader threads. */
876 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
877 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
878 if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
879 {
880 Assert(!pThis->fNeedReset);
881 ASMAtomicWriteBool(&pThis->fNeedReset, true);
882 int rc = RTSemEventMultiSignal(pThis->hEvtRead);
883 AssertRC(rc);
884 break;
885 }
886 }
887
888 ASMNopPause();
889 if (pThis->u32Magic != RTCRITSECTRW_MAGIC)
890 return VERR_SEM_DESTROYED;
891 u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
892 }
893 }
894 else
895 {
896 Assert(pThis->cWriteRecursions != 0);
897#ifdef RTCRITSECTRW_STRICT
898 int rc9 = RTLockValidatorRecExclUnwind(pThis->pValidatorWrite);
899 if (RT_FAILURE(rc9))
900 return rc9;
901#endif
902 uint32_t cNestings = ASMAtomicDecU32(&pThis->cWriteRecursions); NOREF(cNestings);
903#ifdef IPRT_WITH_DTRACE
904 if (IPRT_CRITSECTRW_EXCL_LEAVING_ENABLED())
905 {
906 uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
907 IPRT_CRITSECTRW_EXCL_LEAVING(pThis, NULL, cNestings + pThis->cWriterReads,
908 (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
909 (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
910 }
911#endif
912 }
913
914 return VINF_SUCCESS;
915}
916RT_EXPORT_SYMBOL(RTCritSectRwLeaveExcl);
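/*
 * Writer-side sketch (illustrative only; CritSect stands for an initialized
 * RTCRITSECTRW): the exclusive lock nests, and the owning writer may also
 * take the shared lock, but all such read recursions have to be released
 * before the final RTCritSectRwLeaveExcl (see the VERR_WRONG_ORDER check
 * above).
 *
 *      int rc = RTCritSectRwEnterExcl(&CritSect);
 *      if (RT_SUCCESS(rc))
 *      {
 *          RTCritSectRwEnterShared(&CritSect);  // read recursion by the writer
 *          // ... update the protected data ...
 *          RTCritSectRwLeaveShared(&CritSect);
 *          RTCritSectRwLeaveExcl(&CritSect);
 *      }
 */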
917
918
919RTDECL(bool) RTCritSectRwIsWriteOwner(PRTCRITSECTRW pThis)
920{
921 /*
922 * Validate handle.
923 */
924 AssertPtr(pThis);
925 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, false);
926#ifdef IN_RING0
927 Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
928#else
929 Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
930#endif
931
932 /*
933 * Check ownership.
934 */
935 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
936 RTNATIVETHREAD hNativeWriter;
937 ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hNativeWriter);
938 return hNativeWriter == hNativeSelf;
939}
940RT_EXPORT_SYMBOL(RTCritSectRwIsWriteOwner);
941
942
943RTDECL(bool) RTCritSectRwIsReadOwner(PRTCRITSECTRW pThis, bool fWannaHear)
944{
945 RT_NOREF_PV(fWannaHear);
946
947 /*
948 * Validate handle.
949 */
950 AssertPtr(pThis);
951 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, false);
952#ifdef IN_RING0
953 Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
954#else
955 Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
956#endif
957
958 /*
959 * Inspect the state.
960 */
961 uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
962 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
963 {
964 /*
965 * It's in write mode, so we can only be a reader if we're also the
966 * current writer.
967 */
968 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
969 RTNATIVETHREAD hWriter;
970 ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hWriter);
971 return hWriter == hNativeSelf;
972 }
973
974 /*
975 * Read mode. If there are no current readers, then we cannot be a reader.
976 */
977 if (!(u64State & RTCSRW_CNT_RD_MASK))
978 return false;
979
980#ifdef RTCRITSECTRW_STRICT
981 /*
982 * Ask the lock validator.
983 */
984 return RTLockValidatorRecSharedIsOwner(pThis->pValidatorRead, NIL_RTTHREAD);
985#else
986 /*
987 * Ok, we don't know, just tell the caller what he wants to hear.
988 */
989 return fWannaHear;
990#endif
991}
992RT_EXPORT_SYMBOL(RTCritSectRwIsReadOwner);
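/*
 * Ownership-check sketch (illustrative only): these predicates are primarily
 * useful in assertions; without the lock validator, RTCritSectRwIsReadOwner
 * cannot tell individual readers apart and falls back to returning
 * fWannaHear.
 *
 *      Assert(RTCritSectRwIsWriteOwner(&CritSect));
 *      Assert(RTCritSectRwIsReadOwner(&CritSect, true /*fWannaHear*/));
 */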
993
994
995RTDECL(uint32_t) RTCritSectRwGetWriteRecursion(PRTCRITSECTRW pThis)
996{
997 /*
998 * Validate handle.
999 */
1000 AssertPtr(pThis);
1001 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, 0);
1002
1003 /*
1004 * Return the requested data.
1005 */
1006 return pThis->cWriteRecursions;
1007}
1008RT_EXPORT_SYMBOL(RTCritSectRwGetWriteRecursion);
1009
1010
1011RTDECL(uint32_t) RTCritSectRwGetWriterReadRecursion(PRTCRITSECTRW pThis)
1012{
1013 /*
1014 * Validate handle.
1015 */
1016 AssertPtr(pThis);
1017 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, 0);
1018
1019 /*
1020 * Return the requested data.
1021 */
1022 return pThis->cWriterReads;
1023}
1024RT_EXPORT_SYMBOL(RTCritSectRwGetWriterReadRecursion);
1025
1026
1027RTDECL(uint32_t) RTCritSectRwGetReadCount(PRTCRITSECTRW pThis)
1028{
1029 /*
1030 * Validate input.
1031 */
1032 AssertPtr(pThis);
1033 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, 0);
1034
1035 /*
1036 * Return the requested data.
1037 */
1038 uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
1039 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
1040 return 0;
1041 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
1042}
1043RT_EXPORT_SYMBOL(RTCritSectRwGetReadCount);
1044
1045
1046RTDECL(int) RTCritSectRwDelete(PRTCRITSECTRW pThis)
1047{
1048 /*
1049 * Assert free waiters and so on.
1050 */
1051 AssertPtr(pThis);
1052 Assert(pThis->u32Magic == RTCRITSECTRW_MAGIC);
1053 //Assert(pThis->cNestings == 0);
1054 //Assert(pThis->cLockers == -1);
1055 Assert(pThis->u.s.hNativeWriter == NIL_RTNATIVETHREAD);
1056#ifdef IN_RING0
1057 Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
1058#else
1059 Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
1060#endif
1061
1062 /*
1063 * Invalidate the structure and free the semaphores.
1064 */
1065 if (!ASMAtomicCmpXchgU32(&pThis->u32Magic, RTCRITSECTRW_MAGIC_DEAD, RTCRITSECTRW_MAGIC))
1066 return VERR_INVALID_PARAMETER;
1067
1068 pThis->fFlags = 0;
1069 pThis->u.s.u64State = 0;
1070
1071 RTSEMEVENT hEvtWrite = pThis->hEvtWrite;
1072 pThis->hEvtWrite = NIL_RTSEMEVENT;
1073 RTSEMEVENTMULTI hEvtRead = pThis->hEvtRead;
1074 pThis->hEvtRead = NIL_RTSEMEVENTMULTI;
1075
1076 int rc1 = RTSemEventDestroy(hEvtWrite); AssertRC(rc1);
1077 int rc2 = RTSemEventMultiDestroy(hEvtRead); AssertRC(rc2);
1078
1079#ifndef IN_RING0
1080 RTLockValidatorRecSharedDestroy(&pThis->pValidatorRead);
1081 RTLockValidatorRecExclDestroy(&pThis->pValidatorWrite);
1082#endif
1083
1084 return RT_SUCCESS(rc1) ? rc2 : rc1;
1085}
1086RT_EXPORT_SYMBOL(RTCritSectRwDelete);
1087