VirtualBox
source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp @ 58116
Last change on this file since 58116 was 58116, checked in by vboxsync, 9 years ago: VMM: Doxygen fixes.
1/* $Id: PDMAllCritSectRw.cpp 58116 2015-10-08 14:51:53Z vboxsync $ */
2/** @file
3 * IPRT - Read/Write Critical Section, Generic.
4 */
5
6/*
7 * Copyright (C) 2009-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsectrw.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vm.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37# include <iprt/semaphore.h>
38#endif
39#if defined(IN_RING3) || defined(IN_RING0)
40# include <iprt/thread.h>
41#endif
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
47/** The number of loops to spin for shared access in ring-3. */
48#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3 20
49/** The number of loops to spin for shared access in ring-0. */
50#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0 128
51/** The number of loops to spin for shared access in the raw-mode context. */
52#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC 128
53
54/** The number of loops to spin for exclusive access in ring-3. */
55#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3 20
56/** The number of loops to spin for exclusive access in ring-0. */
57#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0 256
58/** The number of loops to spin for exclusive access in the raw-mode context. */
59#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC 256
60
61
62/* Undefine the automatic VBOX_STRICT API mappings. */
63#undef PDMCritSectRwEnterExcl
64#undef PDMCritSectRwTryEnterExcl
65#undef PDMCritSectRwEnterShared
66#undef PDMCritSectRwTryEnterShared
67
68
69/**
70 * Gets the ring-3 native thread handle of the calling thread.
71 *
72 * @returns native thread handle (ring-3).
73 * @param pThis The read/write critical section. This is only used in
74 * R0 and RC.
75 */
76DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PCPDMCRITSECTRW pThis)
77{
78#ifdef IN_RING3
79 NOREF(pThis);
80 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
81#else
82 AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
83 NIL_RTNATIVETHREAD);
84 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
85 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
86 RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
87#endif
88 return hNativeSelf;
89}
90
91
92
93
94
95#ifdef IN_RING3
96/**
97 * Changes the lock validator sub-class of the read/write critical section.
98 *
99 * It is recommended to try to make sure that nobody is using this critical section
100 * while changing the value.
101 *
102 * @returns The old sub-class. RTLOCKVAL_SUB_CLASS_INVALID is returned if the
103 * lock validator isn't compiled in or either of the parameters is
104 * invalid.
105 * @param pThis Pointer to the read/write critical section.
106 * @param uSubClass The new sub-class value.
107 */
108VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
109{
110 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
111 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
112# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
113 AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
114
115 RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
116 return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
117# else
118 NOREF(uSubClass);
119 return RTLOCKVAL_SUB_CLASS_INVALID;
120# endif
121}
122#endif /* IN_RING3 */
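
/* Editor's note (not part of the original file): a minimal sketch of how the
 * sub-class API above can be used to tell the lock validator that two R/W
 * sections of the same class may legitimately be held together in a fixed
 * order. RTLOCKVAL_SUB_CLASS_USER is the usual IPRT base value for
 * user-defined sub-classes; the two section pointers are hypothetical. */
#if 0 /* illustration only */
static void demoSetLockOrder(PPDMCRITSECTRW pLowLock, PPDMCRITSECTRW pHighLock)
{
    /* Lower sub-class values must be acquired before higher ones. */
    PDMR3CritSectRwSetSubClass(pLowLock,  RTLOCKVAL_SUB_CLASS_USER);
    PDMR3CritSectRwSetSubClass(pHighLock, RTLOCKVAL_SUB_CLASS_USER + 1);
}
#endif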
123
124
125#ifdef IN_RING0
126/**
127 * Go back to ring-3 so the kernel can do signals, APCs and other fun things.
128 *
129 * @param pThis Pointer to the read/write critical section.
130 */
131static void pdmR0CritSectRwYieldToRing3(PPDMCRITSECTRW pThis)
132{
133 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
134 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
135 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
136 AssertRC(rc);
137}
138#endif /* IN_RING0 */
139
140
141/**
142 * Worker that enters a read/write critical section with shared access.
143 *
144 * @returns VBox status code.
145 * @param pThis Pointer to the read/write critical section.
146 * @param rcBusy The busy return code for ring-0 and ring-3.
147 * @param fTryOnly Only try enter it, don't wait.
148 * @param pSrcPos The source position. (Can be NULL.)
149 * @param fNoVal No validation records.
150 */
151static int pdmCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
152{
153 /*
154 * Validate input.
155 */
156 AssertPtr(pThis);
157 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
158
159#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
160 NOREF(pSrcPos);
161 NOREF(fNoVal);
162#endif
163
164#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
165 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
166 if (!fTryOnly)
167 {
168 int rc9;
169 RTNATIVETHREAD hNativeWriter;
170 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
171 if (hNativeWriter != NIL_RTNATIVETHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pThis))
172 rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
173 else
174 rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
175 if (RT_FAILURE(rc9))
176 return rc9;
177 }
178#endif
179
180 /*
181 * Get cracking...
182 */
183 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
184 uint64_t u64OldState = u64State;
185
186 for (;;)
187 {
188 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
189 {
190 /* It flows in the right direction, try to follow it before it changes. */
191 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
192 c++;
193 Assert(c < RTCSRW_CNT_MASK / 2);
194 u64State &= ~RTCSRW_CNT_RD_MASK;
195 u64State |= c << RTCSRW_CNT_RD_SHIFT;
196 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
197 {
198#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
199 if (!fNoVal)
200 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
201#endif
202 break;
203 }
204 }
205 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
206 {
207 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
208 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
209 u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
210 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
211 {
212 Assert(!pThis->s.Core.fNeedReset);
213#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
214 if (!fNoVal)
215 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
216#endif
217 break;
218 }
219 }
220 else
221 {
222 /* Is the writer perhaps doing a read recursion? */
223 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
224 RTNATIVETHREAD hNativeWriter;
225 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
226 if (hNativeSelf == hNativeWriter)
227 {
228#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
229 if (!fNoVal)
230 {
231 int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
232 if (RT_FAILURE(rc9))
233 return rc9;
234 }
235#endif
236 Assert(pThis->s.Core.cWriterReads < UINT32_MAX / 2);
237 ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
238 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
239 return VINF_SUCCESS; /* don't break! */
240 }
241
242 /*
243 * If we're only trying, return already.
244 */
245 if (fTryOnly)
246 {
247 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
248 return VERR_SEM_BUSY;
249 }
250
251#if defined(IN_RING3) || defined(IN_RING0)
252# ifdef IN_RING0
253 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
254 && ASMIntAreEnabled())
255# endif
256 {
257 /*
258 * Add ourselves to the queue and wait for the direction to change.
259 */
260 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
261 c++;
262 Assert(c < RTCSRW_CNT_MASK / 2);
263
264 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
265 cWait++;
266 Assert(cWait <= c);
267 Assert(cWait < RTCSRW_CNT_MASK / 2);
268
269 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
270 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
271
272 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
273 {
274 for (uint32_t iLoop = 0; ; iLoop++)
275 {
276 int rc;
277# ifdef IN_RING3
278# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
279 rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
280 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
281 if (RT_SUCCESS(rc))
282# else
283 RTTHREAD hThreadSelf = RTThreadSelf();
284 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
285# endif
286# endif
287 {
288 for (;;)
289 {
290 rc = SUPSemEventMultiWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
291 (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
292 RT_INDEFINITE_WAIT);
293 if ( rc != VERR_INTERRUPTED
294 || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
295 break;
296# ifdef IN_RING0
297 pdmR0CritSectRwYieldToRing3(pThis);
298# endif
299 }
300# ifdef IN_RING3
301 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
302# endif
303 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
304 return VERR_SEM_DESTROYED;
305 }
306 if (RT_FAILURE(rc))
307 {
308 /* Decrement the counts and return the error. */
309 for (;;)
310 {
311 u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
312 c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
313 c--;
314 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
315 cWait--;
316 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
317 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
318 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
319 break;
320 }
321 return rc;
322 }
323
324 Assert(pThis->s.Core.fNeedReset);
325 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
326 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
327 break;
328 AssertMsg(iLoop < 1, ("%u\n", iLoop));
329 }
330
331 /* Decrement the wait count and maybe reset the semaphore (if we're last). */
332 for (;;)
333 {
334 u64OldState = u64State;
335
336 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
337 Assert(cWait > 0);
338 cWait--;
339 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
340 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
341
342 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
343 {
344 if (cWait == 0)
345 {
346 if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
347 {
348 int rc = SUPSemEventMultiReset(pThis->s.CTX_SUFF(pVM)->pSession,
349 (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
350 AssertRCReturn(rc, rc);
351 }
352 }
353 break;
354 }
355 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
356 }
357
358# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
359 if (!fNoVal)
360 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
361# endif
362 break;
363 }
364 }
365#endif /* IN_RING3 || IN_RING0 */
366#ifndef IN_RING3
367# ifdef IN_RING0
368 else
369# endif
370 {
371 /*
372 * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
373 * back to ring-3 and do it there or return rcBusy.
374 */
375 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
376 if (rcBusy == VINF_SUCCESS)
377 {
378 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
379 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
380 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
381 * back to ring-3. Goes for both kinds of crit sects. */
382 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
383 }
384 return rcBusy;
385 }
386#endif /* !IN_RING3 */
387 }
388
389 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
390 return VERR_SEM_DESTROYED;
391
392 ASMNopPause();
393 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
394 u64OldState = u64State;
395 }
396
397 /* got it! */
398 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
399 Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
400 return VINF_SUCCESS;
401
402}
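
/* Editor's note (not part of the original file): the worker above packs the
 * read count, write count, read-wait count and current direction into a
 * single 64-bit state word, manipulated with compare-and-swap using the
 * RTCSRW_* masks and shifts it already references. A sketch of how such a
 * snapshot decodes: */
#if 0 /* illustration only */
static void demoDumpState(PPDMCRITSECTRW pThis)
{
    uint64_t const u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t const cReads   = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
    uint64_t const cWrites  = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
    bool     const fRead    = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
    Log(("critsectrw state: %u readers, %u writers (incl. waiting), direction=%s\n",
         (unsigned)cReads, (unsigned)cWrites, fRead ? "read" : "write"));
}
#endif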
403
404
405/**
406 * Enter a critical section with shared (read) access.
407 *
408 * @returns VBox status code.
409 * @retval VINF_SUCCESS on success.
410 * @retval @a rcBusy if in ring-0 or raw-mode context and it is busy.
411 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
412 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
413 * during the operation.
414 *
415 * @param pThis Pointer to the read/write critical section.
416 * @param rcBusy The status code to return when we're in RC or R0 and the
417 * section is busy. Pass VINF_SUCCESS to acquire the
418 * critical section thru a ring-3 call if necessary.
420 * @sa PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
421 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
422 * RTCritSectRwEnterShared.
423 */
424VMMDECL(int) PDMCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy)
425{
426#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
427 return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
428#else
429 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
430 return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
431#endif
432}
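
/* Editor's note (not part of the original file): a sketch of the two rcBusy
 * conventions for the ring-0/raw-mode case. The pDevState pointer, its
 * CritSectRw member and the deferral status are hypothetical. */
#if 0 /* illustration only */
/* Variant 1: let PDM take the section via a ring-3 call when contended. */
int rc = PDMCritSectRwEnterShared(&pDevState->CritSectRw, VINF_SUCCESS);
/* Variant 2: bail out with a distinct status and retry from ring-3 later. */
rc = PDMCritSectRwEnterShared(&pDevState->CritSectRw, VERR_SEM_BUSY);
if (rc == VERR_SEM_BUSY)
    return VINF_IOM_R3_MMIO_READ; /* hypothetical deferral status */
#endif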
433
434
435/**
436 * Enter a critical section with shared (read) access.
437 *
438 * @returns VBox status code.
439 * @retval VINF_SUCCESS on success.
440 * @retval @a rcBusy if in ring-0 or raw-mode context and it is busy.
441 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
442 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
443 * during the operation.
444 *
445 * @param pThis Pointer to the read/write critical section.
446 * @param rcBusy The status code to return when we're in RC or R0 and the
447 * section is busy. Pass VINF_SUCCESS to acquire the
448 * critical section thru a ring-3 call if necessary.
449 * @param uId Where we're entering the section.
450 * @param SRC_POS The source position.
451 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
452 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
453 * RTCritSectRwEnterSharedDebug.
454 */
455VMMDECL(int) PDMCritSectRwEnterSharedDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
456{
457 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
458#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
459 return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
460#else
461 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
462 return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
463#endif
464}
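
/* Editor's note (not part of the original file): the Debug variant only
 * differs by feeding a source position to the lock validator in strict
 * ring-3 builds. A sketch of a direct call; uId is a free-form caller tag
 * (0 here) and RT_SRC_POS expands to the file, line and function arguments
 * as usual in IPRT. pDevState is hypothetical. */
#if 0 /* illustration only */
int rc = PDMCritSectRwEnterSharedDebug(&pDevState->CritSectRw, VERR_SEM_BUSY,
                                       0 /*uId*/, RT_SRC_POS);
AssertRC(rc);
#endif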
465
466
467/**
468 * Try enter a critical section with shared (read) access.
469 *
470 * @returns VBox status code.
471 * @retval VINF_SUCCESS on success.
472 * @retval VERR_SEM_BUSY if the critsect was owned.
473 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
474 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
475 * during the operation.
476 *
477 * @param pThis Pointer to the read/write critical section.
480 * @sa PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
481 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
482 * RTCritSectRwTryEnterShared.
483 */
484VMMDECL(int) PDMCritSectRwTryEnterShared(PPDMCRITSECTRW pThis)
485{
486#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
487 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
488#else
489 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
490 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
491#endif
492}
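
/* Editor's note (not part of the original file): a sketch of the classic
 * try-enter pattern, doing optional work only when the section can be taken
 * without blocking. demoUpdateStatsLocked and pDevState are hypothetical. */
#if 0 /* illustration only */
if (RT_SUCCESS(PDMCritSectRwTryEnterShared(&pDevState->CritSectRw)))
{
    demoUpdateStatsLocked(pDevState);   /* hypothetical helper */
    PDMCritSectRwLeaveShared(&pDevState->CritSectRw);
}
/* else: skip the optional work rather than wait. */
#endif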
493
494
495/**
496 * Try enter a critical section with shared (read) access.
497 *
498 * @returns VBox status code.
499 * @retval VINF_SUCCESS on success.
500 * @retval VERR_SEM_BUSY if the critsect was owned.
501 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
502 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
503 * during the operation.
504 *
505 * @param pThis Pointer to the read/write critical section.
506 * @param uId Where we're entering the section.
507 * @param SRC_POS The source position.
508 * @sa PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
509 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
510 * RTCritSectRwTryEnterSharedDebug.
511 */
512VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
513{
514 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
515#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
516 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
517#else
518 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
519 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
520#endif
521}
522
523
524#ifdef IN_RING3
525/**
526 * Enters a PDM read/write critical section with shared (read) access.
527 *
528 * @returns VINF_SUCCESS if entered successfully.
529 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
530 * during the operation.
531 *
532 * @param pThis Pointer to the read/write critical section.
533 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
534 */
535VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PPDMCRITSECTRW pThis, bool fCallRing3)
536{
537 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
538}
539#endif
540
541
542/**
543 * Leave a critical section held with shared access.
544 *
545 * @returns VBox status code.
546 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
547 * during the operation.
548 * @param pThis Pointer to the read/write critical section.
549 * @param fNoVal No validation records (i.e. queued release).
550 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
551 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
552 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
553 */
554static int pdmCritSectRwLeaveSharedWorker(PPDMCRITSECTRW pThis, bool fNoVal)
555{
556 /*
557 * Validate handle.
558 */
559 AssertPtr(pThis);
560 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
561
562#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
563 NOREF(fNoVal);
564#endif
565
566 /*
567 * Check the direction and take action accordingly.
568 */
569 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
570 uint64_t u64OldState = u64State;
571 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
572 {
573#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
574 if (fNoVal)
575 Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
576 else
577 {
578 int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
579 if (RT_FAILURE(rc9))
580 return rc9;
581 }
582#endif
583 for (;;)
584 {
585 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
586 AssertReturn(c > 0, VERR_NOT_OWNER);
587 c--;
588
589 if ( c > 0
590 || (u64State & RTCSRW_CNT_WR_MASK) == 0)
591 {
592 /* Don't change the direction. */
593 u64State &= ~RTCSRW_CNT_RD_MASK;
594 u64State |= c << RTCSRW_CNT_RD_SHIFT;
595 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
596 break;
597 }
598 else
599 {
600#if defined(IN_RING3) || defined(IN_RING0)
601# ifdef IN_RING0
602 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
603 && ASMIntAreEnabled())
604# endif
605 {
606 /* Reverse the direction and signal the writer threads. */
607 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
608 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
609 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
610 {
611 int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
612 AssertRC(rc);
613 break;
614 }
615 }
616#endif /* IN_RING3 || IN_RING0 */
617#ifndef IN_RING3
618# ifdef IN_RING0
619 else
620# endif
621 {
622 /* Queue the exit request (ring-3). */
623 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
624 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
625 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
626 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
627 AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
628 pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
629 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
630 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
631 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
632 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
633 break;
634 }
635#endif
636 }
637
638 ASMNopPause();
639 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
640 u64OldState = u64State;
641 }
642 }
643 else
644 {
645 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
646 RTNATIVETHREAD hNativeWriter;
647 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
648 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
649 AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
650#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
651 if (!fNoVal)
652 {
653 int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
654 if (RT_FAILURE(rc))
655 return rc;
656 }
657#endif
658 ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
659 }
660
661 return VINF_SUCCESS;
662}
663
664/**
665 * Leave a critical section held with shared access.
666 *
667 * @returns VBox status code.
668 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
669 * during the operation.
670 * @param pThis Pointer to the read/write critical section.
671 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
672 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
673 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
674 */
675VMMDECL(int) PDMCritSectRwLeaveShared(PPDMCRITSECTRW pThis)
676{
677 return pdmCritSectRwLeaveSharedWorker(pThis, false /*fNoVal*/);
678}
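
/* Editor's note (not part of the original file): a self-contained sketch of
 * the enter-shared/leave-shared pairing around a read-only access. The
 * MYDEVDEMO structure is hypothetical; later sketches reuse it as pDemo. */
#if 0 /* illustration only */
typedef struct MYDEVDEMO
{
    PDMCRITSECTRW   CritSectRw;     /* protects the fields below */
    uint32_t        cEntries;
} MYDEVDEMO;

static uint32_t demoGetEntryCount(MYDEVDEMO *pDemo)
{
    int rc = PDMCritSectRwEnterShared(&pDemo->CritSectRw, VERR_SEM_BUSY);
    AssertRCReturn(rc, 0);
    uint32_t const cEntries = pDemo->cEntries;  /* multiple readers may be here concurrently */
    PDMCritSectRwLeaveShared(&pDemo->CritSectRw);
    return cEntries;
}
#endif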
679
680
681#if defined(IN_RING3) || defined(IN_RING0)
682/**
683 * PDMCritSectBothFF interface.
684 *
685 * @param pThis Pointer to the read/write critical section.
686 */
687void pdmCritSectRwLeaveSharedQueued(PPDMCRITSECTRW pThis)
688{
689 pdmCritSectRwLeaveSharedWorker(pThis, true /*fNoVal*/);
690}
691#endif
692
693
694/**
695 * Worker that enters a read/write critical section with exclusive access.
696 *
697 * @returns VBox status code.
698 * @param pThis Pointer to the read/write critical section.
699 * @param rcBusy The busy return code for ring-0 and ring-3.
700 * @param fTryOnly Only try enter it, don't wait.
701 * @param pSrcPos The source position. (Can be NULL.)
702 * @param fNoVal No validation records.
703 */
704static int pdmCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
705{
706 /*
707 * Validate input.
708 */
709 AssertPtr(pThis);
710 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
711
712#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
713 NOREF(pSrcPos);
714 NOREF(fNoVal);
715#endif
716
717#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
718 RTTHREAD hThreadSelf = NIL_RTTHREAD;
719 if (!fTryOnly)
720 {
721 hThreadSelf = RTThreadSelfAutoAdopt();
722 int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
723 if (RT_FAILURE(rc9))
724 return rc9;
725 }
726#endif
727
728 /*
729 * Check if we're already the owner and just recursing.
730 */
731 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
732 RTNATIVETHREAD hNativeWriter;
733 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
734 if (hNativeSelf == hNativeWriter)
735 {
736 Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
737#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
738 if (!fNoVal)
739 {
740 int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
741 if (RT_FAILURE(rc9))
742 return rc9;
743 }
744#endif
745 Assert(pThis->s.Core.cWriteRecursions < UINT32_MAX / 2);
746 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
747 ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
748 return VINF_SUCCESS;
749 }
750
751 /*
752 * Get cracking.
753 */
754 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
755 uint64_t u64OldState = u64State;
756
757 for (;;)
758 {
759 if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
760 || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
761 {
762 /* It flows in the right direction, try to follow it before it changes. */
763 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
764 c++;
765 Assert(c < RTCSRW_CNT_MASK / 2);
766 u64State &= ~RTCSRW_CNT_WR_MASK;
767 u64State |= c << RTCSRW_CNT_WR_SHIFT;
768 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
769 break;
770 }
771 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
772 {
773 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
774 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
775 u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
776 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
777 break;
778 }
779 else if (fTryOnly)
780 {
781 /* Wrong direction and we're not supposed to wait, just return. */
782 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
783 return VERR_SEM_BUSY;
784 }
785 else
786 {
787 /* Add ourselves to the write count and break out to do the wait. */
788 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
789 c++;
790 Assert(c < RTCSRW_CNT_MASK / 2);
791 u64State &= ~RTCSRW_CNT_WR_MASK;
792 u64State |= c << RTCSRW_CNT_WR_SHIFT;
793 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
794 break;
795 }
796
797 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
798 return VERR_SEM_DESTROYED;
799
800 ASMNopPause();
801 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
802 u64OldState = u64State;
803 }
804
805 /*
806 * If we're in write mode now, try to grab the ownership. Play fair if there
807 * are threads already waiting.
808 */
809 bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
810#if defined(IN_RING3)
811 && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
812 || fTryOnly)
813#endif
814 ;
815 if (fDone)
816 ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
817 if (!fDone)
818 {
819 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
820
821#if defined(IN_RING3) || defined(IN_RING0)
822 if ( !fTryOnly
823# ifdef IN_RING0
824 && RTThreadPreemptIsEnabled(NIL_RTTHREAD)
825 && ASMIntAreEnabled()
826# endif
827 )
828 {
829
830 /*
831 * Wait for our turn.
832 */
833 for (uint32_t iLoop = 0; ; iLoop++)
834 {
835 int rc;
836# ifdef IN_RING3
837# ifdef PDMCRITSECTRW_STRICT
838 if (hThreadSelf == NIL_RTTHREAD)
839 hThreadSelf = RTThreadSelfAutoAdopt();
840 rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
841 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
842 if (RT_SUCCESS(rc))
843# else
844 RTTHREAD hThreadSelf = RTThreadSelf();
845 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
846# endif
847# endif
848 {
849 for (;;)
850 {
851 rc = SUPSemEventWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
852 (SUPSEMEVENT)pThis->s.Core.hEvtWrite,
853 RT_INDEFINITE_WAIT);
854 if ( rc != VERR_INTERRUPTED
855 || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
856 break;
857# ifdef IN_RING0
858 pdmR0CritSectRwYieldToRing3(pThis);
859# endif
860 }
861# ifdef IN_RING3
862 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
863# endif
864 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
865 return VERR_SEM_DESTROYED;
866 }
867 if (RT_FAILURE(rc))
868 {
869 /* Decrement the counts and return the error. */
870 for (;;)
871 {
872 u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
873 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
874 c--;
875 u64State &= ~RTCSRW_CNT_WR_MASK;
876 u64State |= c << RTCSRW_CNT_WR_SHIFT;
877 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
878 break;
879 }
880 return rc;
881 }
882
883 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
884 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
885 {
886 ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
887 if (fDone)
888 break;
889 }
890 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
891 }
892
893 }
894 else
895#endif /* IN_RING3 || IN_RING0 */
896 {
897#ifdef IN_RING3
898 /* TryEnter call - decrement the number of (waiting) writers. */
899#else
900 /* We cannot call SUPSemEventWaitNoResume in this context. Go back to
901 ring-3 and do it there or return rcBusy. */
902#endif
903
904 for (;;)
905 {
906 u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
907 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
908 c--;
909 u64State &= ~RTCSRW_CNT_WR_MASK;
910 u64State |= c << RTCSRW_CNT_WR_SHIFT;
911 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
912 break;
913 }
914
915#ifdef IN_RING3
916 return VERR_SEM_BUSY;
917#else
918 if (rcBusy == VINF_SUCCESS)
919 {
920 Assert(!fTryOnly);
921 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
922 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
923 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
924 * back to ring-3. Goes for both kinds of crit sects. */
925 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
926 }
927 return rcBusy;
928#endif
929 }
930 }
931
932 /*
933 * Got it!
934 */
935 Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
936 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
937 Assert(pThis->s.Core.cWriterReads == 0);
938#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
939 if (!fNoVal)
940 RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
941#endif
942 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
943 STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);
944
945 return VINF_SUCCESS;
946}
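
/* Editor's note (not part of the original file): the worker above lets the
 * current writer also take the section in shared mode (the cWriterReads
 * path), so mixed recursion like the following sketch is legal, provided
 * every shared enter is released before the final exclusive leave. pDemo
 * refers to the hypothetical MYDEVDEMO sketch earlier. */
#if 0 /* illustration only */
PDMCritSectRwEnterExcl(&pDemo->CritSectRw, VERR_SEM_BUSY);
PDMCritSectRwEnterShared(&pDemo->CritSectRw, VERR_SEM_BUSY); /* writer read recursion */
/* ... read-only helpers may run here ... */
PDMCritSectRwLeaveShared(&pDemo->CritSectRw);   /* must precede the final excl leave */
PDMCritSectRwLeaveExcl(&pDemo->CritSectRw);
#endif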
947
948
949/**
950 * Enter a critical section with exclusive (write) access.
951 *
952 * @returns VBox status code.
953 * @retval VINF_SUCCESS on success.
954 * @retval @a rcBusy if in ring-0 or raw-mode context and it is busy.
955 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
956 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
957 * during the operation.
958 *
959 * @param pThis Pointer to the read/write critical section.
960 * @param rcBusy The status code to return when we're in RC or R0 and the
961 * section is busy. Pass VINF_SUCCESS to acquire the
962 * critical section thru a ring-3 call if necessary.
963 * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
964 * PDMCritSectRwTryEnterExclDebug,
965 * PDMCritSectEnterDebug, PDMCritSectEnter,
966 * RTCritSectRwEnterExcl.
967 */
968VMMDECL(int) PDMCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy)
969{
970#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
971 return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
972#else
973 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
974 return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
975#endif
976}
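
/* Editor's note (not part of the original file): a sketch of an exclusive
 * (write) section protecting an update, mirroring the shared read sketch
 * earlier; pDemo is the hypothetical MYDEVDEMO instance. */
#if 0 /* illustration only */
static int demoSetEntryCount(MYDEVDEMO *pDemo, uint32_t cEntries)
{
    int rc = PDMCritSectRwEnterExcl(&pDemo->CritSectRw, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        pDemo->cEntries = cEntries;     /* sole owner here */
        PDMCritSectRwLeaveExcl(&pDemo->CritSectRw);
    }
    return rc;
}
#endif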
977
978
979/**
980 * Enter a critical section with exclusive (write) access.
981 *
982 * @returns VBox status code.
983 * @retval VINF_SUCCESS on success.
984 * @retval @a rcBusy if in ring-0 or raw-mode context and it is busy.
985 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
986 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
987 * during the operation.
988 *
989 * @param pThis Pointer to the read/write critical section.
990 * @param rcBusy The status code to return when we're in RC or R0 and the
991 * section is busy. Pass VINF_SUCCESS to acquire the
992 * critical section thru a ring-3 call if necessary.
993 * @param uId Where we're entering the section.
994 * @param SRC_POS The source position.
995 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
996 * PDMCritSectRwTryEnterExclDebug,
997 * PDMCritSectEnterDebug, PDMCritSectEnter,
998 * RTCritSectRwEnterExclDebug.
999 */
1000VMMDECL(int) PDMCritSectRwEnterExclDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1001{
1002 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1003#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1004 return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
1005#else
1006 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1007 return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1008#endif
1009}
1010
1011
1012/**
1013 * Try enter a critical section with exclusive (write) access.
1014 *
1015 * @retval VINF_SUCCESS on success.
1016 * @retval VERR_SEM_BUSY if the critsect was owned.
1017 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1018 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1019 * during the operation.
1020 *
1021 * @param pThis Pointer to the read/write critical section.
1022 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
1023 * PDMCritSectRwEnterExclDebug,
1024 * PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
1025 * RTCritSectRwTryEnterExcl.
1026 */
1027VMMDECL(int) PDMCritSectRwTryEnterExcl(PPDMCRITSECTRW pThis)
1028{
1029#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1030 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
1031#else
1032 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1033 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1034#endif
1035}
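
/* Editor's note (not part of the original file): a sketch of an
 * opportunistic write via try-enter, falling back to deferring the work when
 * the section is contended; demoDeferWork is a hypothetical helper. */
#if 0 /* illustration only */
int rc = PDMCritSectRwTryEnterExcl(&pDemo->CritSectRw);
if (RT_SUCCESS(rc))
{
    pDemo->cEntries++;
    PDMCritSectRwLeaveExcl(&pDemo->CritSectRw);
}
else if (rc == VERR_SEM_BUSY)
    demoDeferWork(pDemo);   /* hypothetical helper */
#endif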
1036
1037
1038/**
1039 * Try enter a critical section with exclusive (write) access.
1040 *
1041 * @retval VINF_SUCCESS on success.
1042 * @retval VERR_SEM_BUSY if the critsect was owned.
1043 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1044 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1045 * during the operation.
1046 *
1047 * @param pThis Pointer to the read/write critical section.
1048 * @param uId Where we're entering the section.
1049 * @param SRC_POS The source position.
1050 * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
1051 * PDMCritSectRwEnterExclDebug,
1052 * PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
1053 * RTCritSectRwTryEnterExclDebug.
1054 */
1055VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1056{
1057 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1058#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1059 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
1060#else
1061 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1062 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1063#endif
1064}
1065
1066
1067#ifdef IN_RING3
1068/**
1069 * Enters a PDM read/write critical section with exclusive (write) access.
1070 *
1071 * @returns VINF_SUCCESS if entered successfully.
1072 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1073 * during the operation.
1074 *
1075 * @param pThis Pointer to the read/write critical section.
1076 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
1077 */
1078VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PPDMCRITSECTRW pThis, bool fCallRing3)
1079{
1080 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
1081}
1082#endif /* IN_RING3 */
1083
1084
1085/**
1086 * Leave a critical section held exclusively.
1087 *
1088 * @returns VBox status code.
1089 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1090 * during the operation.
1091 * @param pThis Pointer to the read/write critical section.
1092 * @param fNoVal No validation records (i.e. queued release).
1093 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1094 */
1095static int pdmCritSectRwLeaveExclWorker(PPDMCRITSECTRW pThis, bool fNoVal)
1096{
1097 /*
1098 * Validate handle.
1099 */
1100 AssertPtr(pThis);
1101 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1102
1103#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1104 NOREF(fNoVal);
1105#endif
1106
1107 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
1108 RTNATIVETHREAD hNativeWriter;
1109 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
1110 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1111
1112 /*
1113 * Unwind one recursion. Is it the final one?
1114 */
1115 if (pThis->s.Core.cWriteRecursions == 1)
1116 {
1117 AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
1118#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1119 if (fNoVal)
1120 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1121 else
1122 {
1123 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
1124 if (RT_FAILURE(rc9))
1125 return rc9;
1126 }
1127#endif
1128 /*
1129 * Update the state.
1130 */
1131#if defined(IN_RING3) || defined(IN_RING0)
1132# ifdef IN_RING0
1133 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
1134 && ASMIntAreEnabled())
1135# endif
1136 {
1137 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1138 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1139 ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);
1140
1141 for (;;)
1142 {
1143 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
1144 uint64_t u64OldState = u64State;
1145
1146 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1147 Assert(c > 0);
1148 c--;
1149
1150 if ( c > 0
1151 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
1152 {
1153 /* Don't change the direction, wake up the next writer if any. */
1154 u64State &= ~RTCSRW_CNT_WR_MASK;
1155 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1156 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
1157 {
1158 if (c > 0)
1159 {
1160 int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1161 AssertRC(rc);
1162 }
1163 break;
1164 }
1165 }
1166 else
1167 {
1168 /* Reverse the direction and signal the reader threads. */
1169 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1170 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1171 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
1172 {
1173 Assert(!pThis->s.Core.fNeedReset);
1174 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
1175 int rc = SUPSemEventMultiSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1176 AssertRC(rc);
1177 break;
1178 }
1179 }
1180
1181 ASMNopPause();
1182 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
1183 return VERR_SEM_DESTROYED;
1184 }
1185 }
1186#endif /* IN_RING3 || IN_RING0 */
1187#ifndef IN_RING3
1188# ifdef IN_RING0
1189 else
1190# endif
1191 {
1192 /*
1193 * We can call neither SUPSemEventSignal nor SUPSemEventMultiSignal here,
1194 * so queue the exit request (ring-3).
1195 */
1196 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
1197 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1198 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
1199 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
1200 AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
1201 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
1202 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1203 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1204 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1205 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
1206 }
1207#endif
1208 }
1209 else
1210 {
1211 /*
1212 * Not the final recursion.
1213 */
1214 Assert(pThis->s.Core.cWriteRecursions != 0);
1215#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1216 if (fNoVal)
1217 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1218 else
1219 {
1220 int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
1221 if (RT_FAILURE(rc9))
1222 return rc9;
1223 }
1224#endif
1225 ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
1226 }
1227
1228 return VINF_SUCCESS;
1229}
1230
1231
1232/**
1233 * Leave a critical section held exclusively.
1234 *
1235 * @returns VBox status code.
1236 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1237 * during the operation.
1238 * @param pThis Pointer to the read/write critical section.
1239 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1240 */
1241VMMDECL(int) PDMCritSectRwLeaveExcl(PPDMCRITSECTRW pThis)
1242{
1243 return pdmCritSectRwLeaveExclWorker(pThis, false /*fNoVal*/);
1244}
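
/* Editor's note (not part of the original file): exclusive enters recurse,
 * so each nested enter needs its own leave. A sketch, using the recursion
 * getter below for an assertion; pDemo is the hypothetical MYDEVDEMO
 * instance from earlier. */
#if 0 /* illustration only */
PDMCritSectRwEnterExcl(&pDemo->CritSectRw, VERR_SEM_BUSY);
PDMCritSectRwEnterExcl(&pDemo->CritSectRw, VERR_SEM_BUSY);  /* recursion */
Assert(PDMCritSectRwGetWriteRecursion(&pDemo->CritSectRw) == 2);
PDMCritSectRwLeaveExcl(&pDemo->CritSectRw);
PDMCritSectRwLeaveExcl(&pDemo->CritSectRw);                 /* final leave */
#endif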
1245
1246
1247#if defined(IN_RING3) || defined(IN_RING0)
1248/**
1249 * PDMCritSectBothFF interface.
1250 *
1251 * @param pThis Pointer to the read/write critical section.
1252 */
1253void pdmCritSectRwLeaveExclQueued(PPDMCRITSECTRW pThis)
1254{
1255 pdmCritSectRwLeaveExclWorker(pThis, true /*fNoVal*/);
1256}
1257#endif
1258
1259
1260/**
1261 * Checks whether the caller is the exclusive (write) owner of the critical section.
1262 *
1263 * @retval true if owner.
1264 * @retval false if not owner.
1265 * @param pThis Pointer to the read/write critical section.
1266 * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
1267 * RTCritSectRwIsWriteOwner.
1268 */
1269VMMDECL(bool) PDMCritSectRwIsWriteOwner(PPDMCRITSECTRW pThis)
1270{
1271 /*
1272 * Validate handle.
1273 */
1274 AssertPtr(pThis);
1275 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1276
1277 /*
1278 * Check ownership.
1279 */
1280 RTNATIVETHREAD hNativeWriter;
1281 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
1282 if (hNativeWriter == NIL_RTNATIVETHREAD)
1283 return false;
1284 return hNativeWriter == pdmCritSectRwGetNativeSelf(pThis);
1285}
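
/* Editor's note (not part of the original file): this predicate is intended
 * for sanity checks in code that requires the write lock to already be held,
 * e.g. at the top of an internal helper: */
#if 0 /* illustration only */
static void demoUpdateLocked(MYDEVDEMO *pDemo)
{
    Assert(PDMCritSectRwIsWriteOwner(&pDemo->CritSectRw));
    pDemo->cEntries++;  /* hypothetical MYDEVDEMO field from the earlier sketch */
}
#endif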
1286
1287
1288/**
1289 * Checks if the caller is one of the read owners of the critical section.
1290 *
1291 * @note !CAUTION! This API doesn't work reliably if lock validation isn't
1292 * enabled. Meaning, the answer is not trustworthy unless
1293 * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
1294 * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
1295 * creating the semaphore. And finally, if you used a locking class,
1296 * don't disable deadlock detection by setting cMsMinDeadlock to
1297 * RT_INDEFINITE_WAIT.
1298 *
1299 * In short, only use this for assertions.
1300 *
1301 * @returns @c true if reader, @c false if not.
1302 * @param pThis Pointer to the read/write critical section.
1303 * @param fWannaHear What you'd like to hear when lock validation is not
1304 * available. (For avoiding asserting all over the place.)
1305 * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
1306 */
1307VMMDECL(bool) PDMCritSectRwIsReadOwner(PPDMCRITSECTRW pThis, bool fWannaHear)
1308{
1309 /*
1310 * Validate handle.
1311 */
1312 AssertPtr(pThis);
1313 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1314
1315 /*
1316 * Inspect the state.
1317 */
1318 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
1319 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
1320 {
1321 /*
1322 * It's in write mode, so we can only be a reader if we're also the
1323 * current writer.
1324 */
1325 RTNATIVETHREAD hWriter;
1326 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hWriter);
1327 if (hWriter == NIL_RTNATIVETHREAD)
1328 return false;
1329 return hWriter == pdmCritSectRwGetNativeSelf(pThis);
1330 }
1331
1332 /*
1333 * Read mode. If there are no current readers, then we cannot be a reader.
1334 */
1335 if (!(u64State & RTCSRW_CNT_RD_MASK))
1336 return false;
1337
1338#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1339 /*
1340 * Ask the lock validator.
1341 * Note! It doesn't know everything, let's deal with that if it becomes an issue...
1342 */
1343 return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
1344#else
1345 /*
1346 * Ok, we don't know, just tell the caller what they want to hear.
1347 */
1348 return fWannaHear;
1349#endif
1350}
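
/* Editor's note (not part of the original file): per the caution above, the
 * read-owner check is only reliable in strict builds, so pass the answer you
 * would like for non-strict builds and use the result in assertions only: */
#if 0 /* illustration only */
Assert(PDMCritSectRwIsReadOwner(&pDemo->CritSectRw, true /*fWannaHear*/));
#endif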
1351
1352
1353/**
1354 * Gets the write recursion count.
1355 *
1356 * @returns The write recursion count (0 if bad critsect).
1357 * @param pThis Pointer to the read/write critical section.
1358 * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
1359 * RTCritSectRwGetWriteRecursion.
1360 */
1361VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
1362{
1363 /*
1364 * Validate handle.
1365 */
1366 AssertPtr(pThis);
1367 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1368
1369 /*
1370 * Return the requested data.
1371 */
1372 return pThis->s.Core.cWriteRecursions;
1373}
1374
1375
1376/**
1377 * Gets the read recursion count of the current writer.
1378 *
1379 * @returns The read recursion count (0 if bad critsect).
1380 * @param pThis Pointer to the read/write critical section.
1381 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
1382 * RTCritSectRwGetWriterReadRecursion.
1383 */
1384VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
1385{
1386 /*
1387 * Validate handle.
1388 */
1389 AssertPtr(pThis);
1390 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1391
1392 /*
1393 * Return the requested data.
1394 */
1395 return pThis->s.Core.cWriterReads;
1396}
1397
1398
1399/**
1400 * Gets the current number of reads.
1401 *
1402 * This includes all read recursions, so it might be higher than the number of
1403 * read owners. It does not include reads done by the current writer.
1404 *
1405 * @returns The read count (0 if bad critsect).
1406 * @param pThis Pointer to the read/write critical section.
1407 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
1408 * RTCritSectRwGetReadCount.
1409 */
1410VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
1411{
1412 /*
1413 * Validate input.
1414 */
1415 AssertPtr(pThis);
1416 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1417
1418 /*
1419 * Return the requested data.
1420 */
1421 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
1422 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
1423 return 0;
1424 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
1425}
1426
1427
1428/**
1429 * Checks if the read/write critical section is initialized or not.
1430 *
1431 * @retval true if initialized.
1432 * @retval false if not initialized.
1433 * @param pThis Pointer to the read/write critical section.
1434 * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
1435 */
1436VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
1437{
1438 AssertPtr(pThis);
1439 return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
1440}
1441