VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@47705

Last change on this file since 47705 was 45310, checked in by vboxsync, 12 years ago

PDMCritSectRw: Fixed shared leave bugs.

/* $Id: PDMAllCritSectRw.cpp 45310 2013-04-03 14:54:09Z vboxsync $ */
/** @file
 * IPRT - Read/Write Critical Section, Generic.
 */

/*
 * Copyright (C) 2009-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsectrw.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for shared access in ring-3. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3    20
/** The number of loops to spin for shared access in ring-0. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0    128
/** The number of loops to spin for shared access in the raw-mode context. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC    128

/** The number of loops to spin for exclusive access in ring-3. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3    20
/** The number of loops to spin for exclusive access in ring-0. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0    256
/** The number of loops to spin for exclusive access in the raw-mode context. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC    256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectRwEnterExcl
#undef PDMCritSectRwTryEnterExcl
#undef PDMCritSectRwEnterShared
#undef PDMCritSectRwTryEnterShared

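/*
 * A quick orientation note on u64State (the masks and shifts are defined in
 * iprt/critsect.h; only their roles, which are evident from the code below,
 * are summarized here):
 *
 *      RTCSRW_CNT_RD_MASK      - number of current / queued-up readers.
 *      RTCSRW_CNT_WR_MASK      - number of current + waiting writers.
 *      RTCSRW_WAIT_CNT_RD_MASK - readers waiting for the direction to change.
 *      RTCSRW_DIR_MASK         - current direction (READ or WRITE).
 *
 * Every enter/leave path below mutates this single 64-bit word using
 * ASMAtomicCmpXchgU64 retry loops, which keeps the fast paths lock-free.
 */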

/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pThis       The read/write critical section.  This is only used in
 *                      R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PCPDMCRITSECTRW pThis)
{
#ifdef IN_RING3
    NOREF(pThis);
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM    pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
    RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


#ifdef IN_RING3
/**
 * Changes the lock validator sub-class of the read/write critical section.
 *
 * It is recommended to try to make sure that nobody is using this critical
 * section while changing the value.
 *
 * @returns The old sub-class.  RTLOCKVAL_SUB_CLASS_INVALID is returned if the
 *          lock validator isn't compiled in or either of the parameters is
 *          invalid.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uSubClass   The new sub-class value.
 */
VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
{
    AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);

    RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
    return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
# else
    NOREF(uSubClass);
    return RTLOCKVAL_SUB_CLASS_INVALID;
# endif
}
#endif /* IN_RING3 */
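
/*
 * Usage sketch (illustration only, not part of this file; pCritSect is an
 * assumed, already initialized section):
 *
 *      uint32_t uOld = PDMR3CritSectRwSetSubClass(pCritSect, RTLOCKVAL_SUB_CLASS_USER + 1);
 *      NOREF(uOld); // RTLOCKVAL_SUB_CLASS_INVALID when the validator is compiled out.
 */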


/**
 * Worker that enters a read/write critical section with shared access.
 *
 * @returns VBox status code.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try to enter it, don't wait.
 * @param   pSrcPos     The source position.  (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (!fTryOnly)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTNATIVETHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pThis))
            rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_RD_MASK;
            u64State |= c << RTCSRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
                Assert(!pThis->s.Core.fNeedReset);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else
        {
            /* Is the writer perhaps doing a read recursion? */
            RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
            RTNATIVETHREAD hNativeWriter;
            ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
            if (hNativeSelf == hNativeWriter)
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                {
                    int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
                    if (RT_FAILURE(rc9))
                        return rc9;
                }
#endif
                Assert(pThis->s.Core.cWriterReads < UINT32_MAX / 2);
                ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
                return VINF_SUCCESS; /* don't break! */
            }

            /*
             * If we're only trying, return already.
             */
            if (fTryOnly)
            {
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                return VERR_SEM_BUSY;
            }

#if defined(IN_RING3)
            /*
             * Add ourselves to the queue and wait for the direction to change.
             */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);

            uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
            cWait++;
            Assert(cWait <= c);
            Assert(cWait < RTCSRW_CNT_MASK / 2);

            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
            u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);

            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
                for (uint32_t iLoop = 0; ; iLoop++)
                {
                    int rc;
# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                    rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
                                                               RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
                    if (RT_SUCCESS(rc))
# else
                    RTTHREAD hThreadSelf = RTThreadSelf();
                    RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
# endif
                    {
                        do
                            rc = SUPSemEventMultiWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
                                                              (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
                                                              RT_INDEFINITE_WAIT);
                        while (rc == VERR_INTERRUPTED && pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC);
                        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
                        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                            return VERR_SEM_DESTROYED;
                    }
                    if (RT_FAILURE(rc))
                    {
                        /* Decrement the counts and return the error. */
                        for (;;)
                        {
                            u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                            c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
                            c--;
                            cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
                            cWait--;
                            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                            u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
                            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                                break;
                        }
                        return rc;
                    }

                    Assert(pThis->s.Core.fNeedReset);
                    u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
                        break;
                    AssertMsg(iLoop < 1, ("%u\n", iLoop));
                }

                /* Decrement the wait count and maybe reset the semaphore (if we're last). */
                for (;;)
                {
                    u64OldState = u64State;

                    cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                    Assert(cWait > 0);
                    cWait--;
                    u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
                    u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;

                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        if (cWait == 0)
                        {
                            if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
                            {
                                int rc = SUPSemEventMultiReset(pThis->s.CTX_SUFF(pVM)->pSession,
                                                               (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                                AssertRCReturn(rc, rc);
                            }
                        }
                        break;
                    }
                    u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                }

# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
# endif
                break;
            }

#else
            /*
             * We cannot call SUPSemEventMultiWaitNoResume in this context.  Go
             * back to ring-3 and do it there or return rcBusy.
             */
            STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
            if (rcBusy == VINF_SUCCESS)
            {
                PVM    pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
                PVMCPU pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
                /** @todo Should actually do this via VMMR0.cpp instead of going all the way
                 *        back to ring-3.  Goes for both kinds of crit sects. */
                return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
            }
            return rcBusy;
#endif
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /* Got it! */
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
    return VINF_SUCCESS;
}
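
/*
 * A note on the pattern above (orientation only): every state transition is an
 * optimistic read-modify-write.  The code reads u64State, computes the desired
 * new value, and publishes it with ASMAtomicCmpXchgU64, retrying from scratch
 * whenever another thread changed the word in the meantime:
 *
 *      for (;;)
 *      {
 *          uint64_t u64OldState = ASMAtomicReadU64(&pThis->s.Core.u64State);
 *          uint64_t u64State    = computeNewState(u64OldState);  // placeholder name
 *          if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
 *              break;      // we won the race
 *          ASMNopPause();  // be nice to the sibling hyper-thread, then retry
 *      }
 */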


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterShared.
 */
VMMDECL(int) PDMCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
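
/*
 * Usage sketch (illustration only, not part of this file; pCritSectRw is an
 * assumed, already initialized section).  Note how rcBusy doubles as the
 * ring-0/raw-mode busy status:
 *
 *      int rc = PDMCritSectRwEnterShared(pCritSectRw, VERR_SEM_BUSY);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... read the state the section protects ...
 *          PDMCritSectRwLeaveShared(pCritSectRw);
 *      }
 *      else
 *          Assert(rc == VERR_SEM_BUSY || rc == VERR_SEM_DESTROYED); // busy only in RC/R0
 */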


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwEnterSharedDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterShared.
 */
VMMDECL(int) PDMCritSectRwTryEnterShared(PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
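
/*
 * Usage sketch (illustration only): opportunistic read access with a fallback,
 * e.g. for a statistics peek that may simply be skipped under contention:
 *
 *      if (RT_SUCCESS(PDMCritSectRwTryEnterShared(pCritSectRw)))
 *      {
 *          // ... cheap read-only peek ...
 *          PDMCritSectRwLeaveShared(pCritSectRw);
 *      }
 *      // else: skip the peek rather than block.
 */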


#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with shared (read) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
}
#endif


/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
static int pdmCritSectRwLeaveSharedWorker(PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

    /*
     * Check the direction and take action accordingly.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
    {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
        else
        {
            int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        for (;;)
        {
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            AssertReturn(c > 0, VERR_NOT_OWNER);
            c--;

            if (   c > 0
                || (u64State & RTCSRW_CNT_WR_MASK) == 0)
            {
                /* Don't change the direction. */
                u64State &= ~RTCSRW_CNT_RD_MASK;
                u64State |= c << RTCSRW_CNT_RD_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }
            else
            {
#if defined(IN_RING3)
                /* Reverse the direction and signal the writer threads. */
                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
                u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                {
                    int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                    AssertRC(rc);
                    break;
                }
#else
                /* Queue the exit request (ring-3). */
                PVM    pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
                PVMCPU pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
                uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
                LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
                AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
                pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
                VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
                STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
                STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
                break;
#endif
            }

            ASMNopPause();
            u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
            u64OldState = u64State;
        }
    }
    else
    {
        RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
        AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
            if (RT_FAILURE(rc))
                return rc;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
    }

    return VINF_SUCCESS;
}


/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
VMMDECL(int) PDMCritSectRwLeaveShared(PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveSharedWorker(pThis, false /*fNoVal*/);
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveSharedQueued(PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveSharedWorker(pThis, true /*fNoVal*/);
}
#endif
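
/*
 * Orientation note: the queued-leave path is taken when a shared leave in
 * RC/R0 would have to signal a semaphore, which cannot be done in those
 * contexts.  The section is recorded in apQueuedCritSectRwShrdLeaves and the
 * VMCPU_FF_PDM_CRITSECT / VMCPU_FF_TO_R3 forced-action flags cause a drop to
 * ring-3, where the PDMCritSectBothFF processing replays the leave through
 * pdmCritSectRwLeaveSharedQueued (hence fNoVal, as no ring-3 validation
 * records exist for a leave that logically happened in RC/R0).
 */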


/**
 * Worker that enters a read/write critical section with exclusive access.
 *
 * @returns VBox status code.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try to enter it, don't wait.
 * @param   pSrcPos     The source position.  (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (!fTryOnly)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Check if we're already the owner and just recursing.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeSelf == hNativeWriter)
    {
        Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        Assert(pThis->s.Core.cWriteRecursions < UINT32_MAX / 2);
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
        ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Get cracking.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if (   (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
            || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if (fTryOnly)
        {
            /* Wrong direction and we're not supposed to wait, just return. */
            STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
            return VERR_SEM_BUSY;
        }
        else
        {
            /* Add ourselves to the write count and break out to do the wait. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /*
     * If we're in write mode now try grab the ownership.  Play fair if there
     * are threads already waiting.
     */
    bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
#if defined(IN_RING3)
              && (   ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
                  || fTryOnly)
#endif
               ;
    if (fDone)
        ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
    if (!fDone)
    {
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));

#if defined(IN_RING3)
        if (!fTryOnly)
        {
            /*
             * Wait for our turn.
             */
            for (uint32_t iLoop = 0; ; iLoop++)
            {
                int rc;
# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (hThreadSelf == NIL_RTTHREAD)
                    hThreadSelf = RTThreadSelfAutoAdopt();
                rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
                                                         RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
                if (RT_SUCCESS(rc))
# else
                RTTHREAD hThreadSelf = RTThreadSelf();
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
# endif
                {
                    do
                        rc = SUPSemEventWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
                                                     (SUPSEMEVENT)pThis->s.Core.hEvtWrite,
                                                     RT_INDEFINITE_WAIT);
                    while (rc == VERR_INTERRUPTED && pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC);
                    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
                    if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                        return VERR_SEM_DESTROYED;
                }
                if (RT_FAILURE(rc))
                {
                    /* Decrement the counts and return the error. */
                    for (;;)
                    {
                        u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                        uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                        c--;
                        u64State &= ~RTCSRW_CNT_WR_MASK;
                        u64State |= c << RTCSRW_CNT_WR_SHIFT;
                        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                            break;
                    }
                    return rc;
                }

                u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
                {
                    ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
                    if (fDone)
                        break;
                }
                AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
            }
        }
        else
#endif /* IN_RING3 */
        {
#ifdef IN_RING3
            /* TryEnter call - decrement the number of (waiting) writers. */
#else
            /* We cannot call SUPSemEventWaitNoResume in this context.  Go back to
               ring-3 and do it there or return rcBusy. */
#endif

            for (;;)
            {
                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                c--;
                u64State &= ~RTCSRW_CNT_WR_MASK;
                u64State |= c << RTCSRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }

#ifdef IN_RING3
            return VERR_SEM_BUSY;
#else
            if (rcBusy == VINF_SUCCESS)
            {
                PVM    pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
                PVMCPU pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
                /** @todo Should actually do this via VMMR0.cpp instead of going all the way
                 *        back to ring-3.  Goes for both kinds of crit sects. */
                return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
            }
            return rcBusy;
#endif
        }
    }

    /*
     * Got it!
     */
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
    ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
    Assert(pThis->s.Core.cWriterReads == 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    if (!fNoVal)
        RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
#endif
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
    STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);

    return VINF_SUCCESS;
}


/**
 * Enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExcl.
 */
VMMDECL(int) PDMCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
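
/*
 * Usage sketch (illustration only; pCritSectRw is an assumed section).  In an
 * RC/R0 code path, passing VINF_SUCCESS as rcBusy resolves contention through
 * a ring-3 call instead of failing:
 *
 *      int rc = PDMCritSectRwEnterExcl(pCritSectRw, VINF_SUCCESS);
 *      AssertRCReturn(rc, rc);     // only fails on destruction and the like
 *      // ... modify the protected state ...
 *      PDMCritSectRwLeaveExcl(pCritSectRw);
 */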


/**
 * Enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwEnterExclDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
 *          RTCritSectRwTryEnterExcl.
 */
VMMDECL(int) PDMCritSectRwTryEnterExcl(PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
 *          RTCritSectRwTryEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with exclusive (write) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
}
#endif /* IN_RING3 */


/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
static int pdmCritSectRwLeaveExclWorker(PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);

    /*
     * Unwind one recursion.  Is it the final one?
     */
    if (pThis->s.Core.cWriteRecursions == 1)
    {
        AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        /*
         * Update the state.
         */
#if defined(IN_RING3)
        ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
        STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
        ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);

        for (;;)
        {
            uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
            uint64_t u64OldState = u64State;

            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            Assert(c > 0);
            c--;

            if (   c > 0
                || (u64State & RTCSRW_CNT_RD_MASK) == 0)
            {
                /* Don't change the direction, wake up the next writer if any. */
                u64State &= ~RTCSRW_CNT_WR_MASK;
                u64State |= c << RTCSRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                {
                    if (c > 0)
                    {
                        int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                        AssertRC(rc);
                    }
                    break;
                }
            }
            else
            {
                /* Reverse the direction and signal the reader threads. */
                u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
                u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                {
                    Assert(!pThis->s.Core.fNeedReset);
                    ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
                    int rc = SUPSemEventMultiSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                    AssertRC(rc);
                    break;
                }
            }

            ASMNopPause();
            if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                return VERR_SEM_DESTROYED;
        }
#else
        /*
         * We can call neither SUPSemEventSignal nor SUPSemEventMultiSignal in
         * this context, so queue the exit request (ring-3).
         */
        PVM    pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
        uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
        LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves));
        pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
#endif
    }
    else
    {
        /*
         * Not the final recursion.
         */
        Assert(pThis->s.Core.cWriteRecursions != 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
    }

    return VINF_SUCCESS;
}


/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
VMMDECL(int) PDMCritSectRwLeaveExcl(PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveExclWorker(pThis, false /*fNoVal*/);
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveExclQueued(PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveExclWorker(pThis, true /*fNoVal*/);
}
#endif


/**
 * Checks if the caller is the exclusive (write) owner of the critical section.
 *
 * @retval  @c true if owner.
 * @retval  @c false if not owner.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
 *          RTCritSectRwIsWriteOwner.
 */
VMMDECL(bool) PDMCritSectRwIsWriteOwner(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Check ownership.
     */
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeWriter == NIL_RTNATIVETHREAD)
        return false;
    return hNativeWriter == pdmCritSectRwGetNativeSelf(pThis);
}


/**
 * Checks if the caller is one of the read owners of the critical section.
 *
 * @note    !CAUTION!  This API doesn't work reliably if lock validation isn't
 *          enabled.  Meaning, the answer is not trustworthy unless
 *          RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
 *          Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
 *          creating the semaphore.  And finally, if you used a locking class,
 *          don't disable deadlock detection by setting cMsMinDeadlock to
 *          RT_INDEFINITE_WAIT.
 *
 *          In short, only use this for assertions.
 *
 * @returns @c true if reader, @c false if not.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fWannaHear  What you'd like to hear when lock validation is not
 *                      available.  (For avoiding asserting all over the place.)
 * @sa      PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
 */
VMMDECL(bool) PDMCritSectRwIsReadOwner(PPDMCRITSECTRW pThis, bool fWannaHear)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Inspect the state.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
    {
        /*
         * It's in write mode, so we can only be a reader if we're also the
         * current writer.
         */
        RTNATIVETHREAD hWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hWriter);
        if (hWriter == NIL_RTNATIVETHREAD)
            return false;
        return hWriter == pdmCritSectRwGetNativeSelf(pThis);
    }

    /*
     * Read mode.  If there are no current readers, then we cannot be a reader.
     */
    if (!(u64State & RTCSRW_CNT_RD_MASK))
        return false;

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    /*
     * Ask the lock validator.
     * Note! It doesn't know everything, let's deal with that if it becomes an issue...
     */
    return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
#else
    /*
     * Ok, we don't know, just tell the caller what they want to hear.
     */
    return fWannaHear;
#endif
}
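
/*
 * Usage sketch (illustration only): the ownership checks are meant for
 * assertions in code that requires a particular lock state:
 *
 *      Assert(PDMCritSectRwIsWriteOwner(pCritSectRw));
 *      // ... code that must hold the write lock ...
 *
 *      Assert(PDMCritSectRwIsReadOwner(pCritSectRw, true /*fWannaHear*/));
 *      // ... code that must hold at least a read lock; 'true' keeps the
 *      //     assertion quiet when lock validation is compiled out ...
 */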


/**
 * Gets the write recursion count.
 *
 * @returns The write recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriteRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriteRecursions;
}


/**
 * Gets the read recursion count of the current writer.
 *
 * @returns The read recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriterReadRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriterReads;
}


/**
 * Gets the current number of reads.
 *
 * This includes all read recursions, so it might be higher than the number of
 * read owners.  It does not include reads done by the current writer.
 *
 * @returns The read count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
 *          RTCritSectRwGetReadCount.
 */
VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        return 0;
    return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
}


/**
 * Checks if the read/write critical section is initialized or not.
 *
 * @retval  @c true if initialized.
 * @retval  @c false if not initialized.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
 */
VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
{
    AssertPtr(pThis);
    return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
}