VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@49482

Last change on this file since 49482 was 48936, checked in by vboxsync, 11 years ago

VMM: Whitespace cleanups by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 53.3 KB
/* $Id: PDMAllCritSectRw.cpp 48936 2013-10-07 21:21:42Z vboxsync $ */
/** @file
 * PDM - Read/Write Critical Section, Generic.
 */

/*
 * Copyright (C) 2009-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsectrw.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for shared access in ring-3. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3    20
/** The number of loops to spin for shared access in ring-0. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0    128
/** The number of loops to spin for shared access in the raw-mode context. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC    128

/** The number of loops to spin for exclusive access in ring-3. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3    20
/** The number of loops to spin for exclusive access in ring-0. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0    256
/** The number of loops to spin for exclusive access in the raw-mode context. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC    256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectRwEnterExcl
#undef PDMCritSectRwTryEnterExcl
#undef PDMCritSectRwEnterShared
#undef PDMCritSectRwTryEnterShared

/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pThis       The read/write critical section.  This is only used in
 *                      R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PCPDMCRITSECTRW pThis)
{
#ifdef IN_RING3
    NOREF(pThis);
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM pVM = pThis->s.CTX_SUFF(pVM);                    AssertPtr(pVM);
    PVMCPU pVCpu = VMMGetCpu(pVM);                       AssertPtr(pVCpu);
    RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread;   Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}
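
/* Note: In R0 and RC this can only be used on an EMT, since the caller is
   resolved via VMMGetCpu() and identified by the EMT's ring-3 native thread
   handle.  Reporting the ring-3 handle in all contexts is what makes the
   writer owner field directly comparable no matter which context we're in. */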


#ifdef IN_RING3
/**
 * Changes the lock validator sub-class of the read/write critical section.
 *
 * It is recommended to make sure that nobody is using this critical section
 * while changing the value.
 *
 * @returns The old sub-class.  RTLOCKVAL_SUB_CLASS_INVALID is returned if the
 *          lock validator isn't compiled in or either of the parameters are
 *          invalid.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uSubClass   The new sub-class value.
 */
VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
{
    AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);

    RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
    return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
# else
    NOREF(uSubClass);
    return RTLOCKVAL_SUB_CLASS_INVALID;
# endif
}
#endif /* IN_RING3 */


/**
 * Worker that enters a read/write critical section with shared access.
 *
 * @returns VBox status code.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position.  (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (!fTryOnly)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pThis))
            rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif
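
    /* Note: u64State packs the whole lock state into one word that is updated
       with compare-and-exchange: a reader count (RTCSRW_CNT_RD_MASK), a writer
       count (RTCSRW_CNT_WR_MASK), a waiting-reader count
       (RTCSRW_WAIT_CNT_RD_MASK) and the current direction (RTCSRW_DIR_MASK,
       either RTCSRW_DIR_READ or RTCSRW_DIR_WRITE).  Every state transition
       below is a read-modify-CAS loop on this one word; the exact bit layout
       is given by the RTCSRW_* masks and shifts. */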

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_RD_MASK;
            u64State |= c << RTCSRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
                Assert(!pThis->s.Core.fNeedReset);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else
        {
            /* Is the writer perhaps doing a read recursion? */
            RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
            RTNATIVETHREAD hNativeWriter;
            ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
            if (hNativeSelf == hNativeWriter)
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                {
                    int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
                    if (RT_FAILURE(rc9))
                        return rc9;
                }
#endif
                Assert(pThis->s.Core.cWriterReads < UINT32_MAX / 2);
                ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
                return VINF_SUCCESS; /* don't break! */
            }

            /*
             * If we're only trying, return already.
             */
            if (fTryOnly)
            {
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                return VERR_SEM_BUSY;
            }

#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
            if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
                && ASMIntAreEnabled())
# endif
            {
                /*
                 * Add ourselves to the queue and wait for the direction to change.
                 */
                uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
                c++;
                Assert(c < RTCSRW_CNT_MASK / 2);

                uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                cWait++;
                Assert(cWait <= c);
                Assert(cWait < RTCSRW_CNT_MASK / 2);

                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);

                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                {
                    for (uint32_t iLoop = 0; ; iLoop++)
                    {
                        int rc;
# ifdef IN_RING3
#  if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                        rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
                                                                   RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
                        if (RT_SUCCESS(rc))
#  else
                        RTTHREAD hThreadSelf = RTThreadSelf();
                        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#  endif
# endif
                        {
                            do
                                rc = SUPSemEventMultiWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
                                                                  (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
                                                                  RT_INDEFINITE_WAIT);
                            while (rc == VERR_INTERRUPTED && pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC);
# ifdef IN_RING3
                            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
# endif
                            if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                                return VERR_SEM_DESTROYED;
                        }
                        if (RT_FAILURE(rc))
                        {
                            /* Decrement the counts and return the error. */
                            for (;;)
                            {
                                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                                c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
                                c--;
                                cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
                                cWait--;
                                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                                u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
                                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                                    break;
                            }
                            return rc;
                        }

                        Assert(pThis->s.Core.fNeedReset);
                        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
                            break;
                        AssertMsg(iLoop < 1, ("%u\n", iLoop));
                    }

                    /* Decrement the wait count and maybe reset the semaphore (if we're last). */
                    for (;;)
                    {
                        u64OldState = u64State;

                        cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                        Assert(cWait > 0);
                        cWait--;
                        u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
                        u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;

                        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                        {
                            if (cWait == 0)
                            {
                                if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
                                {
                                    int rc = SUPSemEventMultiReset(pThis->s.CTX_SUFF(pVM)->pSession,
                                                                   (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                                    AssertRCReturn(rc, rc);
                                }
                            }
                            break;
                        }
                        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                    }

# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                    if (!fNoVal)
                        RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
# endif
                    break;
                }
            }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
            else
# endif
            {
                /*
                 * We cannot call SUPSemEventMultiWaitNoResume in this context.  Go
                 * back to ring-3 and do it there or return rcBusy.
                 */
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                if (rcBusy == VINF_SUCCESS)
                {
                    PVM    pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
                    PVMCPU pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
                    /** @todo Should actually do this via VMMR0.cpp instead of going all the way
                     *        back to ring-3.  Goes for both kinds of crit sects. */
                    return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
                }
                return rcBusy;
            }
#endif /* !IN_RING3 */
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /* got it! */
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
    return VINF_SUCCESS;
}


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterShared.
 */
VMMDECL(int) PDMCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL,    false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
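
/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * typically guards read-mostly state with the shared API.  The DEMODEV type
 * and its CritSectRw / uSharedValue members are hypothetical.
 */
#if 0
static int demoReadState(PDEMODEV pThis)
{
    int rc = PDMCritSectRwEnterShared(&pThis->CritSectRw, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        /* Multiple threads may be in here concurrently; only read state. */
        uint32_t const uValue = pThis->uSharedValue;
        NOREF(uValue);
        PDMCritSectRwLeaveShared(&pThis->CritSectRw);
    }
    return rc; /* VERR_SEM_BUSY here means an RC/R0 caller chose not to wait. */
}
#endif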


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwEnterSharedDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL,    false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterShared.
 */
VMMDECL(int) PDMCritSectRwTryEnterShared(PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL,    false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL,    false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with shared (read) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
}
#endif


/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
static int pdmCritSectRwLeaveSharedWorker(PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

    /*
     * Check the direction and take action accordingly.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
    {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
        else
        {
            int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        for (;;)
        {
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            AssertReturn(c > 0, VERR_NOT_OWNER);
            c--;

            if (   c > 0
                || (u64State & RTCSRW_CNT_WR_MASK) == 0)
            {
                /* Don't change the direction. */
                u64State &= ~RTCSRW_CNT_RD_MASK;
                u64State |= c << RTCSRW_CNT_RD_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }
            else
            {
#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
                if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
                    && ASMIntAreEnabled())
# endif
                {
                    /* Reverse the direction and signal the writer threads. */
                    u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
                    u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                        AssertRC(rc);
                        break;
                    }
                }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
                else
# endif
                {
                    /* Queue the exit request (ring-3). */
                    PVM      pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
                    PVMCPU   pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
                    uint32_t i     = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
                    LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
                    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
                    pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
                    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
                    STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
                    break;
                }
#endif
            }

            ASMNopPause();
            u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
            u64OldState = u64State;
        }
    }
    else
    {
        RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
        AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
            if (RT_FAILURE(rc))
                return rc;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
    }

    return VINF_SUCCESS;
}
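
/* Note: The last reader leaving while writers are queued is the one that flips
   the direction to RTCSRW_DIR_WRITE and signals hEvtWrite.  In RC, and in R0
   when preemption or interrupts are disabled, the semaphore cannot be
   signalled, so the leave is instead queued in apQueuedCritSectRwShrdLeaves
   and VMCPU_FF_PDM_CRITSECT forces a trip to ring-3 where
   pdmCritSectRwLeaveSharedQueued() completes it. */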

/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
VMMDECL(int) PDMCritSectRwLeaveShared(PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveSharedWorker(pThis, false /*fNoVal*/);
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveSharedQueued(PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveSharedWorker(pThis, true /*fNoVal*/);
}
#endif


/**
 * Worker that enters a read/write critical section with exclusive access.
 *
 * @returns VBox status code.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position.  (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (!fTryOnly)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Check if we're already the owner and just recursing.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeSelf == hNativeWriter)
    {
        Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        Assert(pThis->s.Core.cWriteRecursions < UINT32_MAX / 2);
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
        ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Get cracking.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if (   (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
            || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if (fTryOnly)
        {
            /* Wrong direction and we're not supposed to wait, just return. */
            STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
            return VERR_SEM_BUSY;
        }
        else
        {
            /* Add ourselves to the write count and break out to do the wait. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /*
     * If we're in write mode now try grab the ownership.  Play fair if there
     * are threads already waiting.
     */
    bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
#if defined(IN_RING3)
              && (   ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
                  || fTryOnly)
#endif
               ;
    if (fDone)
        ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
    if (!fDone)
    {
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));

#if defined(IN_RING3) || defined(IN_RING0)
        if (   !fTryOnly
# ifdef IN_RING0
            && RTThreadPreemptIsEnabled(NIL_RTTHREAD)
            && ASMIntAreEnabled()
# endif
           )
        {
            /*
             * Wait for our turn.
             */
            for (uint32_t iLoop = 0; ; iLoop++)
            {
                int rc;
# ifdef IN_RING3
#  ifdef PDMCRITSECTRW_STRICT
                if (hThreadSelf == NIL_RTTHREAD)
                    hThreadSelf = RTThreadSelfAutoAdopt();
                rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
                                                         RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
                if (RT_SUCCESS(rc))
#  else
                RTTHREAD hThreadSelf = RTThreadSelf();
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
#  endif
# endif
                {
                    do
                        rc = SUPSemEventWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
                                                     (SUPSEMEVENT)pThis->s.Core.hEvtWrite,
                                                     RT_INDEFINITE_WAIT);
                    while (rc == VERR_INTERRUPTED && pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC);
# ifdef IN_RING3
                    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
# endif
                    if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                        return VERR_SEM_DESTROYED;
                }
                if (RT_FAILURE(rc))
                {
                    /* Decrement the counts and return the error. */
                    for (;;)
                    {
                        u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                        uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                        c--;
                        u64State &= ~RTCSRW_CNT_WR_MASK;
                        u64State |= c << RTCSRW_CNT_WR_SHIFT;
                        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                            break;
                    }
                    return rc;
                }

                u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
                {
                    ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
                    if (fDone)
                        break;
                }
                AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
            }
        }
        else
#endif /* IN_RING3 || IN_RING0 */
        {
#ifdef IN_RING3
            /* TryEnter call - decrement the number of (waiting) writers. */
#else
            /* We cannot call SUPSemEventWaitNoResume in this context.  Go back to
               ring-3 and do it there or return rcBusy. */
#endif

            for (;;)
            {
                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                c--;
                u64State &= ~RTCSRW_CNT_WR_MASK;
                u64State |= c << RTCSRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }

#ifdef IN_RING3
            return VERR_SEM_BUSY;
#else
            if (rcBusy == VINF_SUCCESS)
            {
                Assert(!fTryOnly);
                PVM    pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
                PVMCPU pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
                /** @todo Should actually do this via VMMR0.cpp instead of going all the way
                 *        back to ring-3.  Goes for both kinds of crit sects. */
                return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
            }
            return rcBusy;
#endif
        }
    }

    /*
     * Got it!
     */
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
    ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
    Assert(pThis->s.Core.cWriterReads == 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    if (!fNoVal)
        RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
#endif
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
    STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);

    return VINF_SUCCESS;
}


/**
 * Enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExcl.
 */
VMMDECL(int) PDMCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL,    false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
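
/*
 * Illustrative usage sketch (not part of the original file): per the rcBusy
 * documentation above, an RC/R0 caller that must not fail with a busy status
 * passes VINF_SUCCESS so contention is resolved through a ring-3 call instead.
 * The DEMODEV type and its CritSectRw / uSharedValue members are hypothetical.
 */
#if 0
static int demoUpdateState(PDEMODEV pThis, uint32_t uNewValue)
{
    int rc = PDMCritSectRwEnterExcl(&pThis->CritSectRw, VINF_SUCCESS /*rcBusy*/);
    if (RT_SUCCESS(rc))
    {
        pThis->uSharedValue = uNewValue; /* exclusive access: safe to write */
        PDMCritSectRwLeaveExcl(&pThis->CritSectRw);
    }
    return rc;
}
#endif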


/**
 * Enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwEnterExclDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL,    false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
 *          RTCritSectRwTryEnterExcl.
 */
VMMDECL(int) PDMCritSectRwTryEnterExcl(PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL,    false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
 *          RTCritSectRwTryEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL,    false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with exclusive (write) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
}
#endif /* IN_RING3 */


/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
static int pdmCritSectRwLeaveExclWorker(PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);

    /*
     * Unwind one recursion.  Is it the final one?
     */
    if (pThis->s.Core.cWriteRecursions == 1)
    {
        AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        /*
         * Update the state.
         */
#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
        if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
            && ASMIntAreEnabled())
# endif
        {
            ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
            STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
            ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);

            for (;;)
            {
                uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
                uint64_t u64OldState = u64State;

                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
                Assert(c > 0);
                c--;

                if (   c > 0
                    || (u64State & RTCSRW_CNT_RD_MASK) == 0)
                {
                    /* Don't change the direction, wake up the next writer if any. */
                    u64State &= ~RTCSRW_CNT_WR_MASK;
                    u64State |= c << RTCSRW_CNT_WR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        if (c > 0)
                        {
                            int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                            AssertRC(rc);
                        }
                        break;
                    }
                }
                else
                {
                    /* Reverse the direction and signal the reader threads. */
                    u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
                    u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        Assert(!pThis->s.Core.fNeedReset);
                        ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
                        int rc = SUPSemEventMultiSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                        AssertRC(rc);
                        break;
                    }
                }

                ASMNopPause();
                if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                    return VERR_SEM_DESTROYED;
            }
        }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
        else
# endif
        {
            /*
             * We cannot call SUPSemEventSignal or SUPSemEventMultiSignal in this
             * context, so queue the exit request (ring-3).
             */
            PVM      pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
            PVMCPU   pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
            uint32_t i     = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
            LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
            AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves));
            pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
            STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
        }
#endif
    }
    else
    {
        /*
         * Not the final recursion.
         */
        Assert(pThis->s.Core.cWriteRecursions != 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
    }

    return VINF_SUCCESS;
}


/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
VMMDECL(int) PDMCritSectRwLeaveExcl(PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveExclWorker(pThis, false /*fNoVal*/);
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveExclQueued(PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveExclWorker(pThis, true /*fNoVal*/);
}
#endif
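
/* Note: pdmCritSectRwLeaveSharedQueued() and pdmCritSectRwLeaveExclQueued()
   are invoked from the VMCPU_FF_PDM_CRITSECT force-flag processing once the
   EMT is back in ring-3; they complete leaves that RC/R0 code queued in
   apQueuedCritSectRwShrdLeaves / apQueuedCritSectRwExclLeaves because it could
   not signal the semaphores itself.  They pass fNoVal = true because queued
   releases carry no validation records (see the fNoVal parameter docs). */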


/**
 * Checks if the caller is the exclusive (write) owner of the critical section.
 *
 * @retval  @c true if owner.
 * @retval  @c false if not owner.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
 *          RTCritSectRwIsWriteOwner.
 */
VMMDECL(bool) PDMCritSectRwIsWriteOwner(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Check ownership.
     */
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeWriter == NIL_RTNATIVETHREAD)
        return false;
    return hNativeWriter == pdmCritSectRwGetNativeSelf(pThis);
}


/**
 * Checks if the caller is one of the read owners of the critical section.
 *
 * @note    !CAUTION!  This API doesn't work reliably if lock validation isn't
 *          enabled.  Meaning, the answer is not trustworthy unless
 *          RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
 *          Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
 *          creating the semaphore.  And finally, if you used a locking class,
 *          don't disable deadlock detection by setting cMsMinDeadlock to
 *          RT_INDEFINITE_WAIT.
 *
 *          In short, only use this for assertions.
 *
 * @returns @c true if reader, @c false if not.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fWannaHear  What you'd like to hear when lock validation is not
 *                      available.  (For avoiding asserting all over the place.)
 * @sa      PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
 */
VMMDECL(bool) PDMCritSectRwIsReadOwner(PPDMCRITSECTRW pThis, bool fWannaHear)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Inspect the state.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
    {
        /*
         * It's in write mode, so we can only be a reader if we're also the
         * current writer.
         */
        RTNATIVETHREAD hWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hWriter);
        if (hWriter == NIL_RTNATIVETHREAD)
            return false;
        return hWriter == pdmCritSectRwGetNativeSelf(pThis);
    }

    /*
     * Read mode.  If there are no current readers, then we cannot be a reader.
     */
    if (!(u64State & RTCSRW_CNT_RD_MASK))
        return false;

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    /*
     * Ask the lock validator.
     * Note! It doesn't know everything, let's deal with that if it becomes an issue...
     */
    return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
#else
    /*
     * Ok, we don't know, just tell the caller what he wants to hear.
     */
    return fWannaHear;
#endif
}
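
/*
 * Illustrative usage (not part of the original file): per the caution above,
 * this predicate is only meant for assertions, e.g.:
 *
 *     Assert(PDMCritSectRwIsReadOwner(&pThis->CritSectRw, true /x fWannaHear x/));
 *
 * where passing true keeps the assertion quiet in builds without lock
 * validation (the /x x/ markers stand in for a nested comment).
 */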


/**
 * Gets the write recursion count.
 *
 * @returns The write recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriteRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriteRecursions;
}


/**
 * Gets the read recursion count of the current writer.
 *
 * @returns The read recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriterReadRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriterReads;
}


/**
 * Gets the current number of reads.
 *
 * This includes all read recursions, so it might be higher than the number of
 * read owners.  It does not include reads done by the current writer.
 *
 * @returns The read count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
 *          RTCritSectRwGetReadCount.
 */
VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        return 0;
    return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
}


/**
 * Checks if the read/write critical section is initialized or not.
 *
 * @retval  @c true if initialized.
 * @retval  @c false if not initialized.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
 */
VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
{
    AssertPtr(pThis);
    return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
}