VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@91999

Last change on this file since 91999 was 91817, checked in by vboxsync, 3 years ago

VMM/PDMCritSectRw: Don't preempt while on custom stack. [build fix] bugref:10124

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 85.4 KB
1/* $Id: PDMAllCritSectRw.cpp 91817 2021-10-18 09:52:40Z vboxsync $ */
2/** @file
3 * IPRT - Read/Write Critical Section, Generic.
4 */
5
6/*
7 * Copyright (C) 2009-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM_CRITSECTRW
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsectrw.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vmcc.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37#endif
38#if defined(IN_RING3) || defined(IN_RING0)
39# include <iprt/semaphore.h>
40# include <iprt/thread.h>
41#endif
42#ifdef IN_RING0
43# include <iprt/time.h>
44#endif
45#ifdef RT_ARCH_AMD64
46# include <iprt/x86.h>
47#endif
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53#if 0 /* unused */
54/** The number loops to spin for shared access in ring-3. */
55#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3 20
56/** The number loops to spin for shared access in ring-0. */
57#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0 128
58/** The number loops to spin for shared access in the raw-mode context. */
59#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC 128
60
61/** The number loops to spin for exclusive access in ring-3. */
62#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3 20
63/** The number loops to spin for exclusive access in ring-0. */
64#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0 256
65/** The number loops to spin for exclusive access in the raw-mode context. */
66#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC 256
67#endif
68
69/** Max number of write or write/read recursions. */
70#define PDM_CRITSECTRW_MAX_RECURSIONS _1M
71
72/** Skips some of the overly paranoid atomic reads and updates.
73 * Makes some assumptions about cache coherence, though not brave enough to
74 * skip the final atomic update. */
75#define PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
76
77/** For reading RTCRITSECTRWSTATE::s::u64State. */
78#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
79# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicUoReadU64(a_pu64State)
80#else
81# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicReadU64(a_pu64State)
82#endif
83
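#if 0 /* Editor's illustrative sketch, not part of the original source: why the
         relaxed PDMCRITSECTRW_READ_STATE() read above is safe.  A possibly
         stale snapshot only costs an extra retry, because every modification
         still goes through a full compare-exchange.  The fOrMask update below
         is a hypothetical stand-in for the real state changes made later in
         this file. */
DECLINLINE(void) pdmCritSectRwSketchUpdateState(PPDMCRITSECTRW pThis, uint64_t fOrMask)
{
    for (;;)
    {
        uint64_t const u64OldState = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
        uint64_t const u64NewState = u64OldState | fOrMask;
        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64NewState, u64OldState))
            break;          /* the snapshot was current enough - done */
        ASMNopPause();      /* stale snapshot - fetch a fresh one and retry */
    }
}
#endif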
84
85/* Undefine the automatic VBOX_STRICT API mappings. */
86#undef PDMCritSectRwEnterExcl
87#undef PDMCritSectRwTryEnterExcl
88#undef PDMCritSectRwEnterShared
89#undef PDMCritSectRwTryEnterShared
90
91
92/*********************************************************************************************************************************
93* Global Variables *
94*********************************************************************************************************************************/
95#if defined(RTASM_HAVE_CMP_WRITE_U128) && defined(RT_ARCH_AMD64)
96static int32_t g_fCmpWriteSupported = -1;
97#endif
98
99
100/*********************************************************************************************************************************
101* Internal Functions *
102*********************************************************************************************************************************/
103#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
104static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
105#else
106DECLASM(int) pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
107DECLASM(int) StkBack_pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
108#endif
109
110
111#ifdef RTASM_HAVE_CMP_WRITE_U128
112
113# ifdef RT_ARCH_AMD64
114/**
115 * Called once to initialize g_fCmpWriteSupported.
116 */
117DECL_NO_INLINE(static, bool) pdmCritSectRwIsCmpWriteU128SupportedSlow(void)
118{
119 bool const fCmpWriteSupported = RT_BOOL(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16);
120 ASMAtomicWriteS32(&g_fCmpWriteSupported, fCmpWriteSupported);
121 return fCmpWriteSupported;
122}
123# endif
124
125
126/**
127 * Indicates whether hardware actually supports 128-bit compare & write.
128 */
129DECL_FORCE_INLINE(bool) pdmCritSectRwIsCmpWriteU128Supported(void)
130{
131# ifdef RT_ARCH_AMD64
132 int32_t const fCmpWriteSupported = g_fCmpWriteSupported;
133 if (RT_LIKELY(fCmpWriteSupported >= 0))
134 return fCmpWriteSupported != 0;
135 return pdmCritSectRwIsCmpWriteU128SupportedSlow();
136# else
137 return true;
138# endif
139}
140
141#endif /* RTASM_HAVE_CMP_WRITE_U128 */
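
#if 0 /* Editor's illustrative sketch, not part of the original source: the
         tri-state lazy detection used above, reduced to its essentials.  The
         HasCmpXchg16b() helper is hypothetical and stands in for the real
         CPUID.01h:ECX.CX16 probe done by pdmCritSectRwIsCmpWriteU128SupportedSlow(). */
static int32_t g_fSketchCmpWrite128 = -1;   /* -1 = not probed yet, 0 = no, 1 = yes */

static bool pdmCritSectRwSketchIsCmpWrite128Supported(void)
{
    int32_t f = g_fSketchCmpWrite128;       /* racy read is harmless: probing twice gives the same answer */
    if (f < 0)
    {
        f = HasCmpXchg16b() ? 1 : 0;        /* hypothetical probe */
        ASMAtomicWriteS32(&g_fSketchCmpWrite128, f);
    }
    return f != 0;
}
#endif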
142
143/**
144 * Gets the ring-3 native thread handle of the calling thread.
145 *
146 * @returns native thread handle (ring-3).
147 * @param pVM The cross context VM structure.
148 * @param pThis The read/write critical section. This is only used in
149 * R0 and RC.
150 */
151DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PVMCC pVM, PCPDMCRITSECTRW pThis)
152{
153#ifdef IN_RING3
154 RT_NOREF(pVM, pThis);
155 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
156#else
157 AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
158 NIL_RTNATIVETHREAD);
159 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
160 RTNATIVETHREAD hNativeSelf = pVCpu ? pVCpu->hNativeThread : NIL_RTNATIVETHREAD;
161 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
162#endif
163 return hNativeSelf;
164}
165
166
167DECL_NO_INLINE(static, int) pdmCritSectRwCorrupted(PPDMCRITSECTRW pThis, const char *pszMsg)
168{
169 ASMAtomicWriteU32(&pThis->s.Core.u32Magic, PDMCRITSECTRW_MAGIC_CORRUPT);
170 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pThis));
171 return VERR_PDM_CRITSECTRW_IPE;
172}
173
174
175
176#ifdef IN_RING3
177/**
178 * Changes the lock validator sub-class of the read/write critical section.
179 *
180 * It is recommended to try to make sure that nobody is using this critical section
181 * while changing the value.
182 *
183 * @returns The old sub-class. RTLOCKVAL_SUB_CLASS_INVALID is returned if the
184 * lock validator isn't compiled in or either of the parameters is
185 * invalid.
186 * @param pThis Pointer to the read/write critical section.
187 * @param uSubClass The new sub-class value.
188 */
189VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
190{
191 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
192 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
193# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
194 AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
195
196 RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
197 return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
198# else
199 NOREF(uSubClass);
200 return RTLOCKVAL_SUB_CLASS_INVALID;
201# endif
202}
203#endif /* IN_RING3 */
204
205
206/**
207 * Worker for pdmCritSectRwEnterShared returning with read-ownership of the CS.
208 */
209DECL_FORCE_INLINE(int) pdmCritSectRwEnterSharedGotIt(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
210 bool fNoVal, RTTHREAD hThreadSelf)
211{
212#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
213 if (!fNoVal)
214 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
215#else
216 RT_NOREF(pSrcPos, fNoVal, hThreadSelf);
217#endif
218
219 /* got it! */
220 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
221 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
222 return VINF_SUCCESS;
223}
224
225/**
226 * Worker for pdmCritSectRwEnterShared and pdmCritSectRwEnterSharedBailOut
227 * that decrements the wait count and maybe resets the semaphore.
228 */
229DECLINLINE(int) pdmCritSectRwEnterSharedGotItAfterWaiting(PVMCC pVM, PPDMCRITSECTRW pThis, uint64_t u64State,
230 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
231{
232 for (;;)
233 {
234 uint64_t const u64OldState = u64State;
235 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
236 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
237 AssertReturn((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT > 0,
238 pdmCritSectRwCorrupted(pThis, "Invalid read count"));
239 cWait--;
240 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
241 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
242
243 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
244 {
245 if (cWait == 0)
246 {
247 if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
248 {
249 int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
250 AssertRCReturn(rc, rc);
251 }
252 }
253 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
254 }
255
256 ASMNopPause();
257 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
258 ASMNopPause();
259
260 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
261 }
262 /* not reached */
263}
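
#if 0 /* Editor's illustrative sketch, not part of the original source: how the
         packed u64State word used above breaks down.  The field positions come
         from the RTCSRW_* defines in iprt/critsect.h; only the decode pattern
         matters here. */
DECLINLINE(void) pdmCritSectRwSketchLogState(uint64_t u64State)
{
    uint64_t const cReaders     = (u64State & RTCSRW_CNT_RD_MASK)      >> RTCSRW_CNT_RD_SHIFT;
    uint64_t const cWriters     = (u64State & RTCSRW_CNT_WR_MASK)      >> RTCSRW_CNT_WR_SHIFT;
    uint64_t const cWaitReaders = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
    bool     const fWriteDir    = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
    Log(("u64State=%#RX64: readers=%RU64 writers=%RU64 waiting-readers=%RU64 dir=%s\n",
         u64State, cReaders, cWriters, cWaitReaders, fWriteDir ? "write" : "read"));
}
#endif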
264
265
266#if defined(IN_RING0) || (defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT))
267/**
268 * Worker for pdmCritSectRwEnterSharedContended that decrements both read counts
269 * and returns @a rc.
270 *
271 * @note May return VINF_SUCCESS if we race the exclusive leave function and
272 * come out on the bottom.
273 *
274 * Ring-3 only calls this in cases where it is _not_ acceptable to take the
275 * lock, so even if we get the lock we'll have to leave. In the ring-0
276 * contexts, we can safely return VINF_SUCCESS in case of a race.
277 */
278DECL_NO_INLINE(static, int) pdmCritSectRwEnterSharedBailOut(PVMCC pVM, PPDMCRITSECTRW pThis, int rc,
279 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
280{
281#ifdef IN_RING0
282 uint64_t const tsStart = RTTimeNanoTS();
283 uint64_t cNsElapsed = 0;
284#endif
285 for (;;)
286 {
287 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
288 uint64_t u64OldState = u64State;
289
290 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
291 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
292 cWait--;
293
294 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
295 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));
296
297 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
298 {
299 c--;
300 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
301 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
302 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
303 return rc;
304 }
305 else
306 {
307 /*
308 * The direction changed, so we can actually get the lock now.
309 *
310 * This means that we _have_ to wait on the semaphore to be signalled
311 * so we can properly reset it. Otherwise the stuff gets out of whack,
312 * because signalling and resetting will race one another. An
313 * exception would be if we're not the last reader waiting and don't
314 * need to worry about the resetting.
315 *
316 * An option would be to do the resetting in PDMCritSectRwEnterExcl,
317 * but that would still leave a racing PDMCritSectRwEnterShared
318 * spinning hard for a little bit, which isn't great...
319 */
320 if (cWait == 0)
321 {
322# ifdef IN_RING0
323 /* Do timeout processing first to avoid redoing the above. */
324 uint32_t cMsWait;
325 if (cNsElapsed <= RT_NS_10SEC)
326 cMsWait = 32;
327 else
328 {
329 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
330 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
331 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
332 {
333 LogFunc(("%p: giving up\n", pThis));
334 return rc;
335 }
336 cMsWait = 2;
337 }
338
339 int rcWait = SUPSemEventMultiWait(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, cMsWait);
340 Log11Func(("%p: rc=%Rrc %'RU64 ns (hNativeWriter=%p u64State=%#RX64)\n", pThis, rcWait,
341 RTTimeNanoTS() - tsStart, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
342# else
343 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
344 int rcWait = SUPSemEventMultiWaitNoResume(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, RT_MS_5SEC);
345 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
346# endif
347 if (rcWait == VINF_SUCCESS)
348 {
349# ifdef IN_RING0
350 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
351# else
352 /* ring-3: Cannot return VINF_SUCCESS. */
353 Assert(RT_FAILURE_NP(rc));
354 int rc2 = pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
355 if (RT_SUCCESS(rc2))
356 rc2 = pdmCritSectRwLeaveSharedWorker(pVM, pThis, fNoVal);
357 return rc;
358# endif
359 }
360 AssertMsgReturn(rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED,
361 ("%p: rcWait=%Rrc rc=%Rrc", pThis, rcWait, rc),
362 RT_FAILURE_NP(rcWait) ? rcWait : -rcWait);
363 }
364 else
365 {
366 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
367 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
368 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
369 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
370 }
371
372# ifdef IN_RING0
373 /* Calculate the elapsed time here to avoid redoing state work. */
374 cNsElapsed = RTTimeNanoTS() - tsStart;
375# endif
376 }
377
378 ASMNopPause();
379 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
380 ASMNopPause();
381 }
382}
383#endif /* IN_RING0 || (IN_RING3 && PDMCRITSECTRW_STRICT) */
384
385
386/**
387 * Worker for pdmCritSectRwEnterShared that handles waiting for a contended CS.
388 * Caller has already added us to the read and read-wait counters.
389 */
390static int pdmCritSectRwEnterSharedContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis,
391 int rcBusy, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
392{
393 PSUPDRVSESSION const pSession = pVM->pSession;
394 SUPSEMEVENTMULTI const hEventMulti = (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead;
395# ifdef IN_RING0
396 uint64_t const tsStart = RTTimeNanoTS();
397 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
398 uint64_t cNsMaxTotal = cNsMaxTotalDef;
399 uint32_t cMsMaxOne = RT_MS_5SEC;
400 bool fNonInterruptible = false;
401# endif
402
403 for (uint32_t iLoop = 0; ; iLoop++)
404 {
405 /*
406 * Wait for the direction to switch.
407 */
408 int rc;
409# ifdef IN_RING3
410# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
411 rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
412 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
413 if (RT_FAILURE(rc))
414 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
415# else
416 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
417# endif
418# endif
419
420 for (;;)
421 {
422 /*
423 * We always wait with a timeout so we can re-check the structure sanity
424 * and not get stuck waiting on a corrupt or deleted section.
425 */
426# ifdef IN_RING3
427 rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, RT_MS_5SEC);
428# else
429 rc = !fNonInterruptible
430 ? SUPSemEventMultiWaitNoResume(pSession, hEventMulti, cMsMaxOne)
431 : SUPSemEventMultiWait(pSession, hEventMulti, cMsMaxOne);
432 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p u64State=%#RX64)\n", pThis, rc,
433 RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
434# endif
435 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
436 { /* likely */ }
437 else
438 {
439# ifdef IN_RING3
440 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
441# endif
442 return VERR_SEM_DESTROYED;
443 }
444 if (RT_LIKELY(rc == VINF_SUCCESS))
445 break;
446
447 /*
448 * Timeout and interrupted waits need careful handling in ring-0
449 * because we're cooperating with ring-3 on this critical section
450 * and thus need to make absolutely sure we won't get stuck here.
451 *
452 * The r0 interrupted case means something is pending (termination,
453 * signal, APC, debugger, whatever), so we must try our best to
454 * return to the caller and to ring-3 so it can be dealt with.
455 */
456 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
457 {
458# ifdef IN_RING0
459 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
460 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
461 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
462 ("rcTerm=%Rrc\n", rcTerm));
463 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
464 cNsMaxTotal = RT_NS_1MIN;
465
466 if (rc == VERR_TIMEOUT)
467 {
468 /* Try to get out of here with a non-VINF_SUCCESS status if
469 the thread is terminating or if the timeout has been exceeded. */
470 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrTimeout);
471 if ( rcTerm == VINF_THREAD_IS_TERMINATING
472 || cNsElapsed > cNsMaxTotal)
473 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
474 pSrcPos, fNoVal, hThreadSelf);
475 }
476 else
477 {
478 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
479 we will try non-interruptible sleep for a while to help resolve the issue
480 w/o guru'ing. */
481 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrInterrupted);
482 if ( rcTerm != VINF_THREAD_IS_TERMINATING
483 && rcBusy == VINF_SUCCESS
484 && pVCpu != NULL
485 && cNsElapsed <= cNsMaxTotal)
486 {
487 if (!fNonInterruptible)
488 {
489 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedNonInterruptibleWaits);
490 fNonInterruptible = true;
491 cMsMaxOne = 32;
492 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
493 if (cNsLeft > RT_NS_10SEC)
494 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
495 }
496 }
497 else
498 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
499 pSrcPos, fNoVal, hThreadSelf);
500 }
501# else /* IN_RING3 */
502 RT_NOREF(pVM, pVCpu, rcBusy);
503# endif /* IN_RING3 */
504 }
505 /*
506 * Any other return code is fatal.
507 */
508 else
509 {
510# ifdef IN_RING3
511 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
512# endif
513 AssertMsgFailed(("rc=%Rrc\n", rc));
514 return RT_FAILURE_NP(rc) ? rc : -rc;
515 }
516 }
517
518# ifdef IN_RING3
519 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
520# endif
521
522 /*
523 * Check the direction.
524 */
525 Assert(pThis->s.Core.fNeedReset);
526 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
527 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
528 {
529 /*
530 * Decrement the wait count and maybe reset the semaphore (if we're last).
531 */
532 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
533 }
534
535 AssertMsg(iLoop < 1,
536 ("%p: %u u64State=%#RX64 hNativeWriter=%p\n", pThis, iLoop, u64State, pThis->s.Core.u.s.hNativeWriter));
537 RTThreadYield();
538 }
539
540 /* not reached */
541}
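
#if 0 /* Editor's illustrative sketch, not part of the original source: the ring-0
         wait policy used above, reduced to a single escalation helper.  The
         structure and helper names are hypothetical; the thresholds (32 ms per
         wait, total capped at elapsed + 10 s) mirror the code above. */
typedef struct PDMCRITSECTRWSKETCHWAITPOLICY
{
    uint64_t cNsMaxTotal;       /* bail out once this much time has been spent waiting */
    uint32_t cMsMaxOne;         /* timeout for a single semaphore wait */
    bool     fNonInterruptible; /* true once we have switched to non-interruptible waits */
} PDMCRITSECTRWSKETCHWAITPOLICY;

DECLINLINE(void) pdmCritSectRwSketchEscalate(PDMCRITSECTRWSKETCHWAITPOLICY *pPolicy, uint64_t cNsElapsed)
{
    if (!pPolicy->fNonInterruptible)
    {
        pPolicy->fNonInterruptible = true;
        pPolicy->cMsMaxOne         = 32;                        /* short, repeated waits from now on */
        if (pPolicy->cNsMaxTotal - cNsElapsed > RT_NS_10SEC)
            pPolicy->cNsMaxTotal   = cNsElapsed + RT_NS_10SEC;  /* cap the remaining wait time */
    }
}
#endif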
542
543
544/**
545 * Worker that enters a read/write critical section with shared access.
546 *
547 * @returns VBox status code.
548 * @param pVM The cross context VM structure.
549 * @param pThis Pointer to the read/write critical section.
550 * @param rcBusy The busy return code for ring-0 and ring-3.
551 * @param fTryOnly Only try enter it, don't wait.
552 * @param pSrcPos The source position. (Can be NULL.)
553 * @param fNoVal No validation records.
554 */
555#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
556static int pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
557 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
558#else
559DECLASM(int) pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
560 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal);
561DECLASM(int) StkBack_pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
562 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
563#endif
564{
565 /*
566 * Validate input.
567 */
568 AssertPtr(pThis);
569 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
570
571#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
572 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
573 if (!fTryOnly)
574 {
575 int rc9;
576 RTNATIVETHREAD hNativeWriter;
577 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
578 if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis))
579 rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
580 else
581 rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
582 if (RT_FAILURE(rc9))
583 return rc9;
584 }
585#else
586 RTTHREAD hThreadSelf = NIL_RTTHREAD;
587#endif
588
589 /*
590 * Work the state.
591 */
592 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
593 uint64_t u64OldState = u64State;
594 for (;;)
595 {
596 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
597 {
598 /* It flows in the right direction, try to follow it before it changes. */
599 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
600 c++;
601 Assert(c < RTCSRW_CNT_MASK / 4);
602 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
603 u64State &= ~RTCSRW_CNT_RD_MASK;
604 u64State |= c << RTCSRW_CNT_RD_SHIFT;
605 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
606 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
607 }
608 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
609 {
610 /* Wrong direction, but we're alone here and can simply try switch the direction. */
611 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
612 u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
613 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
614 {
615 Assert(!pThis->s.Core.fNeedReset);
616 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
617 }
618 }
619 else
620 {
621 /* Is the writer perhaps doing a read recursion? */
622 RTNATIVETHREAD hNativeWriter;
623 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
624 if (hNativeWriter != NIL_RTNATIVETHREAD)
625 {
626 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
627 if (hNativeSelf == hNativeWriter)
628 {
629#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
630 if (!fNoVal)
631 {
632 int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
633 if (RT_FAILURE(rc9))
634 return rc9;
635 }
636#endif
637 uint32_t const cReads = ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
638 Assert(cReads < _16K);
639 AssertReturnStmt(cReads < PDM_CRITSECTRW_MAX_RECURSIONS, ASMAtomicDecU32(&pThis->s.Core.cWriterReads),
640 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
641 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
642 return VINF_SUCCESS; /* don't break! */
643 }
644 }
645
646 /*
647 * If we're only trying, return already.
648 */
649 if (fTryOnly)
650 {
651 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
652 return VERR_SEM_BUSY;
653 }
654
655#if defined(IN_RING3) || defined(IN_RING0)
656 /*
657 * Add ourselves to the queue and wait for the direction to change.
658 */
659 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
660 c++;
661 Assert(c < RTCSRW_CNT_MASK / 2);
662 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
663
664 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
665 cWait++;
666 Assert(cWait <= c);
667 Assert(cWait < RTCSRW_CNT_MASK / 2);
668 AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
669
670 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
671 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
672
673 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
674 {
675 /*
676 * In ring-3 it's straightforward, just optimize the RTThreadSelf() call.
677 */
678# if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
679 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
680# elif defined(IN_RING3)
681 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, RTThreadSelf());
682# else /* IN_RING0 */
683 /*
684 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
685 * account when waiting on contended locks.
686 */
687 PVMCPUCC pVCpu = VMMGetCpu(pVM);
688 if (pVCpu)
689 {
690 VMMR0EMTBLOCKCTX Ctx;
691 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
692 if (rc == VINF_SUCCESS)
693 {
694 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
695
696 rc = pdmCritSectRwEnterSharedContended(pVM, pVCpu, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
697
698 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
699 }
700 else
701 {
702 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
703 rc = pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
704 }
705 return rc;
706 }
707
708 /* Non-EMT. */
709 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
710 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
711# endif /* IN_RING0 */
712 }
713
714#else /* !IN_RING3 && !IN_RING0 */
715 /*
716 * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
717 * back to ring-3 and do it there or return rcBusy.
718 */
719# error "Unused code."
720 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
721 if (rcBusy == VINF_SUCCESS)
722 {
723 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
724 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
725 * back to ring-3. Goes for both kinds of crit sects. */
726 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
727 }
728 return rcBusy;
729#endif /* !IN_RING3 && !IN_RING0 */
730 }
731
732 ASMNopPause();
733 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
734 { /* likely */ }
735 else
736 return VERR_SEM_DESTROYED;
737 ASMNopPause();
738
739 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
740 u64OldState = u64State;
741 }
742 /* not reached */
743}
744
745
746/**
747 * Enter a critical section with shared (read) access.
748 *
749 * @returns VBox status code.
750 * @retval VINF_SUCCESS on success.
751 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
752 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
753 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
754 * during the operation.
755 *
756 * @param pVM The cross context VM structure.
757 * @param pThis Pointer to the read/write critical section.
758 * @param rcBusy The status code to return when we're in RC or R0 and the
759 * section is busy. Pass VINF_SUCCESS to acquire the
760 * critical section thru a ring-3 call if necessary.
761 * @sa PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
762 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
763 * RTCritSectRwEnterShared.
764 */
765VMMDECL(int) PDMCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
766{
767#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
768 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
769#else
770 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
771 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
772#endif
773}
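
#if 0 /* Editor's illustrative usage sketch, not part of the original source.  The
         helper name and the pCritSect parameter are hypothetical; the API calls
         are the ones defined in this file. */
static void pdmCritSectRwSketchReadSide(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    /* Passing VERR_SEM_BUSY as rcBusy asks ring-0/raw-mode to fail with that
       status rather than acquiring the section through a ring-3 call. */
    int rc = PDMCritSectRwEnterShared(pVM, pCritSect, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        /* ... read the data protected by pCritSect ... */
        PDMCritSectRwLeaveShared(pVM, pCritSect);
    }
    /* else: VERR_SEM_BUSY (ring-0/RC contention) or VERR_SEM_DESTROYED. */
}
#endif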
774
775
776/**
777 * Enter a critical section with shared (read) access.
778 *
779 * @returns VBox status code.
780 * @retval VINF_SUCCESS on success.
781 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
782 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
783 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
784 * during the operation.
785 *
786 * @param pVM The cross context VM structure.
787 * @param pThis Pointer to the read/write critical section.
788 * @param rcBusy The status code to return when we're in RC or R0 and the
789 * section is busy. Pass VINF_SUCCESS to acquire the
790 * critical section thru a ring-3 call if necessary.
791 * @param uId Where we're entering the section.
792 * @param SRC_POS The source position.
793 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
794 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
795 * RTCritSectRwEnterSharedDebug.
796 */
797VMMDECL(int) PDMCritSectRwEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
798{
799 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
800#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
801 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
802#else
803 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
804 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
805#endif
806}
807
808
809/**
810 * Try enter a critical section with shared (read) access.
811 *
812 * @returns VBox status code.
813 * @retval VINF_SUCCESS on success.
814 * @retval VERR_SEM_BUSY if the critsect was owned.
815 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
816 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
817 * during the operation.
818 *
819 * @param pVM The cross context VM structure.
820 * @param pThis Pointer to the read/write critical section.
821 * @sa PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
822 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
823 * RTCritSectRwTryEnterShared.
824 */
825VMMDECL(int) PDMCritSectRwTryEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis)
826{
827#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
828 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
829#else
830 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
831 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
832#endif
833}
834
835
836/**
837 * Try enter a critical section with shared (read) access.
838 *
839 * @returns VBox status code.
840 * @retval VINF_SUCCESS on success.
841 * @retval VERR_SEM_BUSY if the critsect was owned.
842 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
843 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
844 * during the operation.
845 *
846 * @param pVM The cross context VM structure.
847 * @param pThis Pointer to the read/write critical section.
848 * @param uId Where we're entering the section.
849 * @param SRC_POS The source position.
850 * @sa PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
851 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
852 * RTCritSectRwTryEnterSharedDebug.
853 */
854VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
855{
856 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
857#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
858 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
859#else
860 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
861 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
862#endif
863}
864
865
866#ifdef IN_RING3
867/**
868 * Enters a PDM read/write critical section with shared (read) access.
869 *
870 * @returns VINF_SUCCESS if entered successfully.
871 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
872 * during the operation.
873 *
874 * @param pVM The cross context VM structure.
875 * @param pThis Pointer to the read/write critical section.
876 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
877 */
878VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
879{
880 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3);
881}
882#endif
883
884
885/**
886 * Leave a critical section held with shared access.
887 *
888 * @returns VBox status code.
889 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
890 * during the operation.
891 * @param pVM The cross context VM structure.
892 * @param pThis Pointer to the read/write critical section.
893 * @param fNoVal No validation records (i.e. queued release).
894 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
895 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
896 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
897 */
898#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
899static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
900#else
901DECLASM(int) StkBack_pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
902#endif
903{
904 /*
905 * Validate handle.
906 */
907 AssertPtr(pThis);
908 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
909
910#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
911 NOREF(fNoVal);
912#endif
913
914 /*
915 * Check the direction and take action accordingly.
916 */
917#ifdef IN_RING0
918 PVMCPUCC pVCpu = NULL;
919#endif
920 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
921 uint64_t u64OldState = u64State;
922 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
923 {
924#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
925 if (fNoVal)
926 Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
927 else
928 {
929 int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
930 if (RT_FAILURE(rc9))
931 return rc9;
932 }
933#endif
934 for (;;)
935 {
936 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
937 AssertReturn(c > 0, VERR_NOT_OWNER);
938 c--;
939
940 if ( c > 0
941 || (u64State & RTCSRW_CNT_WR_MASK) == 0)
942 {
943 /* Don't change the direction. */
944 u64State &= ~RTCSRW_CNT_RD_MASK;
945 u64State |= c << RTCSRW_CNT_RD_SHIFT;
946 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
947 break;
948 }
949 else
950 {
951#if defined(IN_RING3) || defined(IN_RING0)
952# ifdef IN_RING0
953 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
954 if (!pVCpu)
955 pVCpu = VMMGetCpu(pVM);
956 if ( pVCpu == NULL /* non-EMT access; if we implement that, it must be able to block */
957 || VMMRZCallRing3IsEnabled(pVCpu)
958 || RTSemEventIsSignalSafe()
959 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
960 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
961 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
962 )
963# endif
964 {
965 /* Reverse the direction and signal the writer threads. */
966 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
967 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
968 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
969 {
970 int rc;
971# ifdef IN_RING0
972 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
973 if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
974 {
975 VMMR0EMTBLOCKCTX Ctx;
976 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
977 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
978
979 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
980
981 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
982 }
983 else
984# endif
985 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
986 AssertRC(rc);
987 return rc;
988 }
989 }
990#endif /* IN_RING3 || IN_RING0 */
991#ifndef IN_RING3
992# ifdef IN_RING0
993 else
994# endif
995 {
996 /* Queue the exit request (ring-3). */
997# ifndef IN_RING0
998 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
999# endif
1000 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
1001 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
1002 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves),
1003 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
1004 pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = pThis->s.pSelfR3;
1005 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1006 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i])
1007 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] & PAGE_OFFSET_MASK)
1008 == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
1009 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i], pThis),
1010 pdmCritSectRwCorrupted(pThis, "Invalid self pointer"));
1011 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1012 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1013 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1014 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
1015 break;
1016 }
1017#endif
1018 }
1019
1020 ASMNopPause();
1021 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1022 { }
1023 else
1024 return VERR_SEM_DESTROYED;
1025 ASMNopPause();
1026
1027 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1028 u64OldState = u64State;
1029 }
1030 }
1031 else
1032 {
1033 /*
1034 * Write direction. Check that it's the owner calling and that it has reads to undo.
1035 */
1036 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1037 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1038
1039 RTNATIVETHREAD hNativeWriter;
1040 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1041 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1042 AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
1043#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1044 if (!fNoVal)
1045 {
1046 int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
1047 if (RT_FAILURE(rc))
1048 return rc;
1049 }
1050#endif
1051 uint32_t cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
1052 AssertReturn(cDepth < PDM_CRITSECTRW_MAX_RECURSIONS, pdmCritSectRwCorrupted(pThis, "too many writer-read recursions"));
1053 }
1054
1055 return VINF_SUCCESS;
1056}
1057
1058
1059/**
1060 * Leave a critical section held with shared access.
1061 *
1062 * @returns VBox status code.
1063 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1064 * during the operation.
1065 * @param pVM The cross context VM structure.
1066 * @param pThis Pointer to the read/write critical section.
1067 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
1068 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
1069 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
1070 */
1071VMMDECL(int) PDMCritSectRwLeaveShared(PVMCC pVM, PPDMCRITSECTRW pThis)
1072{
1073 return pdmCritSectRwLeaveSharedWorker(pVM, pThis, false /*fNoVal*/);
1074}
1075
1076
1077#if defined(IN_RING3) || defined(IN_RING0)
1078/**
1079 * PDMCritSectBothFF interface.
1080 *
1081 * @param pVM The cross context VM structure.
1082 * @param pThis Pointer to the read/write critical section.
1083 */
1084void pdmCritSectRwLeaveSharedQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1085{
1086 pdmCritSectRwLeaveSharedWorker(pVM, pThis, true /*fNoVal*/);
1087}
1088#endif
1089
1090
1091/**
1092 * Worker for pdmCritSectRwEnterExcl that bails out on wait failure.
1093 *
1094 * @returns @a rc unless corrupted.
1095 * @param pThis Pointer to the read/write critical section.
1096 * @param rc The status to return.
1097 */
1098DECL_NO_INLINE(static, int) pdmCritSectRwEnterExclBailOut(PPDMCRITSECTRW pThis, int rc)
1099{
1100 /*
1101 * Decrement the counts and return the error.
1102 */
1103 for (;;)
1104 {
1105 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1106 uint64_t const u64OldState = u64State;
1107 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1108 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on bailout"));
1109 c--;
1110 u64State &= ~RTCSRW_CNT_WR_MASK;
1111 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1112 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1113 return rc;
1114
1115 ASMNopPause();
1116 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1117 ASMNopPause();
1118 }
1119}
1120
1121
1122/**
1123 * Worker for pdmCritSectRwEnterExcl that handles the red tape after we've
1124 * gotten exclusive ownership of the critical section.
1125 */
1126DECL_FORCE_INLINE(int) pdmCritSectRwEnterExclFirst(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
1127 bool fNoVal, RTTHREAD hThreadSelf)
1128{
1129 RT_NOREF(hThreadSelf, fNoVal, pSrcPos);
1130 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
1131
1132#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1133 pThis->s.Core.cWriteRecursions = 1;
1134#else
1135 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
1136#endif
1137 Assert(pThis->s.Core.cWriterReads == 0);
1138
1139#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1140 if (!fNoVal)
1141 {
1142 if (hThreadSelf == NIL_RTTHREAD)
1143 hThreadSelf = RTThreadSelfAutoAdopt();
1144 RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
1145 }
1146#endif
1147 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1148 STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);
1149 return VINF_SUCCESS;
1150}
1151
1152
1153#if defined(IN_RING3) || defined(IN_RING0)
1154/**
1155 * Worker for pdmCritSectRwEnterExcl that handles waiting when the section is
1156 * contended.
1157 */
1158static int pdmR3R0CritSectRwEnterExclContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis, RTNATIVETHREAD hNativeSelf,
1159 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, int rcBusy, RTTHREAD hThreadSelf)
1160{
1161 RT_NOREF(hThreadSelf, rcBusy, pSrcPos, fNoVal, pVCpu);
1162
1163 PSUPDRVSESSION const pSession = pVM->pSession;
1164 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pThis->s.Core.hEvtWrite;
1165# ifdef IN_RING0
1166 uint64_t const tsStart = RTTimeNanoTS();
1167 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
1168 uint64_t cNsMaxTotal = cNsMaxTotalDef;
1169 uint32_t cMsMaxOne = RT_MS_5SEC;
1170 bool fNonInterruptible = false;
1171# endif
1172
1173 for (uint32_t iLoop = 0; ; iLoop++)
1174 {
1175 /*
1176 * Wait for our turn.
1177 */
1178 int rc;
1179# ifdef IN_RING3
1180# ifdef PDMCRITSECTRW_STRICT
1181 rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
1182 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
1183 if (RT_SUCCESS(rc))
1184 { /* likely */ }
1185 else
1186 return pdmCritSectRwEnterExclBailOut(pThis, rc);
1187# else
1188 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
1189# endif
1190# endif
1191
1192 for (;;)
1193 {
1194 /*
1195 * We always wait with a timeout so we can re-check the structure sanity
1196 * and not get stuck waiting on a corrupt or deleted section.
1197 */
1198# ifdef IN_RING3
1199 rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
1200# else
1201 rc = !fNonInterruptible
1202 ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
1203 : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
1204 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p)\n",
1205 pThis, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter));
1206# endif
1207 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1208 { /* likely */ }
1209 else
1210 {
1211# ifdef IN_RING3
1212 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1213# endif
1214 return VERR_SEM_DESTROYED;
1215 }
1216 if (RT_LIKELY(rc == VINF_SUCCESS))
1217 break;
1218
1219 /*
1220 * Timeout and interrupted waits need careful handling in ring-0
1221 * because we're cooperating with ring-3 on this critical section
1222 * and thus need to make absolutely sure we won't get stuck here.
1223 *
1224 * The r0 interrupted case means something is pending (termination,
1225 * signal, APC, debugger, whatever), so we must try our best to
1226 * return to the caller and to ring-3 so it can be dealt with.
1227 */
1228 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
1229 {
1230# ifdef IN_RING0
1231 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
1232 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
1233 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
1234 ("rcTerm=%Rrc\n", rcTerm));
1235 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
1236 cNsMaxTotal = RT_NS_1MIN;
1237
1238 if (rc == VERR_TIMEOUT)
1239 {
1240 /* Try to get out of here with a non-VINF_SUCCESS status if
1241 the thread is terminating or if the timeout has been exceeded. */
1242 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrTimeout);
1243 if ( rcTerm == VINF_THREAD_IS_TERMINATING
1244 || cNsElapsed > cNsMaxTotal)
1245 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1246 }
1247 else
1248 {
1249 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
1250 we will try non-interruptible sleep for a while to help resolve the issue
1251 w/o guru'ing. */
1252 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrInterrupted);
1253 if ( rcTerm != VINF_THREAD_IS_TERMINATING
1254 && rcBusy == VINF_SUCCESS
1255 && pVCpu != NULL
1256 && cNsElapsed <= cNsMaxTotal)
1257 {
1258 if (!fNonInterruptible)
1259 {
1260 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclNonInterruptibleWaits);
1261 fNonInterruptible = true;
1262 cMsMaxOne = 32;
1263 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
1264 if (cNsLeft > RT_NS_10SEC)
1265 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
1266 }
1267 }
1268 else
1269 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1270 }
1271# else /* IN_RING3 */
1272 RT_NOREF(pVM, pVCpu, rcBusy);
1273# endif /* IN_RING3 */
1274 }
1275 /*
1276 * Any other return code is fatal.
1277 */
1278 else
1279 {
1280# ifdef IN_RING3
1281 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1282# endif
1283 AssertMsgFailed(("rc=%Rrc\n", rc));
1284 return RT_FAILURE_NP(rc) ? rc : -rc;
1285 }
1286 }
1287
1288# ifdef IN_RING3
1289 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1290# endif
1291
1292 /*
1293 * Try take exclusive write ownership.
1294 */
1295 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1296 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
1297 {
1298 bool fDone;
1299 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1300 if (fDone)
1301 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1302 }
1303 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
1304 }
1305}
1306#endif /* IN_RING3 || IN_RING0 */
1307
1308
1309/**
1310 * Worker that enters a read/write critical section with exclusive access.
1311 *
1312 * @returns VBox status code.
1313 * @param pVM The cross context VM structure.
1314 * @param pThis Pointer to the read/write critical section.
1315 * @param rcBusy The busy return code for ring-0 and ring-3.
1316 * @param fTryOnly Only try enter it, don't wait.
1317 * @param pSrcPos The source position. (Can be NULL.)
1318 * @param fNoVal No validation records.
1319 */
1320#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
1321static int pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
1322 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
1323#else
1324DECLASM(int) pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
1325 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal);
1326DECLASM(int) StkBack_pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
1327 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
1328#endif
1329{
1330 /*
1331 * Validate input.
1332 */
1333 AssertPtr(pThis);
1334 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1335
1336 RTTHREAD hThreadSelf = NIL_RTTHREAD;
1337#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1338 if (!fTryOnly)
1339 {
1340 hThreadSelf = RTThreadSelfAutoAdopt();
1341 int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
1342 if (RT_FAILURE(rc9))
1343 return rc9;
1344 }
1345#endif
1346
1347 /*
1348 * Check if we're already the owner and just recursing.
1349 */
1350 RTNATIVETHREAD const hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1351 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1352 RTNATIVETHREAD hNativeWriter;
1353 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1354 if (hNativeSelf == hNativeWriter)
1355 {
1356 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
1357#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1358 if (!fNoVal)
1359 {
1360 int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
1361 if (RT_FAILURE(rc9))
1362 return rc9;
1363 }
1364#endif
1365 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1366#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1367 uint32_t const cDepth = ++pThis->s.Core.cWriteRecursions;
1368#else
1369 uint32_t const cDepth = ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
1370#endif
1371 AssertReturnStmt(cDepth > 1 && cDepth <= PDM_CRITSECTRW_MAX_RECURSIONS,
1372 ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions),
1373 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
1374 return VINF_SUCCESS;
1375 }
1376
1377 /*
1378 * First we try to grab an idle critical section using 128-bit atomics.
1379 */
1380 /** @todo This could be moved up before the recursion check. */
1381 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1382#ifdef RTASM_HAVE_CMP_WRITE_U128
1383 if ( (u64State & ~RTCSRW_DIR_MASK) == 0
1384 && pdmCritSectRwIsCmpWriteU128Supported())
1385 {
1386 RTCRITSECTRWSTATE OldState;
1387 OldState.s.u64State = u64State;
1388 OldState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1389 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1390
1391 RTCRITSECTRWSTATE NewState;
1392 NewState.s.u64State = (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1393 NewState.s.hNativeWriter = hNativeSelf;
1394
1395 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1396 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1397
1398 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1399 }
1400#endif
1401
1402 /*
1403 * Do it step by step. Update the state to reflect our desire.
1404 */
1405 uint64_t u64OldState = u64State;
1406
1407 for (;;)
1408 {
1409 if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1410 || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
1411 {
1412 /* It flows in the right direction, try to follow it before it changes. */
1413 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1414 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1415 c++;
1416 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1417 u64State &= ~RTCSRW_CNT_WR_MASK;
1418 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1419 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1420 break;
1421 }
1422 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
1423 {
1424 /* Wrong direction, but we're alone here and can simply try switch the direction. */
1425 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1426 u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1427 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1428 break;
1429 }
1430 else if (fTryOnly)
1431 {
1432 /* Wrong direction and we're not supposed to wait, just return. */
1433 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1434 return VERR_SEM_BUSY;
1435 }
1436 else
1437 {
1438 /* Add ourselves to the write count and break out to do the wait. */
1439 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1440 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1441 c++;
1442 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1443 u64State &= ~RTCSRW_CNT_WR_MASK;
1444 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1445 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1446 break;
1447 }
1448
1449 ASMNopPause();
1450
1451 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1452 { /* likely */ }
1453 else
1454 return VERR_SEM_DESTROYED;
1455
1456 ASMNopPause();
1457 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1458 u64OldState = u64State;
1459 }
1460
1461 /*
1462 * If we're in write mode now, try to grab the ownership. Play fair if there
1463 * are threads already waiting.
1464 */
1465 bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1466 && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
1467 || fTryOnly);
1468 if (fDone)
1469 {
1470 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1471 if (fDone)
1472 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1473 }
1474
1475 /*
1476 * Okay, we have contention and will have to wait unless we're just trying.
1477 */
1478 if (fTryOnly)
1479 {
1480 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl)); /** @todo different statistics for this */
1481 return pdmCritSectRwEnterExclBailOut(pThis, VERR_SEM_BUSY);
1482 }
1483
1484 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1485
1486 /*
1487 * Ring-3 is pretty straightforward.
1488 */
1489#if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
1490 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, hThreadSelf);
1491#elif defined(IN_RING3)
1492 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, RTThreadSelf());
1493
1494#elif defined(IN_RING0)
1495 /*
1496 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
1497 * account when waiting on contended locks.
1498 */
1499 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1500 if (pVCpu)
1501 {
1502 VMMR0EMTBLOCKCTX Ctx;
1503 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
1504 if (rc == VINF_SUCCESS)
1505 {
1506 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1507
1508 rc = pdmR3R0CritSectRwEnterExclContended(pVM, pVCpu, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1509
1510 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1511 }
1512 else
1513 {
1514 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
1515 rc = pdmCritSectRwEnterExclBailOut(pThis, rc);
1516 }
1517 return rc;
1518 }
1519
1520 /* Non-EMT. */
1521 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1522 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1523
1524#else
1525# error "Unused."
1526 /*
1527 * Raw-mode: Call host and take it there if rcBusy is VINF_SUCCESS.
1528 */
1529 rcBusy = pdmCritSectRwEnterExclBailOut(pThis, rcBusy);
1530 if (rcBusy == VINF_SUCCESS)
1531 {
1532 Assert(!fTryOnly);
1533 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1534        /** @todo Should actually do this via VMMR0.cpp instead of going all the way
1535         *        back to ring-3. Goes for both kinds of crit sects. */
1536 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
1537 }
1538 return rcBusy;
1539#endif
1540}
1541
1542
1543/**
1544 * Try to enter a critical section with exclusive (write) access.
1545 *
1546 * @returns VBox status code.
1547 * @retval VINF_SUCCESS on success.
1548 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1549 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1550 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
1551 *          during the operation.
1552 *
1553 * @param pVM The cross context VM structure.
1554 * @param pThis Pointer to the read/write critical section.
1555 * @param rcBusy The status code to return when we're in RC or R0 and the
1556 *                      section is busy. Pass VINF_SUCCESS to acquire the
1557 *                      critical section through a ring-3 call if necessary.
1558 * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
1559 * PDMCritSectRwTryEnterExclDebug,
1560 * PDMCritSectEnterDebug, PDMCritSectEnter,
1561 * RTCritSectRwEnterExcl.
1562 */
1563VMMDECL(int) PDMCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
1564{
1565#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1566 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, NULL, false /*fNoVal*/);
1567#else
1568 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1569 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1570#endif
1571}
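
/*
 * Usage sketch (not part of the build): a typical exclusive enter/leave pair
 * from EMT code.  pCritSect stands for an already initialized PDMCRITSECTRW
 * and passing VERR_SEM_BUSY as rcBusy is purely illustrative; pass
 * VINF_SUCCESS instead if a ring-3 fallback is preferred for ring-0 callers.
 *
 * @code
 *      int rc = PDMCritSectRwEnterExcl(pVM, pCritSect, VERR_SEM_BUSY);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... modify the state protected by the section ...
 *          PDMCritSectRwLeaveExcl(pVM, pCritSect);
 *      }
 *      else
 *      {
 *          // rc is rcBusy (ring-0/raw-mode contention) or VERR_SEM_DESTROYED.
 *      }
 * @endcode
 */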
1572
1573
1574/**
1575 * Try to enter a critical section with exclusive (write) access.
1576 *
1577 * @returns VBox status code.
1578 * @retval VINF_SUCCESS on success.
1579 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1580 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1581 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
1582 *          during the operation.
1583 *
1584 * @param pVM The cross context VM structure.
1585 * @param pThis Pointer to the read/write critical section.
1586 * @param rcBusy The status code to return when we're in RC or R0 and the
1587 *                      section is busy. Pass VINF_SUCCESS to acquire the
1588 *                      critical section through a ring-3 call if necessary.
1589 * @param uId Where we're entering the section.
1590 * @param SRC_POS The source position.
1591 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
1592 * PDMCritSectRwTryEnterExclDebug,
1593 * PDMCritSectEnterDebug, PDMCritSectEnter,
1594 * RTCritSectRwEnterExclDebug.
1595 */
1596VMMDECL(int) PDMCritSectRwEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1597{
1598 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1599#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1600 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, NULL, false /*fNoVal*/);
1601#else
1602 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1603 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1604#endif
1605}
1606
1607
1608/**
1609 * Try to enter a critical section with exclusive (write) access.
1610 *
1611 * @retval VINF_SUCCESS on success.
1612 * @retval VERR_SEM_BUSY if the critsect was owned.
1613 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1614 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
1615 *          during the operation.
1616 *
1617 * @param pVM The cross context VM structure.
1618 * @param pThis Pointer to the read/write critical section.
1619 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
1620 * PDMCritSectRwEnterExclDebug,
1621 * PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
1622 * RTCritSectRwTryEnterExcl.
1623 */
1624VMMDECL(int) PDMCritSectRwTryEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1625{
1626#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1627 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, NULL, false /*fNoVal*/);
1628#else
1629 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1630 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1631#endif
1632}
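
/*
 * Usage sketch (not part of the build): non-blocking acquisition with a
 * deferred fallback.  pCritSect is an illustrative, already initialized
 * PDMCRITSECTRW pointer.
 *
 * @code
 *      int rc = PDMCritSectRwTryEnterExcl(pVM, pCritSect);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... exclusive work ...
 *          PDMCritSectRwLeaveExcl(pVM, pCritSect);
 *      }
 *      else if (rc == VERR_SEM_BUSY)
 *      {
 *          // Someone else owns the section; defer or retry rather than block.
 *      }
 * @endcode
 */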
1633
1634
1635/**
1636 * Try to enter a critical section with exclusive (write) access.
1637 *
1638 * @retval VINF_SUCCESS on success.
1639 * @retval VERR_SEM_BUSY if the critsect was owned.
1640 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1641 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
1642 *          during the operation.
1643 *
1644 * @param pVM The cross context VM structure.
1645 * @param pThis Pointer to the read/write critical section.
1646 * @param uId Where we're entering the section.
1647 * @param SRC_POS The source position.
1648 * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
1649 * PDMCritSectRwEnterExclDebug,
1650 * PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
1651 * RTCritSectRwTryEnterExclDebug.
1652 */
1653VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1654{
1655 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1656#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1657 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, NULL, false /*fNoVal*/);
1658#else
1659 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1660 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1661#endif
1662}
1663
1664
1665#ifdef IN_RING3
1666/**
1667 * Enters a PDM read/write critical section with exclusive (write) access.
1668 *
1669 * @returns VINF_SUCCESS if entered successfully.
1670 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
1671 *          during the operation.
1672 *
1673 * @param pVM The cross context VM structure.
1674 * @param pThis Pointer to the read/write critical section.
1675 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
1676 */
1677VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
1678{
1679 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, false /*fTryAgain*/, NULL, fCallRing3 /*fNoVal*/);
1680}
1681#endif /* IN_RING3 */
1682
1683
1684/**
1685 * Leave a critical section held exclusively.
1686 *
1687 * @returns VBox status code.
1688 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
1689 *          during the operation.
1690 * @param pVM The cross context VM structure.
1691 * @param pThis Pointer to the read/write critical section.
1692 * @param fNoVal No validation records (i.e. queued release).
1693 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1694 */
1695#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
1696static int pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
1697#else
1698DECLASM(int) pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
1699DECLASM(int) StkBack_pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
1700#endif
1701{
1702 /*
1703 * Validate handle.
1704 */
1705 AssertPtr(pThis);
1706 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1707
1708#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1709 NOREF(fNoVal);
1710#endif
1711
1712 /*
1713 * Check ownership.
1714 */
1715 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1716 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1717
1718 RTNATIVETHREAD hNativeWriter;
1719 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1720 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1721
1722
1723 /*
1724 * Unwind one recursion. Not the last?
1725 */
1726 if (pThis->s.Core.cWriteRecursions != 1)
1727 {
1728#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1729 if (fNoVal)
1730 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1731 else
1732 {
1733 int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
1734 if (RT_FAILURE(rc9))
1735 return rc9;
1736 }
1737#endif
1738#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1739 uint32_t const cDepth = --pThis->s.Core.cWriteRecursions;
1740#else
1741 uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
1742#endif
1743 AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis, "Invalid write recursion value on leave"));
1744 return VINF_SUCCESS;
1745 }
1746
1747
1748 /*
1749 * Final recursion.
1750 */
1751 AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
1752#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1753 if (fNoVal)
1754 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1755 else
1756 {
1757 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
1758 if (RT_FAILURE(rc9))
1759 return rc9;
1760 }
1761#endif
1762
1763
1764#ifdef RTASM_HAVE_CMP_WRITE_U128
1765 /*
1766 * See if we can get out w/o any signalling as this is a common case.
1767 */
1768 if (pdmCritSectRwIsCmpWriteU128Supported())
1769 {
1770 RTCRITSECTRWSTATE OldState;
1771 OldState.s.u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1772 if (OldState.s.u64State == ((UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)))
1773 {
1774 OldState.s.hNativeWriter = hNativeSelf;
1775 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1776
1777 RTCRITSECTRWSTATE NewState;
1778 NewState.s.u64State = RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
1779 NewState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1780
1781# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1782 pThis->s.Core.cWriteRecursions = 0;
1783# else
1784 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1785# endif
1786 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1787
1788 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1789 return VINF_SUCCESS;
1790
1791 /* bail out. */
1792 pThis->s.Core.cWriteRecursions = 1;
1793 }
1794 }
1795#endif /* RTASM_HAVE_CMP_WRITE_U128 */
1796
1797
1798#if defined(IN_RING3) || defined(IN_RING0)
1799 /*
1800     * Ring-3: Straightforward, just update the state and signal waiters if necessary.
1801 * Ring-0: Try leave for real, depends on host and context.
1802 */
1803# ifdef IN_RING0
1804 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
1805 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1806    if (   pVCpu == NULL /* non-EMT access; if we implement it, it must be able to block */
1807 || VMMRZCallRing3IsEnabled(pVCpu)
1808 || RTSemEventIsSignalSafe()
1809 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
1810 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
1811            && ASMIntAreEnabled())                      /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
1812 )
1813# endif
1814 {
1815# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1816 pThis->s.Core.cWriteRecursions = 0;
1817# else
1818 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1819# endif
1820 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1821 ASMAtomicWriteHandle(&pThis->s.Core.u.s.hNativeWriter, NIL_RTNATIVETHREAD);
1822
1823 for (;;)
1824 {
1825 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1826 uint64_t u64OldState = u64State;
1827
1828 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1829 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
1830 c--;
1831
1832 if ( c > 0
1833 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
1834 {
1835 /*
1836 * Don't change the direction, wake up the next writer if any.
1837 */
1838 u64State &= ~RTCSRW_CNT_WR_MASK;
1839 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1840 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1841 {
1842 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1843 int rc;
1844 if (c == 0)
1845 rc = VINF_SUCCESS;
1846# ifdef IN_RING0
1847 else if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
1848 {
1849 VMMR0EMTBLOCKCTX Ctx;
1850 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1851 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1852
1853 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1854
1855 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1856 }
1857# endif
1858 else
1859 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1860 AssertRC(rc);
1861 return rc;
1862 }
1863 }
1864 else
1865 {
1866 /*
1867 * Reverse the direction and signal the reader threads.
1868 */
1869 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1870 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1871 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1872 {
1873 Assert(!pThis->s.Core.fNeedReset);
1874 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
1875 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1876
1877 int rc;
1878# ifdef IN_RING0
1879 if (!RTSemEventMultiIsSignalSafe() && pVCpu != NULL)
1880 {
1881 VMMR0EMTBLOCKCTX Ctx;
1882 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1883 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1884
1885 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1886
1887 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1888 }
1889 else
1890# endif
1891 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1892 AssertRC(rc);
1893 return rc;
1894 }
1895 }
1896
1897 ASMNopPause();
1898 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1899 { /*likely*/ }
1900 else
1901 return VERR_SEM_DESTROYED;
1902 ASMNopPause();
1903 }
1904 /* not reached! */
1905 }
1906#endif /* IN_RING3 || IN_RING0 */
1907
1908
1909#ifndef IN_RING3
1910 /*
1911 * Queue the requested exit for ring-3 execution.
1912 */
1913# ifndef IN_RING0
1914 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1915# endif
1916 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
1917    LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
1918 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves),
1919 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
1920 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = pThis->s.pSelfR3;
1921 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1922 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i])
1923 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] & PAGE_OFFSET_MASK)
1924 == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
1925 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i], pThis),
1926 pdmCritSectRwCorrupted(pThis, "Invalid self pointer on queue (excl)"));
1927 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1928 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1929 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1930 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
1931 return VINF_SUCCESS;
1932#endif
1933}
1934
1935
1936/**
1937 * Leave a critical section held exclusively.
1938 *
1939 * @returns VBox status code.
1940 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
1941 *          during the operation.
1942 * @param pVM The cross context VM structure.
1943 * @param pThis Pointer to the read/write critical section.
1944 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1945 */
1946VMMDECL(int) PDMCritSectRwLeaveExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1947{
1948 return pdmCritSectRwLeaveExclWorker(pVM, pThis, false /*fNoVal*/);
1949}
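
/*
 * Usage sketch (not part of the build): exclusive recursion.  Every
 * successful enter must be matched by exactly one leave; only the final
 * leave (the cWriteRecursions == 1 path in the worker above) releases the
 * section.  pCritSect is illustrative.
 *
 * @code
 *      PDMCritSectRwEnterExcl(pVM, pCritSect, VERR_SEM_BUSY);
 *      PDMCritSectRwEnterExcl(pVM, pCritSect, VERR_SEM_BUSY);  // recursive enter
 *      // ...
 *      PDMCritSectRwLeaveExcl(pVM, pCritSect);                 // unwinds one level
 *      PDMCritSectRwLeaveExcl(pVM, pCritSect);                 // releases the section
 * @endcode
 */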
1950
1951
1952#if defined(IN_RING3) || defined(IN_RING0)
1953/**
1954 * PDMCritSectBothFF interface.
1955 *
1956 * @param pVM The cross context VM structure.
1957 * @param pThis Pointer to the read/write critical section.
1958 */
1959void pdmCritSectRwLeaveExclQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1960{
1961 pdmCritSectRwLeaveExclWorker(pVM, pThis, true /*fNoVal*/);
1962}
1963#endif
1964
1965
1966/**
1967 * Checks whether the caller is the exclusive (write) owner of the critical section.
1968 *
1969 * @retval true if owner.
1970 * @retval false if not owner.
1971 * @param pVM The cross context VM structure.
1972 * @param pThis Pointer to the read/write critical section.
1973 * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
1974 * RTCritSectRwIsWriteOwner.
1975 */
1976VMMDECL(bool) PDMCritSectRwIsWriteOwner(PVMCC pVM, PPDMCRITSECTRW pThis)
1977{
1978 /*
1979 * Validate handle.
1980 */
1981 AssertPtr(pThis);
1982 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1983
1984 /*
1985 * Check ownership.
1986 */
1987 RTNATIVETHREAD hNativeWriter;
1988 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1989 if (hNativeWriter == NIL_RTNATIVETHREAD)
1990 return false;
1991 return hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
1992}
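
/*
 * Usage sketch (not part of the build): asserting that the caller already
 * holds the write lock before touching state that is only changed under it.
 * pCritSect is illustrative.
 *
 * @code
 *      Assert(PDMCritSectRwIsWriteOwner(pVM, pCritSect));
 *      // ... read or modify write-lock protected state ...
 * @endcode
 */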
1993
1994
1995/**
1996 * Checks if the caller is one of the read owners of the critical section.
1997 *
1998 * @note !CAUTION! This API doesn't work reliably if lock validation isn't
1999 *          enabled. Meaning, the answer is not trustworthy unless
2000 * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
2001 * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
2002 * creating the semaphore. And finally, if you used a locking class,
2003 * don't disable deadlock detection by setting cMsMinDeadlock to
2004 * RT_INDEFINITE_WAIT.
2005 *
2006 * In short, only use this for assertions.
2007 *
2008 * @returns @c true if reader, @c false if not.
2009 * @param pVM The cross context VM structure.
2010 * @param pThis Pointer to the read/write critical section.
2011 * @param fWannaHear What you'd like to hear when lock validation is not
2012 * available. (For avoiding asserting all over the place.)
2013 * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
2014 */
2015VMMDECL(bool) PDMCritSectRwIsReadOwner(PVMCC pVM, PPDMCRITSECTRW pThis, bool fWannaHear)
2016{
2017 /*
2018 * Validate handle.
2019 */
2020 AssertPtr(pThis);
2021 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
2022
2023 /*
2024 * Inspect the state.
2025 */
2026 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
2027 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
2028 {
2029 /*
2030 * It's in write mode, so we can only be a reader if we're also the
2031 * current writer.
2032 */
2033 RTNATIVETHREAD hWriter;
2034 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hWriter);
2035 if (hWriter == NIL_RTNATIVETHREAD)
2036 return false;
2037 return hWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
2038 }
2039
2040 /*
2041 * Read mode. If there are no current readers, then we cannot be a reader.
2042 */
2043 if (!(u64State & RTCSRW_CNT_RD_MASK))
2044 return false;
2045
2046#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
2047 /*
2048 * Ask the lock validator.
2049 * Note! It doesn't know everything, let's deal with that if it becomes an issue...
2050 */
2051 NOREF(fWannaHear);
2052 return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
2053#else
2054 /*
2055     * Ok, we don't know, just tell the caller what they want to hear.
2056 */
2057 return fWannaHear;
2058#endif
2059}
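
/*
 * Usage sketch (not part of the build): per the note above this is only
 * meant for assertions; fWannaHear selects the answer returned when lock
 * validation is not available.  pCritSect is illustrative.
 *
 * @code
 *      Assert(PDMCritSectRwIsReadOwner(pVM, pCritSect, true /*fWannaHear*/));
 * @endcode
 */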
2060
2061
2062/**
2063 * Gets the write recursion count.
2064 *
2065 * @returns The write recursion count (0 if bad critsect).
2066 * @param pThis Pointer to the read/write critical section.
2067 * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
2068 * RTCritSectRwGetWriteRecursion.
2069 */
2070VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
2071{
2072 /*
2073 * Validate handle.
2074 */
2075 AssertPtr(pThis);
2076 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2077
2078 /*
2079 * Return the requested data.
2080 */
2081 return pThis->s.Core.cWriteRecursions;
2082}
2083
2084
2085/**
2086 * Gets the read recursion count of the current writer.
2087 *
2088 * @returns The read recursion count (0 if bad critsect).
2089 * @param pThis Pointer to the read/write critical section.
2090 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
2091 * RTCritSectRwGetWriterReadRecursion.
2092 */
2093VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
2094{
2095 /*
2096 * Validate handle.
2097 */
2098 AssertPtr(pThis);
2099 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2100
2101 /*
2102 * Return the requested data.
2103 */
2104 return pThis->s.Core.cWriterReads;
2105}
2106
2107
2108/**
2109 * Gets the current number of reads.
2110 *
2111 * This includes all read recursions, so it might be higher than the number of
2112 * read owners. It does not include reads done by the current writer.
2113 *
2114 * @returns The read count (0 if bad critsect).
2115 * @param pThis Pointer to the read/write critical section.
2116 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
2117 * RTCritSectRwGetReadCount.
2118 */
2119VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
2120{
2121 /*
2122 * Validate input.
2123 */
2124 AssertPtr(pThis);
2125 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2126
2127 /*
2128 * Return the requested data.
2129 */
2130 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
2131 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
2132 return 0;
2133 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
2134}
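
/*
 * Usage sketch (not part of the build): dumping the lock counters for
 * debugging.  Log3 and the format string are illustrative.
 *
 * @code
 *      Log3(("critsect-rw %p: write-recursions=%u writer-reads=%u readers=%u\n",
 *            pCritSect,
 *            PDMCritSectRwGetWriteRecursion(pCritSect),
 *            PDMCritSectRwGetWriterReadRecursion(pCritSect),
 *            PDMCritSectRwGetReadCount(pCritSect)));
 * @endcode
 */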
2135
2136
2137/**
2138 * Checks if the read/write critical section is initialized or not.
2139 *
2140 * @retval true if initialized.
2141 * @retval false if not initialized.
2142 * @param pThis Pointer to the read/write critical section.
2143 * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
2144 */
2145VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
2146{
2147 AssertPtr(pThis);
2148 return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
2149}
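
/*
 * Usage sketch (not part of the build): guarding a cleanup or error path
 * that may run before the section has been initialized.  pThis->CritSectRw
 * is an illustrative device-state member.
 *
 * @code
 *      if (PDMCritSectRwIsInitialized(&pThis->CritSectRw))
 *      {
 *          // ... safe to enter/leave or tear down the section here ...
 *      }
 * @endcode
 */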
2150