VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp @ 49999

Last change on this file since 49999 was 49999, checked in by vboxsync, 11 years ago

pdmR3R0CritSectEnterContended: Ok, that didn't work, so next attempt.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 26.3 KB
/* $Id: PDMAllCritSect.cpp 49999 2013-12-24 19:23:21Z vboxsync $ */
/** @file
 * PDM - Write-Only Critical Section, All Contexts.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    Assert(pCritSect->s.Core.cNestings == 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# else
    NOREF(pSrcPos);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_DESTROYED if destroyed.
 * @retval  VERR_INTERRUPTED in ring-0 if we should return rcBusy or jump to
 *          ring-3.
 *
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
# ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
# else
    RTTHREAD        hThreadSelf = RTThreadSelf();
# endif
# endif
    for (;;)
    {
        /*
         * Do the wait.
         *
         * In ring-3 this gets cluttered by lock validation and thread state
         * maintenance.
         *
         * In ring-0 we have to deal with the possibility that the thread has
         * been signalled and the interruptible wait function returning
         * immediately.  In that case we do normal R0/RC rcBusy handling.
         */
# ifdef IN_RING3
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# else /* IN_RING0 */
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# endif /* IN_RING0 */

        /*
         * Deal with the return code and critsect destruction.
         */
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
# ifdef IN_RING0
        return VERR_INTERRUPTED;
# endif
    }
    /* won't get here */
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    NOREF(rcBusy);
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jumps back to ring-3 just to
     *        get the lock, the ring-3 code will then call the kernel to do
     *        the lock wait and when the call return it will call ring-0
     *        again and resume via in setjmp style.  Not very efficient. */
# if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, XXX);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, XXX);
            HMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
# else
    /*
     * Preemption hasn't been disabled, so we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
    {
        int rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        if (rc != VERR_INTERRUPTED)
            return rc;
    }
# endif
#endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in RC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in RC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section through a ring-3
 *                      call if necessary.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#endif
}
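
/*
 * Illustrative usage sketch (not a definitive pattern): a caller pairs
 * PDMCritSectEnter() with PDMCritSectLeave() and picks rcBusy to control the
 * contended case.  Passing VERR_SEM_BUSY means a contended section in RC/R0
 * may fail with that status instead of being acquired through a ring-3 call.
 * The pThis->CritSect member below is a hypothetical example.
 *
 * @code
 *      int rc = PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
 *      if (rc == VERR_SEM_BUSY)
 *          return rc;              // contended in RC/R0; caller retries, typically from ring-3
 *      AssertRCReturn(rc, rc);
 *
 *      // ... access the data protected by the section ...
 *
 *      PDMCritSectLeave(&pThis->CritSect);
 * @endcode
 */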

/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in RC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect       The PDM critical section to enter.
 * @param   rcBusy          The status code to return when we're in RC or R0
 *                          and the section is busy.  Pass VINF_SUCCESS to
 *                          acquire the critical section through a ring-3
 *                          call if necessary.
 * @param   uId             Some kind of locking location ID.  Typically a
 *                          return address up the stack.  Optional (0).
 * @param   pszFile         The file where the lock is being acquired from.
 *                          Optional.
 * @param   iLine           The line number in that file.  Optional (0).
 * @param   pszFunction     The function where the lock is being acquired
 *                          from.  Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#endif
}


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#endif
}


/**
 * Try enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect       The critical section.
 * @param   uId             Some kind of locking location ID.  Typically a
 *                          return address up the stack.  Optional (0).
 * @param   pszFile         The file where the lock is being acquired from.
 *                          Optional.
 * @param   iLine           The line number in that file.  Optional (0).
 * @param   pszFunction     The function where the lock is being acquired
 *                          from.  Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectTryEnter(pCritSect, NULL);
#endif
}
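
/*
 * Illustrative sketch of the try-enter pattern: PDMCritSectTryEnter() never
 * blocks, so a caller that must not wait checks for VERR_SEM_BUSY and defers
 * its work.  pCritSect is assumed to point at an initialized section.
 *
 * @code
 *      int rc = PDMCritSectTryEnter(pCritSect);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... brief work while owning the section ...
 *          PDMCritSectLeave(pCritSect);
 *      }
 *      else
 *          Assert(rc == VERR_SEM_BUSY); // owned by another thread, try again later
 * @endcode
 */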

#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
    if (   rc == VINF_SUCCESS
        && fCallRing3
        && pCritSect->s.Core.pValidatorRec
        && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @returns Indication whether we really exited the critical section.
 * @retval  VINF_SUCCESS if we really exited.
 * @retval  VINF_SEM_NESTED if we only reduced the nesting count.
 * @retval  VERR_NOT_OWNER if you somehow ignore release assertions.
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(int) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                           ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                            pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                            pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
                           VERR_NOT_OWNER);
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return VINF_SEM_NESTED;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
# if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
# endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return VINF_SUCCESS;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
        pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */

    return VINF_SUCCESS;
}
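
/*
 * Illustrative sketch of the nesting behaviour documented above: recursive
 * enters by the owner bump cNestings, inner leaves return VINF_SEM_NESTED,
 * and only the outermost leave returns VINF_SUCCESS and releases the section.
 * pCritSect is assumed to be an initialized, currently unowned section.
 *
 * @code
 *      PDMCritSectEnter(pCritSect, VERR_IGNORED);  // outer enter
 *      PDMCritSectEnter(pCritSect, VERR_IGNORED);  // nested enter by the same thread
 *      int rc1 = PDMCritSectLeave(pCritSect);      // VINF_SEM_NESTED, still owned
 *      int rc2 = PDMCritSectLeave(pCritSect);      // VINF_SUCCESS, section released
 * @endcode
 */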

/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}
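
/*
 * Illustrative sketch: an ownership assertion of this kind is a common way to
 * document that a helper expects its caller to already hold the section.  The
 * helper name and PSOMEDEVSTATE type below are hypothetical.
 *
 * @code
 *      static void someDevUpdateSharedState(PSOMEDEVSTATE pThis)
 *      {
 *          Assert(PDMCritSectIsOwner(&pThis->CritSect));
 *          // ... safe to touch state guarded by pThis->CritSect ...
 *      }
 * @endcode
 */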

/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, PVMCPU pVCpu)
{
#ifdef IN_RING3
    NOREF(pVCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    Assert(&pVCpu->CTX_SUFF(pVM)->aCpus[pVCpu->idCpu] == pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}