VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@55889

Last change on this file since 55889 was 50000, checked in by vboxsync, 11 years ago

pdmR3R0CritSectEnterContended: That worked but wasn't entirely correct, so next attempt.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 26.7 KB
/* $Id: PDMAllCritSect.cpp 50000 2013-12-24 21:03:10Z vboxsync $ */
/** @file
 * PDM - Write-Only Critical Section, All Contexts.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3       20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0       256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC       256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter

/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section.  This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    Assert(pCritSect->s.Core.cNestings == 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# else
    NOREF(pSrcPos);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_DESTROYED if destroyed.
 *
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD        hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
        /*
         * Do the wait.
         *
         * In ring-3 this gets cluttered by lock validation and thread state
         * maintenance.
         *
         * In ring-0 we have to deal with the possibility that the thread has
         * been signalled and the interruptible wait function returning
         * immediately.  In that case we do normal R0/RC rcBusy handling.
         */
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
#  else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
#  endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# else  /* IN_RING0 */
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# endif /* IN_RING0 */

        /*
         * Deal with the return code and critsect destruction.
         */
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));

# ifdef IN_RING0
        /* Something is pending (signal, APC, debugger, whatever), just go back
           to ring-3 so the kernel can deal with it when leaving kernel context.

           Note! We've incremented cLockers already and cannot safely decrement
                 it without creating a race with PDMCritSectLeave, resulting in
                 spurious wakeups. */
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
        AssertRC(rc);
# endif
    }
    /* won't get here */
}
#endif /* IN_RING3 || IN_RING0 */
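
/*
 * Descriptive summary of the cLockers protocol implemented above and below
 * (derived from the code, added here as an orientation aid):
 *
 *      cLockers == -1                  section is free
 *      cLockers == cNestings - 1       owned, nobody waiting
 *      cLockers >= cNestings           owned, (cLockers - cNestings + 1) waiters
 *
 * An uncontended enter is a single CAS of cLockers from -1 to 0.  A contended
 * enter increments cLockers and sleeps on the SUPSEMEVENT.  The final leave
 * decrements cLockers and signals the event if the result is still >= 0,
 * i.e. if at least one waiter bumped the counter in the meantime.
 */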


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    NOREF(rcBusy);
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jump back to ring-3 just to
     *        get the lock, the ring-3 code will then call the kernel to do
     *        the lock wait and when the call returns it will call ring-0
     *        again and resume in setjmp style.  Not very efficient. */
#  if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, XXX);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, XXX);
            HMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
#  else
    /*
     * If preemption hasn't been disabled, we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#  endif
#endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in RC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in RC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section through a ring-3
 *                      call if necessary.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#endif
}
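
/*
 * Illustrative usage sketch (not part of this file): how a hypothetical
 * device helper might take the section from any context.  The PMYDEV/pThis
 * names and the VERR_SEM_BUSY policy are assumptions for the example only;
 * real callers pick an rcBusy that suits their I/O handler.
 */
#if 0 /* example only */
static int mydevDoSomethingLocked(PMYDEV pThis)
{
    /* In RC/R0 a busy section makes this return VERR_SEM_BUSY so the access
       can be retried in ring-3; in ring-3 the call simply blocks. */
    int rc = PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
    if (rc != VINF_SUCCESS)
        return rc;

    /* ... touch device state owned by the section ... */

    PDMCritSectLeave(&pThis->CritSect);
    return VINF_SUCCESS;
}
#endif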


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in RC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in RC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section through a ring-3
 *                      call if necessary.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file.  Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from.  Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#endif
}
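
/*
 * Illustrative call sketch (not part of this file): the debug variant is
 * typically invoked with RT_SRC_POS supplying pszFile/iLine/pszFunction and a
 * return address as uId, so the lock validator can pinpoint the call site.
 * The pThis name is an assumption for the example only.
 */
#if 0 /* example only */
    int rc = PDMCritSectEnterDebug(&pThis->CritSect, VERR_SEM_BUSY,
                                   (RTHCUINTPTR)ASMReturnAddress(), RT_SRC_POS);
#endif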


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#endif
}
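
/*
 * Illustrative usage sketch (not part of this file): PDMCritSectTryEnter never
 * blocks or spins, so a caller that cannot sleep (or cannot afford a ring-3
 * round trip) typically defers the work when it gets VERR_SEM_BUSY.  The
 * pThis/mydevDeferWork names are assumptions for the example only.
 */
#if 0 /* example only */
    int rc = PDMCritSectTryEnter(&pThis->CritSect);
    if (rc == VINF_SUCCESS)
    {
        /* ... quick update of state owned by the section ... */
        PDMCritSectLeave(&pThis->CritSect);
    }
    else if (rc == VERR_SEM_BUSY)
        mydevDeferWork(pThis);          /* hypothetical: retry later */
    else
        AssertRC(rc);                   /* VERR_SEM_DESTROYED etc. */
#endif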


/**
 * Try enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file.  Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from.  Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectTryEnter(pCritSect, NULL);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @returns Indication whether we really exited the critical section.
 * @retval  VINF_SUCCESS if we really exited.
 * @retval  VINF_SEM_NESTED if we only reduced the nesting count.
 * @retval  VERR_NOT_OWNER if you somehow ignore release assertions.
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(int) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                           ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                            pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                            pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
                           VERR_NOT_OWNER);
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return VINF_SEM_NESTED;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return VINF_SUCCESS;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
        pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */

    return VINF_SUCCESS;
}
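
/*
 * Illustrative sketch (not part of this file): the section counts recursive
 * entries, so only the leave matching the first enter really releases it.
 * Based on the cNestings handling above; pThis is a hypothetical owner struct.
 */
#if 0 /* example only */
    PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);   /* cNestings = 1 */
    PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);   /* cNestings = 2 (same thread) */
    int rc1 = PDMCritSectLeave(&pThis->CritSect);        /* VINF_SEM_NESTED, still owned */
    int rc2 = PDMCritSectLeave(&pThis->CritSect);        /* VINF_SUCCESS, released */
#endif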


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}


/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, PVMCPU pVCpu)
{
#ifdef IN_RING3
    NOREF(pVCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    Assert(&pVCpu->CTX_SUFF(pVM)->aCpus[pVCpu->idCpu] == pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}