VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp @ 62145

Last change on this file since 62145 was 62145, checked in by vboxsync, 9 years ago

PDMCritSectAll.cpp: Prepped a few optimizations.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 29.1 KB
1/* $Id: PDMAllCritSect.cpp 62145 2016-07-08 16:35:35Z vboxsync $ */
2/** @file
3 * PDM - Write-Only Critical Section, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsect.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vm.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37# include <iprt/semaphore.h>
38#endif
39#if defined(IN_RING3) || defined(IN_RING0)
40# include <iprt/thread.h>
41#endif
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
47/** The number of loops to spin for in ring-3. */
48#define PDMCRITSECT_SPIN_COUNT_R3 20
49/** The number of loops to spin for in ring-0. */
50#define PDMCRITSECT_SPIN_COUNT_R0 256
51/** The number of loops to spin for in the raw-mode context. */
52#define PDMCRITSECT_SPIN_COUNT_RC 256
53
54
55/** Skips some of the overly paranoid atomic updates.
56 * Makes some assumptions about cache coherence, though not brave enough not to
57 * always end with an atomic update. */
58//#define PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
59
60/* Undefine the automatic VBOX_STRICT API mappings. */
61#undef PDMCritSectEnter
62#undef PDMCritSectTryEnter
63
64
65/**
66 * Gets the ring-3 native thread handle of the calling thread.
67 *
68 * @returns native thread handle (ring-3).
69 * @param pCritSect The critical section. This is used in R0 and RC.
70 */
71DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
72{
73#ifdef IN_RING3
74 NOREF(pCritSect);
75 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
76#else
77 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
78 NIL_RTNATIVETHREAD);
79 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
80 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
81 RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
82#endif
83 return hNativeSelf;
84}
85
86
87/**
88 * Tail code called when we've won the battle for the lock.
89 *
90 * @returns VINF_SUCCESS.
91 *
92 * @param pCritSect The critical section.
93 * @param hNativeSelf The native handle of this thread.
94 * @param pSrcPos The source position of the lock operation.
95 */
96DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
97{
98 AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
99 Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
100
101# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
102 pCritSect->s.Core.cNestings = 1;
103# else
104 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
105# endif
106 Assert(pCritSect->s.Core.cNestings == 1);
107 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);
108
109# ifdef PDMCRITSECT_STRICT
110 RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
111# else
112 NOREF(pSrcPos);
113# endif
114
115 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
116 return VINF_SUCCESS;
117}
118
119
120#if defined(IN_RING3) || defined(IN_RING0)
121/**
122 * Deals with the contended case in ring-3 and ring-0.
123 *
124 * @retval VINF_SUCCESS on success.
125 * @retval VERR_SEM_DESTROYED if destroyed.
126 *
127 * @param pCritSect The critsect.
128 * @param hNativeSelf The native thread handle.
129 * @param pSrcPos The source position of the lock operation.
130 */
131static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
132{
133 /*
134 * Start waiting.
135 */
136 if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
137 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
138# ifdef IN_RING3
139 STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
140# else
141 STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
142# endif
143
144 /*
145 * The wait loop.
146 */
147 PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
148 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
149# ifdef IN_RING3
150# ifdef PDMCRITSECT_STRICT
151 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
152 int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
153 if (RT_FAILURE(rc2))
154 return rc2;
155# else
156 RTTHREAD hThreadSelf = RTThreadSelf();
157# endif
158# endif
159 for (;;)
160 {
161 /*
162 * Do the wait.
163 *
164 * In ring-3 this gets cluttered by lock validation and thread state
165 * maintenance.
166 *
167 * In ring-0 we have to deal with the possibility that the thread has
168 * been signalled and that the interruptible wait function returns
169 * immediately. In that case we do normal R0/RC rcBusy handling.
170 */
171# ifdef IN_RING3
172# ifdef PDMCRITSECT_STRICT
173 int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
174 !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
175 RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
176 if (RT_FAILURE(rc9))
177 return rc9;
178# else
179 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
180# endif
181 int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
182 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
183# else /* IN_RING0 */
184 int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
185# endif /* IN_RING0 */
186
187 /*
188 * Deal with the return code and critsect destruction.
189 */
190 if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
191 return VERR_SEM_DESTROYED;
192 if (rc == VINF_SUCCESS)
193 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
194 AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
195
196# ifdef IN_RING0
197 /* Something is pending (signal, APC, debugger, whatever), just go back
198 to ring-3 so the kernel can deal with it when leaving kernel context.
199
200 Note! We've incremented cLockers already and cannot safely decrement
201 it without creating a race with PDMCritSectLeave, resulting in
202 spurious wakeups. */
203 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
204 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
205 rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
206 AssertRC(rc);
207# endif
208 }
209 /* won't get here */
210}
211#endif /* IN_RING3 || IN_RING0 */
212
213
214/**
215 * Common worker for the debug and normal APIs.
216 *
217 * @returns VINF_SUCCESS if entered successfully.
218 * @returns rcBusy when encountering a busy critical section in GC/R0.
219 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
220 * during the operation.
221 *
222 * @param pCritSect The PDM critical section to enter.
223 * @param rcBusy The status code to return when we're in GC or R0
224 * @param pSrcPos The source position of the lock operation.
225 */
226DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
227{
228 Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */
229 Assert(pCritSect->s.Core.cNestings >= 0);
230
231 /*
232 * If the critical section has already been destroyed, then inform the caller.
233 */
234 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
235 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
236 VERR_SEM_DESTROYED);
237
238 /*
239 * See if we're lucky.
240 */
241 /* NOP ... */
242 if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
243 return VINF_SUCCESS;
244
245 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
246 /* ... not owned ... */
247 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
248 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
249
250 /* ... or nested. */
251 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
252 {
253 Assert(pCritSect->s.Core.cNestings >= 1);
254# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
255 pCritSect->s.Core.cNestings += 1;
256# else
257 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
258# endif
259 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
260 return VINF_SUCCESS;
261 }
262
263 /*
264 * Spin for a bit without incrementing the counter.
265 */
266 /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
267 * cpu systems. */
268 int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
269 while (cSpinsLeft-- > 0)
270 {
271 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
272 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
273 ASMNopPause();
274 /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
275 cli'ed pendingpreemption check up front using sti w/ instruction fusing
276 for avoiding races. Hmm ... This is assuming the other party is actually
277 executing code on another CPU ... which we could keep track of if we
278 wanted. */
279 }
280
281#ifdef IN_RING3
282 /*
283 * Take the slow path.
284 */
285 NOREF(rcBusy);
286 return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
287
288#else
289# ifdef IN_RING0
290 /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
291 * and would be better off switching out of that while waiting for
292 * the lock. Several of the locks jump back to ring-3 just to
293 * get the lock; the ring-3 code will then call the kernel to do
294 * the lock wait and when the call returns it will call ring-0
295 * again and resume in setjmp style. Not very efficient. */
296# if 0
297 if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
298 * callers not prepared for longjmp/blocking to
299 * use PDMCritSectTryEnter. */
300 {
301 /*
302 * Leave HM context while waiting if necessary.
303 */
304 int rc;
305 if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
306 {
307 STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
308 rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
309 }
310 else
311 {
312 STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
313 PVM pVM = pCritSect->s.CTX_SUFF(pVM);
314 PVMCPU pVCpu = VMMGetCpu(pVM);
315 HMR0Leave(pVM, pVCpu);
316 RTThreadPreemptRestore(NIL_RTTHREAD, XXX);
317
318 rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
319
320 RTThreadPreemptDisable(NIL_RTTHREAD, XXX);
321 HMR0Enter(pVM, pVCpu);
322 }
323 return rc;
324 }
325# else
326 /*
327 * If preemption hasn't been disabled, we can block here in ring-0.
328 */
329 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
330 && ASMIntAreEnabled())
331 return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
332# endif
333#endif /* IN_RING0 */
334
335 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
336
337 /*
338 * Call ring-3 to acquire the critical section?
339 */
340 if (rcBusy == VINF_SUCCESS)
341 {
342 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
343 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
344 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
345 }
346
347 /*
348 * Return busy.
349 */
350 LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
351 return rcBusy;
352#endif /* !IN_RING3 */
353}
354
355
356/**
357 * Enters a PDM critical section.
358 *
359 * @returns VINF_SUCCESS if entered successfully.
360 * @returns rcBusy when encountering a busy critical section in RC/R0.
361 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
362 * during the operation.
363 *
364 * @param pCritSect The PDM critical section to enter.
365 * @param rcBusy The status code to return when we're in RC or R0
366 * and the section is busy. Pass VINF_SUCCESS to
367 * acquire the critical section through a ring-3
368 * call if necessary.
369 */
370VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
371{
372#ifndef PDMCRITSECT_STRICT
373 return pdmCritSectEnter(pCritSect, rcBusy, NULL);
374#else
375 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
376 return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
377#endif
378}
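/* Illustrative usage sketch (pThis->CritSect is a hypothetical device member; the
 * rcBusy value shown is just an example): callers pass a status for the busy case
 * in RC/R0 and pair every successful enter with a leave.
 *
 *     int rc = PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         // ... access the state guarded by the section ...
 *         PDMCritSectLeave(&pThis->CritSect);
 *     }
 *     else
 *         return rc;  // rcBusy (VERR_SEM_BUSY here) in RC/R0, or VERR_SEM_DESTROYED.
 */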
379
380
381/**
382 * Enters a PDM critical section, with location information for debugging.
383 *
384 * @returns VINF_SUCCESS if entered successfully.
385 * @returns rcBusy when encountering a busy critical section in RC/R0.
386 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
387 * during the operation.
388 *
389 * @param pCritSect The PDM critical section to enter.
390 * @param rcBusy The status code to return when we're in RC or R0
391 * and the section is busy. Pass VINF_SUCCESS to
392 * acquire the critical section through a ring-3
393 * call if necessary.
394 * @param uId Some kind of locking location ID. Typically a
395 * return address up the stack. Optional (0).
396 * @param SRC_POS The source position where the lock is being
397 * acquired from. Optional.
398 */
399VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
400{
401#ifdef PDMCRITSECT_STRICT
402 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
403 return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
404#else
405 NOREF(uId); RT_SRC_POS_NOREF();
406 return pdmCritSectEnter(pCritSect, rcBusy, NULL);
407#endif
408}
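/* Illustrative sketch of the debug variant (hypothetical pThis->CritSect; assumes
 * the common pattern of passing the caller's return address as uId): the extra
 * arguments give the lock validator the location where the section was taken.
 *
 *     int rc = PDMCritSectEnterDebug(&pThis->CritSect, VERR_SEM_BUSY,
 *                                    (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
 */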
409
410
411/**
412 * Common worker for the debug and normal APIs.
413 *
414 * @retval VINF_SUCCESS on success.
415 * @retval VERR_SEM_BUSY if the critsect was owned.
416 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
417 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
418 * during the operation.
419 *
420 * @param pCritSect The critical section.
421 * @param pSrcPos The source position of the lock operation.
422 */
423static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
424{
425 /*
426 * If the critical section has already been destroyed, then inform the caller.
427 */
428 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
429 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
430 VERR_SEM_DESTROYED);
431
432 /*
433 * See if we're lucky.
434 */
435 /* NOP ... */
436 if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
437 return VINF_SUCCESS;
438
439 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
440 /* ... not owned ... */
441 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
442 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
443
444 /* ... or nested. */
445 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
446 {
447 Assert(pCritSect->s.Core.cNestings >= 1);
448# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
449 pCritSect->s.Core.cNestings += 1;
450# else
451 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
452# endif
453 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
454 return VINF_SUCCESS;
455 }
456
457 /* no spinning */
458
459 /*
460 * Return busy.
461 */
462#ifdef IN_RING3
463 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
464#else
465 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
466#endif
467 LogFlow(("PDMCritSectTryEnter: locked\n"));
468 return VERR_SEM_BUSY;
469}
470
471
472/**
473 * Try enter a critical section.
474 *
475 * @retval VINF_SUCCESS on success.
476 * @retval VERR_SEM_BUSY if the critsect was owned.
477 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
478 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
479 * during the operation.
480 *
481 * @param pCritSect The critical section.
482 */
483VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
484{
485#ifndef PDMCRITSECT_STRICT
486 return pdmCritSectTryEnter(pCritSect, NULL);
487#else
488 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
489 return pdmCritSectTryEnter(pCritSect, &SrcPos);
490#endif
491}
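/* Illustrative sketch (hypothetical pThis->CritSect): try-enter is the
 * non-blocking pattern, used where the caller can defer the work instead
 * of waiting for the owner.
 *
 *     int rc = PDMCritSectTryEnter(&pThis->CritSect);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         // ... do the work ...
 *         PDMCritSectLeave(&pThis->CritSect);
 *     }
 *     else if (rc == VERR_SEM_BUSY)
 *     {
 *         // ... someone else owns it; defer or retry later ...
 *     }
 */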
492
493
494/**
495 * Try enter a critical section, with location information for debugging.
496 *
497 * @retval VINF_SUCCESS on success.
498 * @retval VERR_SEM_BUSY if the critsect was owned.
499 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
500 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
501 * during the operation.
502 *
503 * @param pCritSect The critical section.
504 * @param uId Some kind of locking location ID. Typically a
505 * return address up the stack. Optional (0).
506 * @param SRC_POS The source position where the lock is being
507 * acquired from. Optional.
508 */
509VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
510{
511#ifdef PDMCRITSECT_STRICT
512 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
513 return pdmCritSectTryEnter(pCritSect, &SrcPos);
514#else
515 NOREF(uId); RT_SRC_POS_NOREF();
516 return pdmCritSectTryEnter(pCritSect, NULL);
517#endif
518}
519
520
521#ifdef IN_RING3
522/**
523 * Enters a PDM critical section.
524 *
525 * @returns VINF_SUCCESS if entered successfully.
526 * @returns rcBusy when encountering a busy critical section in GC/R0.
527 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
528 * during the operation.
529 *
530 * @param pCritSect The PDM critical section to enter.
531 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
532 */
533VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
534{
535 int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
536 if ( rc == VINF_SUCCESS
537 && fCallRing3
538 && pCritSect->s.Core.pValidatorRec
539 && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
540 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
541 return rc;
542}
543#endif /* IN_RING3 */
544
545
546/**
547 * Leaves a critical section entered with PDMCritSectEnter().
548 *
549 * @returns Indication whether we really exited the critical section.
550 * @retval VINF_SUCCESS if we really exited.
551 * @retval VINF_SEM_NESTED if we only reduced the nesting count.
552 * @retval VERR_NOT_OWNER if you somehow ignore release assertions.
553 *
554 * @param pCritSect The PDM critical section to leave.
555 */
556VMMDECL(int) PDMCritSectLeave(PPDMCRITSECT pCritSect)
557{
558 AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
559 Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
560
561 /* Check for NOP sections before asserting ownership. */
562 if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
563 return VINF_SUCCESS;
564
565 /*
566 * Always check that the caller is the owner (screw performance).
567 */
568 RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
569 AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
570 ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
571 pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
572 pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
573 VERR_NOT_OWNER);
574
575 /*
576 * Nested leave.
577 */
578 int32_t const cNestings = pCritSect->s.Core.cNestings;
579 Assert(cNestings >= 1);
580 if (cNestings > 1)
581 {
582# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
583 pCritSect->s.Core.cNestings = cNestings - 1;
584# else
585 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, cNestings - 1);
586# endif
587 ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
588 Assert(pCritSect->s.Core.cLockers >= 0);
589 return VINF_SEM_NESTED;
590 }
591
592#ifdef IN_RING0
593# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
594 if (1) /* SUPSemEventSignal is safe */
595# else
596 if (ASMIntAreEnabled())
597# endif
598#endif
599#if defined(IN_RING3) || defined(IN_RING0)
600 {
601 /*
602 * Leave for real.
603 */
604 /* update members. */
605 SUPSEMEVENT hEventToSignal = pCritSect->s.hEventToSignal;
606 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
607# ifdef IN_RING3
608# if defined(PDMCRITSECT_STRICT)
609 if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
610 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
611# endif
612 Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
613# endif
614# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
615 //pCritSect->s.Core.cNestings = 0; /* not really needed */
616 pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
617# else
618 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
619 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
620# endif
621 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
622
623 /* stop and decrement lockers. */
624 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
625 ASMCompilerBarrier();
626 if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) < 0)
627 { /* hopefully likely */ }
628 else
629 {
630 /* Someone is waiting, wake up one of them. */
631 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
632 PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
633 int rc = SUPSemEventSignal(pSession, hEvent);
634 AssertRC(rc);
635 }
636
637 /* Signal exit event. */
638 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
639 { /* likely */ }
640 else
641 {
642 Log8(("Signalling %#p\n", hEventToSignal));
643 int rc = SUPSemEventSignal(pCritSect->s.CTX_SUFF(pVM)->pSession, hEventToSignal);
644 AssertRC(rc);
645 }
646
647# if defined(DEBUG_bird) && defined(IN_RING0)
648 VMMTrashVolatileXMMRegs();
649# endif
650 }
651#endif /* IN_RING3 || IN_RING0 */
652#ifdef IN_RING0
653 else
654#endif
655#if defined(IN_RING0) || defined(IN_RC)
656 {
657 /*
658 * Try leave it.
659 */
660 if (pCritSect->s.Core.cLockers == 0)
661 {
662# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
663 //pCritSect->s.Core.cNestings = 0; /* not really needed */
664# else
665 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
666# endif
667 RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
668 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
669 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
670
671 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
672 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
673 return VINF_SUCCESS;
674
675 /* darn, someone raced in on us. */
676 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
677 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
678 Assert(pCritSect->s.Core.cNestings == 0);
679# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
680 //pCritSect->s.Core.cNestings = 1;
681# else
682 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
683# endif
684 }
685 ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
686
687 /*
688 * Queue the request.
689 */
690 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
691 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
692 uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
693 LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
694 AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
695 pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
696 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
697 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
698 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
699 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
700 }
701#endif /* IN_RING0 || IN_RC */
702
703 return VINF_SUCCESS;
704}
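/* Illustrative sketch of nested use (hypothetical pThis->CritSect): the owner may
 * re-enter the section; only the final leave actually releases it.
 *
 *     PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);   // cNestings = 1
 *     PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);   // cNestings = 2
 *     int rc = PDMCritSectLeave(&pThis->CritSect);         // VINF_SEM_NESTED
 *     rc     = PDMCritSectLeave(&pThis->CritSect);         // VINF_SUCCESS
 */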
705
706
707#if defined(IN_RING0) || defined(IN_RING3)
708/**
709 * Schedule an event semaphore for signalling upon critsect exit.
710 *
711 * @returns VINF_SUCCESS on success.
712 * @returns VERR_TOO_MANY_SEMAPHORES if an event was already scheduled.
713 * @returns VERR_NOT_OWNER if we're not the critsect owner (ring-3 only).
714 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
715 *
716 * @param pCritSect The critical section.
717 * @param hEventToSignal The support driver event semaphore that should be
718 * signalled.
719 */
720VMMDECL(int) PDMHCCritSectScheduleExitEvent(PPDMCRITSECT pCritSect, SUPSEMEVENT hEventToSignal)
721{
722 AssertPtr(pCritSect);
723 Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));
724 Assert(hEventToSignal != NIL_SUPSEMEVENT);
725# ifdef IN_RING3
726 if (RT_UNLIKELY(!RTCritSectIsOwner(&pCritSect->s.Core)))
727 return VERR_NOT_OWNER;
728# endif
729 if (RT_LIKELY( pCritSect->s.hEventToSignal == NIL_RTSEMEVENT
730 || pCritSect->s.hEventToSignal == hEventToSignal))
731 {
732 pCritSect->s.hEventToSignal = hEventToSignal;
733 return VINF_SUCCESS;
734 }
735 return VERR_TOO_MANY_SEMAPHORES;
736}
737#endif /* IN_RING0 || IN_RING3 */
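/* Illustrative sketch (pThis->CritSect and hEvent are hypothetical): the event is
 * scheduled while owning the section, and the leave path signals it.
 *
 *     PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
 *     int rc = PDMHCCritSectScheduleExitEvent(&pThis->CritSect, hEvent);
 *     AssertRC(rc);
 *     PDMCritSectLeave(&pThis->CritSect);   // signals hEvent on the way out
 */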
738
739
740/**
741 * Checks the caller is the owner of the critical section.
742 *
743 * @returns true if owner.
744 * @returns false if not owner.
745 * @param pCritSect The critical section.
746 */
747VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
748{
749#ifdef IN_RING3
750 return RTCritSectIsOwner(&pCritSect->s.Core);
751#else
752 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
753 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
754 if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
755 return false;
756 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
757 || pCritSect->s.Core.cNestings > 1;
758#endif
759}
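/* Illustrative sketch (hypothetical pThis->CritSect): the typical use is an
 * ownership assertion guarding code that expects the section to already be held.
 *
 *     Assert(PDMCritSectIsOwner(&pThis->CritSect));
 */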
760
761
762/**
763 * Checks the specified VCPU is the owner of the critical section.
764 *
765 * @returns true if owner.
766 * @returns false if not owner.
767 * @param pCritSect The critical section.
768 * @param pVCpu The cross context virtual CPU structure.
769 */
770VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, PVMCPU pVCpu)
771{
772#ifdef IN_RING3
773 NOREF(pVCpu);
774 return RTCritSectIsOwner(&pCritSect->s.Core);
775#else
776 Assert(&pVCpu->CTX_SUFF(pVM)->aCpus[pVCpu->idCpu] == pVCpu);
777 if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
778 return false;
779 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
780 || pCritSect->s.Core.cNestings > 1;
781#endif
782}
783
784
785/**
786 * Checks if anyone is waiting on the critical section we own.
787 *
788 * @returns true if someone is waiting.
789 * @returns false if no one is waiting.
790 * @param pCritSect The critical section.
791 */
792VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
793{
794 AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
795 Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
796 return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
797}
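/* Illustrative sketch (hypothetical pThis->CritSect): an owner can poll this to
 * decide whether to drop the lock early and let a waiter in.
 *
 *     if (PDMCritSectHasWaiters(&pThis->CritSect))
 *     {
 *         PDMCritSectLeave(&pThis->CritSect);
 *         // ... optionally yield, then re-enter when ready ...
 *     }
 */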
798
799
800/**
801 * Checks if a critical section is initialized or not.
802 *
803 * @returns true if initialized.
804 * @returns false if not initialized.
805 * @param pCritSect The critical section.
806 */
807VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
808{
809 return RTCritSectIsInitialized(&pCritSect->s.Core);
810}
811
812
813/**
814 * Gets the recursion depth.
815 *
816 * @returns The recursion depth.
817 * @param pCritSect The critical section.
818 */
819VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
820{
821 return RTCritSectGetRecursion(&pCritSect->s.Core);
822}
823