VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@37423

Last change on this file since 37423 was 37419, checked in by vboxsync, 14 years ago

PDM/IPRT CritSect: Introduced the NOP critical section for simplifying locking in IOM and TM. (Revisiting device emulation locking, making it more fine grained over time.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 25.2 KB
/* $Id: PDMAllCritSect.cpp 37419 2011-06-11 20:25:37Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/vmm/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3       20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0       256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC       256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect       The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    Assert(pCritSect->s.Core.cNestings == 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD        hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# elif defined(IN_RING3)
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef IN_RING3
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# endif

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect       The PDM critical section to enter.
 * @param   rcBusy          The status code to return when we're in GC or R0
 *                          and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#elif defined(IN_RING0)
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jump back to ring-3 just to
     *        get the lock, the ring-3 code will then call the kernel to do
     *        the lock wait and when the call returns it will call ring-0
     *        again and resume in setjmp style.  Not very efficient. */
# if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HWACCM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HWACCMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, ????);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
            HWACCMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
# else
    /*
     * Since preemption hasn't been disabled, we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
# endif

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;

#else /* IN_RC */
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* IN_RC */
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect       The PDM critical section to enter.
 * @param   rcBusy          The status code to return when we're in GC or R0
 *                          and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#endif
}
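
/*
 * A minimal usage sketch, kept disabled: a device handler that may run in any
 * context enters with an rcBusy status and backs off to ring-3 when the
 * section is contended in R0/RC.  The function name and the VINF_IOM_HC_MMIO_WRITE
 * rcBusy value are illustrative assumptions, not taken from this file.
 */
#if 0
static int exampleMmioWrite(PPDMCRITSECT pCritSect)
{
    /* In R0/RC a busy section yields rcBusy instead of blocking. */
    int rc = PDMCritSectEnter(pCritSect, VINF_IOM_HC_MMIO_WRITE);
    if (rc != VINF_SUCCESS)
        return rc;              /* VERR_SEM_DESTROYED or the rcBusy value. */

    /* ... touch the device state under the lock ... */

    PDMCritSectLeave(pCritSect);
    return VINF_SUCCESS;
}
#endif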


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect       The PDM critical section to enter.
 * @param   rcBusy          The status code to return when we're in GC or R0
 *                          and the section is busy.
 * @param   uId             Some kind of locking location ID.  Typically a
 *                          return address up the stack.  Optional (0).
 * @param   pszFile         The file where the lock is being acquired from.
 *                          Optional.
 * @param   iLine           The line number in that file.  Optional (0).
 * @param   pszFunction     The function where the lock is being acquired
 *                          from.  Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#else
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#endif
}
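
/*
 * A minimal call sketch for the debug variant, kept disabled: callers
 * typically pass their return address as uId and let RT_SRC_POS supply the
 * file/line/function arguments declared by RT_SRC_POS_DECL.  The rcBusy value
 * used here is an assumption for the example.
 */
#if 0
static void exampleEnterDebug(PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectEnterDebug(pCritSect, VERR_SEM_BUSY, (RTHCUINTPTR)ASMReturnAddress(), RT_SRC_POS);
    if (rc == VINF_SUCCESS)
        PDMCritSectLeave(pCritSect);
}
#endif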


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#endif
}
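
/*
 * A minimal sketch of the try-enter pattern, kept disabled: code that must
 * not block (or must not risk going to ring-3) can poll the section and
 * defer its work when the section is busy.  The deferral itself is only
 * indicated by a comment; it depends entirely on the caller.
 */
#if 0
static void exampleTryEnter(PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectTryEnter(pCritSect);
    if (rc == VINF_SUCCESS)
    {
        /* ... brief access to the shared state ... */
        PDMCritSectLeave(pCritSect);
    }
    else
    {
        Assert(rc == VERR_SEM_BUSY || rc == VERR_SEM_DESTROYED);
        /* ... postpone the work or retry later ... */
    }
}
#endif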


/**
 * Try enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file.  Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from.  Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#else
    return pdmCritSectTryEnter(pCritSect, NULL);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect       The PDM critical section to enter.
 * @param   fCallRing3      Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect       The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Check for NOP sections.
     */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return;

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif  /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu       The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */
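
/*
 * A rough sketch of how the queued leaves get processed, kept disabled: once
 * execution is back in a context that can signal the semaphore, the EMT loop
 * notices the force-action flag set by PDMCritSectLeave and calls
 * PDMCritSectFF.  The VMCPU_FF_ISSET test below is an assumption standing in
 * for whatever check the execution loop actually performs.
 */
#if 0
static void exampleProcessForcedActions(PVMCPU pVCpu)
{
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PDM_CRITSECT))
        PDMCritSectFF(pVCpu);   /* leaves the queued sections and clears the flag */
}
#endif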


/**
 * Checks if the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}
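
/*
 * Ownership asserts are the typical use of PDMCritSectIsOwner: code that
 * requires its caller to already hold the section can check that up front.
 * A minimal sketch, kept disabled; the function name is illustrative.
 */
#if 0
static void exampleRequiresLock(PPDMCRITSECT pCritSect)
{
    Assert(PDMCritSectIsOwner(pCritSect));
    /* ... code that relies on the section being held by this thread ... */
}
#endif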


/**
 * Checks if the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       The virtual CPU ID.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCpus);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}
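
/*
 * A minimal sketch of using PDMCritSectHasWaiters, kept disabled: a ring-3
 * lock holder doing a long-running job can periodically check for waiters and
 * briefly drop the section to let them in.  The VERR_IGNORED rcBusy value and
 * the function name are assumptions for the example.
 */
#if 0
static void exampleYieldIfContended(PPDMCRITSECT pCritSect)
{
    if (PDMCritSectHasWaiters(pCritSect))
    {
        PDMCritSectLeave(pCritSect);
        /* ... the waiter(s) get a chance to run here ... */
        int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
        AssertRC(rc);   /* in ring-3 this blocks until the section is reacquired */
    }
}
#endif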


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}