VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@25576

Last change on this file since 25576 was 25478, checked in by vboxsync, 15 years ago

IPRT,PDMCritSect: More lock validator refactoring.

/* $Id: PDMAllCritSect.cpp 25478 2009-12-18 12:58:10Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/pdmcritsect.h>
#include <VBox/mm.h>
#include <VBox/vmm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 * @param   pSrcPos         The source position of the caller (for the lock
 *                          validator in strict builds).
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALIDATORSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}
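
/*
 * Illustrative sketch, not part of the original file: the cLockers protocol
 * that the enter paths rely on.  cLockers is -1 when the section is free,
 * 0 when owned with no waiters, and N > 0 when owned with N waiters.  The
 * uncontended fast path is a single compare-and-swap; the helper name below
 * is hypothetical.
 */
#if 0 /* example only */
static bool sketchTryAcquireFastPath(PPDMCRITSECT pCritSect)
{
    /* Free (-1) -> owned (0); fails whenever anybody owns or waits. */
    return ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0 /*new*/, -1 /*old*/);
}
#endif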


#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 * @param   pSrcPos         The source position of the caller (for the lock
 *                          validator in strict builds).
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALIDATORSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos);
    if (RT_FAILURE(rc2))
        return rc2;
# else
    RTTHREAD        hThreadSelf = RTThreadSelf();
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, RTTHREADSTATE_CRITSECT,
                                               !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING), pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
# endif

        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT);
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 */
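
/*
 * Illustrative sketch, not part of the original file: the "no resume" wait
 * pattern used in the loop above.  SUPSemEventWaitNoResume() returns
 * VERR_INTERRUPTED when a signal arrives, so the waiter simply retries
 * (unless, as above, the section was destroyed in the meantime):
 */
#if 0 /* example only */
int rc;
do
    rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
while (rc == VERR_INTERRUPTED);
#endif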


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect       The PDM critical section to enter.
 * @param   rcBusy          The status code to return when we're in GC or R0
 *                          and the section is busy.
 * @param   pSrcPos         The source position of the caller (for the lock
 *                          validator in strict builds).
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALIDATORSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on
     *        uni-CPU systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect       The PDM critical section to enter.
 * @param   rcBusy          The status code to return when we're in GC or R0
 *                          and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#else
    RTLOCKVALIDATORSRCPOS SrcPos = RTLOCKVALIDATORSRCPOS_INIT_NORMAL_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#endif
}
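
/*
 * Illustrative sketch, not part of the original file: the usual caller
 * pattern.  In ring-3 the call blocks until the section is entered; in
 * ring-0/RC the caller picks an rcBusy status (VINF_EM_RAW_TO_R3 is an
 * assumed choice here) and defers the work to ring-3 when it gets it back.
 */
#if 0 /* example only */
int rc = PDMCritSectEnter(pCritSect, VINF_EM_RAW_TO_R3);
if (rc == VINF_EM_RAW_TO_R3)
    return rc;              /* contended in R0/RC: redo the operation in ring-3. */
AssertRCReturn(rc, rc);     /* VERR_SEM_DESTROYED and the like. */
/* ... access the data the section guards ... */
PDMCritSectLeave(pCritSect);
#endif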


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect       The PDM critical section to enter.
 * @param   rcBusy          The status code to return when we're in GC or R0
 *                          and the section is busy.
 * @param   uId             Some kind of locking location ID. Typically a
 *                          return address up the stack. Optional (0).
 * @param   pszFile         The file where the lock is being acquired from.
 *                          Optional.
 * @param   iLine           The line number in that file. Optional (0).
 * @param   pszFunction     The function where the lock is being acquired
 *                          from. Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALIDATORSRCPOS SrcPos = RTLOCKVALIDATORSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#else
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#endif
}
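
/*
 * Illustrative sketch, not part of the original file: RT_SRC_POS expands to
 * __FILE__, __LINE__ and the function name, matching the RT_SRC_POS_DECL
 * parameter list above.  Passing the return address as uId is one plausible
 * convention (an assumption, not mandated by this file):
 */
#if 0 /* example only */
int rc = PDMCritSectEnterDebug(pCritSect, VERR_INTERNAL_ERROR,
                               (RTHCUINTPTR)ASMReturnAddress(), RT_SRC_POS);
#endif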


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 * @param   pSrcPos     The source position of the caller (for the lock
 *                      validator in strict builds).
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALIDATORSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALIDATORSRCPOS SrcPos = RTLOCKVALIDATORSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#endif
}
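
/*
 * Illustrative sketch, not part of the original file: the try-enter pattern.
 * Unlike PDMCritSectEnter this never blocks or spins, so the caller must be
 * prepared to take a fallback path on VERR_SEM_BUSY:
 */
#if 0 /* example only */
int rc = PDMCritSectTryEnter(pCritSect);
if (RT_SUCCESS(rc))
{
    /* ... access the guarded state ... */
    PDMCritSectLeave(pCritSect);
}
else
{
    Assert(rc == VERR_SEM_BUSY);
    /* ... defer or skip the work ... */
}
#endif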


/**
 * Try enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID. Typically a
 *                      return address up the stack. Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file. Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from. Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALIDATORSRCPOS SrcPos = RTLOCKVALIDATORSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#else
    return pdmCritSectTryEnter(pCritSect, NULL);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect       The PDM critical section to enter.
 * @param   fCallRing3      Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorUnsetOwner(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */
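
/*
 * Illustrative sketch, not part of the original file: when fCallRing3 is
 * true the lock validator record is detached immediately after entering,
 * presumably so ownership tracking does not misfire across the ring
 * transition the request is part of.  A minimal caller:
 */
#if 0 /* example only */
int rc = PDMR3CritSectEnterEx(pCritSect, true /*fCallRing3*/);
AssertRCReturn(rc, rc);
#endif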


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorUnsetOwner(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
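
/*
 * Illustrative sketch, not part of the original file: enter/leave nesting as
 * seen by the counters above (ring-3, uncontended).
 */
#if 0 /* example only */
PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);   /* cNestings 0 -> 1, cLockers -1 -> 0 */
PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);   /* nested:   1 -> 2,           0 -> 1 */
PDMCritSectLeave(pCritSect);                        /* nested leave: back to 1 / 0 */
PDMCritSectLeave(pCritSect);                        /* real leave: owner cleared, cLockers -> -1 */
#endif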


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */
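
/*
 * Illustrative sketch, not part of the original file: the force flag set by
 * the queued-leave path in PDMCritSectLeave() is serviced from force-flag
 * processing, roughly like this (exact call site is an assumption):
 */
#if 0 /* example only */
if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PDM_CRITSECT))
    PDMCritSectFF(pVCpu);
#endif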


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}
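
/*
 * Illustrative sketch, not part of the original file: the ownership checks
 * are mostly useful as sanity assertions in code that requires a lock to be
 * held (or free) on entry:
 */
#if 0 /* example only */
Assert(PDMCritSectIsOwner(pCritSect));      /* caller must hold the lock */
Assert(!PDMCritSectIsOwned(pCritSect));     /* nobody may hold the lock  */
#endif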


/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       The VCPU id.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCpus);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}