VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@25467

Last change on this file since 25467 was 25467, checked in by vboxsync on 2009-12-17

IPRT,PDMCritSect: More lock validation.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 22.9 KB
/* $Id: PDMAllCritSect.cpp 25467 2009-12-17 15:16:55Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/pdmcritsect.h>
#include <VBox/mm.h>
#include <VBox/vmm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256
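/* The spin count for the current context is picked with CTX_SUFF() in pdmCritSectEnter(). */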

#ifdef PDMCRITSECT_STRICT
# define PDMCRITSECT_STRICT_POS_DECL    RTHCUINTPTR uId, RT_SRC_POS_DECL
# define PDMCRITSECT_STRICT_POS_ARGS    uId, RT_SRC_POS_ARGS
# define PDMCRITSECT_STRICT_BLOCK_RET(hThread, pRec, fRecursive) \
    do { \
        int rc9 = RTLockValidatorCheckBlocking(pRec, (hThread), RTTHREADSTATE_CRITSECT, fRecursive, uId, RT_SRC_POS_ARGS); \
        if (RT_FAILURE(rc9)) \
            return rc9; \
    } while (0)
#else
# define PDMCRITSECT_STRICT_POS_DECL    int iDummy
# define PDMCRITSECT_STRICT_POS_ARGS    0
# define PDMCRITSECT_STRICT_BLOCK_RET(hThread, pRec, fRecursive) \
    RTThreadBlocking((hThread), RTTHREADSTATE_CRITSECT)
#endif
#define PDMCRITSECT_STRICT_UNBLOCK(hThread) RTThreadUnblocked((hThread), RTTHREADSTATE_CRITSECT)
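/* Note: in strict (PDMCRITSECT_STRICT) builds the block macro hands the caller's source
   position to the lock validator, which may refuse the wait and make us bail out with its
   status code; in non-strict builds it merely marks the thread as blocking on a critsect,
   and PDMCRITSECT_STRICT_UNBLOCK clears that state again after the wait. */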

/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
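    /* Not ring-3: take the calling EMT's ring-3 native handle from the VMCPU instead of asking the OS. */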
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM);   AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);               AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;         Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PDMCRITSECT_STRICT_POS_DECL)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, PDMCRITSECT_STRICT_POS_ARGS);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PDMCRITSECT_STRICT_POS_DECL)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_POS_ARGS);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    RTTHREAD        hSelf    = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorCheckOrder(pCritSect->s.Core.pValidatorRec, hSelf, 0, NULL, 0, NULL);
    if (RT_FAILURE(rc2))
        return rc2;
# else
    RTTHREAD        hSelf    = RTThreadSelf();
# endif
    for (;;)
    {
        PDMCRITSECT_STRICT_BLOCK_RET(hSelf, pCritSect->s.Core.pValidatorRec, !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING));
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
        PDMCRITSECT_STRICT_UNBLOCK(hSelf);
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_POS_ARGS);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 */


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PDMCRITSECT_STRICT_POS_DECL)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
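    /* (cLockers is -1 while the section is free; claim it by switching -1 to 0.) */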
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_POS_ARGS);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_POS_ARGS);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_POS_ARGS);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, PDMCRITSECT_STRICT_POS_ARGS);
#else
    /* No need for a second code instance. */
    return PDMCritSectEnterDebug(pCritSect, rcBusy, (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
#endif
}


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 * @param   uId         Some kind of locking location ID. Typically a
 *                      return address up the stack. Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file. Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from. Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, PDMCRITSECT_STRICT_POS_ARGS);
#else
    /* No need for a second code instance. */
    return PDMCritSectEnter(pCritSect, rcBusy);
#endif
}


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PDMCRITSECT_STRICT_POS_DECL)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_POS_ARGS);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, PDMCRITSECT_STRICT_POS_ARGS);
#else
    /* No need for a second code instance. */
    return PDMCritSectTryEnterDebug(pCritSect, (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
#endif
}


/**
 * Try enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID. Typically a
 *                      return address up the stack. Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file. Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from. Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, PDMCRITSECT_STRICT_POS_ARGS);
#else
    /* No need for a second code instance. */
    return PDMCritSectTryEnter(pCritSect);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorUnsetOwner(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
# if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorUnsetOwner(pCritSect->s.Core.pValidatorRec);
# endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

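        /* Neither the raw-mode context nor ring-0 with interrupts disabled can signal the
           event semaphore here, so the real leave is deferred: mark the unlock as pending
           and hand the section over to ring-3, where PDMCritSectFF() (below) finishes it. */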
        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);    AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
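    /* A pending (queued) unlock means the leave has been issued but not yet completed in
       ring-3, so the section no longer counts as owned by this thread. */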
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       VCPU id.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCpus);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}
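
For orientation, a minimal usage sketch (not part of the file above): code that shares state between contexts brackets it with the enter/leave pair, passing the status it wants back when the section is busy in R0/RC. The type EXAMPLEDEV, the struct members and the function name below are hypothetical, invented only for illustration.

    #include <VBox/pdmcritsect.h>
    #include <VBox/err.h>
    #include <iprt/assert.h>

    /* Hypothetical device instance data; the critsect must be initialized elsewhere
       (a real device does this at construction time). */
    typedef struct EXAMPLEDEV
    {
        PDMCRITSECT CritSect;
        uint32_t    aRegs[16];
    } EXAMPLEDEV;
    typedef EXAMPLEDEV *PEXAMPLEDEV;

    static int exampleDevRegWrite(PEXAMPLEDEV pThis, uint32_t offReg, uint32_t u32Value)
    {
        /* rcBusy is only returned in R0/RC; in ring-3 the call blocks until the section is free. */
        int rc = PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
        if (rc != VINF_SUCCESS)
            return rc;
        Assert(PDMCritSectIsOwner(&pThis->CritSect));

        pThis->aRegs[offReg & 15] = u32Value;   /* access to shared state is now serialized */

        PDMCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }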