VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@30493

Last change on this file since 30493 was 30328, checked in by vboxsync, 14 years ago

More paranoia

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 22.5 KB
/* $Id: PDMAllCritSect.cpp 30328 2010-06-21 12:55:14Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "../PDMInternal.h"
#include <VBox/pdmcritsect.h>
#include <VBox/mm.h>
#include <VBox/vmm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM);   AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);               AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;         Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect   The critical section.
 * @param   hNativeSelf The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    Assert(pCritSect->s.Core.cNestings == 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect   The critsect.
 * @param   hNativeSelf The native thread handle.
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
# else
    RTTHREAD        hThreadSelf = RTThreadSelf();
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 */


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#endif
}
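

/*
 * A minimal usage sketch of the enter/leave pairing. EXAMPLESTATE,
 * pdmExampleAccess and the cAccesses field are hypothetical names made up
 * for illustration; only PDMCRITSECT, PDMCritSectEnter and PDMCritSectLeave
 * are the APIs from this file. In R0/RC a contended section makes the enter
 * call return rcBusy so the access can be retried in ring-3.
 */
typedef struct EXAMPLESTATE
{
    PDMCRITSECT CritSect;       /* hypothetical; set up in ring-3 during construction. */
    uint32_t    cAccesses;      /* hypothetical state shared between contexts. */
} EXAMPLESTATE;

static int pdmExampleAccess(EXAMPLESTATE *pThis, int rcBusy)
{
    int rc = PDMCritSectEnter(&pThis->CritSect, rcBusy);
    if (rc != VINF_SUCCESS)
        return rc;                          /* rcBusy (R0/RC) or VERR_SEM_DESTROYED. */

    pThis->cAccesses++;                     /* ... work on the shared state ... */

    PDMCritSectLeave(&pThis->CritSect);     /* balance every successful enter. */
    return VINF_SUCCESS;
}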


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 * @param   uId         Some kind of locking location ID. Typically a
 *                      return address up the stack. Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file. Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from. Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#else
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#endif
}


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Tries to enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#endif
}
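

/*
 * A minimal sketch of the non-blocking pattern, reusing the hypothetical
 * EXAMPLESTATE from the sketch further up: attempt the lock and skip the
 * work instead of blocking when the section is owned by someone else.
 */
static void pdmExamplePoll(EXAMPLESTATE *pThis)
{
    int rc = PDMCritSectTryEnter(&pThis->CritSect);
    if (rc == VINF_SUCCESS)
    {
        pThis->cAccesses++;                 /* got the lock without waiting. */
        PDMCritSectLeave(&pThis->CritSect);
    }
    /* else: VERR_SEM_BUSY (someone else owns it); retry on the next poll. */
}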


/**
 * Tries to enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID. Typically a
 *                      return address up the stack. Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file. Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from. Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#else
    return pdmCritSectTryEnter(pCritSect, NULL);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
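

/*
 * A short nesting sketch (hypothetical EXAMPLESTATE as above): the owner may
 * re-enter the section, and the lock is only released once each successful
 * enter has been balanced by a leave.
 */
static void pdmExampleNested(EXAMPLESTATE *pThis)
{
    PDMCritSectEnter(&pThis->CritSect, VERR_INTERNAL_ERROR);    /* cNestings: 0 -> 1 */
    PDMCritSectEnter(&pThis->CritSect, VERR_INTERNAL_ERROR);    /* cNestings: 1 -> 2 (owner re-entry) */
    PDMCritSectLeave(&pThis->CritSect);                         /* cNestings: 2 -> 1, still owned */
    PDMCritSectLeave(&pThis->CritSect);                         /* cNestings: 1 -> 0, lock released */
}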


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}
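

/*
 * A typical use is asserting a lock-held precondition at the top of a helper
 * (hypothetical EXAMPLESTATE as above):
 */
static void pdmExampleRequiresLock(EXAMPLESTATE *pThis)
{
    Assert(PDMCritSectIsOwner(&pThis->CritSect));   /* the caller must hold the section. */
    pThis->cAccesses++;
}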


/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       The VCPU id.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCpus);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}
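

/*
 * A sketch of cooperative back-off (hypothetical EXAMPLESTATE as above): a
 * long-running owner can poll for waiters and cut its batch short so a
 * blocked thread gets the section sooner.
 */
static void pdmExampleBatch(EXAMPLESTATE *pThis)
{
    PDMCritSectEnter(&pThis->CritSect, VERR_INTERNAL_ERROR);
    for (unsigned i = 0; i < 64; i++)
    {
        pThis->cAccesses++;                             /* one unit of batched work. */
        if (PDMCritSectHasWaiters(&pThis->CritSect))
            break;                                      /* someone is queued; let them in. */
    }
    PDMCritSectLeave(&pThis->CritSect);
}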


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}