VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp @ 37529

Last change on this file since 37529 was 37452, checked in by vboxsync, 14 years ago

IOM,PDMCritSect: Extended PDMCritSectEnter to handle rcBusy=VINF_SUCCESS as a request to call ring-3 to acquire a busy lock. Implemented device level locking in the MMIO code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 26.1 KB
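
The change above extends the PDMCritSectEnter contract: a ring-0 or raw-mode caller may now pass VINF_SUCCESS as rcBusy to have a busy section acquired through a ring-3 call instead of getting a busy status back. A minimal sketch of the two calling styles (pThis and its CritSect member are hypothetical, for illustration only):

    /* New style: let PDM fall back to ring-3 when the section is busy. */
    int rc = PDMCritSectEnter(&pThis->CritSect, VINF_SUCCESS);
    if (rc == VINF_SUCCESS)
    {
        /* ... access shared device state ... */
        PDMCritSectLeave(&pThis->CritSect);
    }

    /* Old style: a busy section in RC/R0 returns the supplied status code. */
    rc = PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);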
/* $Id: PDMAllCritSect.cpp 37452 2011-06-14 18:13:48Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/vmm/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3       20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0       256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC       256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter

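/*
 * Note on the state encoding used by the code below (summarized from this
 * file, not part of the original sources):
 *    - cLockers  is -1 when the section is free, 0 when it is owned with no
 *      waiters or recursion, and greater than zero when there are waiters
 *      and/or recursive entries on top of the first one.
 *    - cNestings is 0 when not owned, 1 on the first entry and higher for
 *      recursive entries by the owner.
 *    - PDMCRITSECT_FLAGS_PENDING_UNLOCK marks sections whose leave had to be
 *      queued for ring-3 (see PDMCritSectLeave and PDMCritSectFF).
 */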

/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect           The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    Assert(pCritSect->s.Core.cNestings == 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect           The critsect.
 * @param   hNativeSelf         The native thread handle.
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# elif defined(IN_RING3)
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef IN_RING3
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# endif

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   rcBusy              The status code to return when we're in GC or R0
 *                              and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jump back to ring-3 just to
     *        get the lock, the ring-3 code will then call the kernel to do
     *        the lock wait and when the call returns it will call ring-0
     *        again and resume via setjmp style.  Not very efficient. */
#  if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HWACCM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock,    1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HWACCMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, ????);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
            HWACCMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
#  else
    /*
     * If preemption hasn't been disabled, we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#  endif
# endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   rcBusy              The status code to return when we're in GC or R0
 *                              and the section is busy.  Pass VINF_SUCCESS to
 *                              acquire the critical section through a ring-3
 *                              call if necessary.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
    int rc;
#ifndef IN_RING3
    if (rcBusy == VINF_SUCCESS)
    {
# ifndef PDMCRITSECT_STRICT
        rc = pdmCritSectEnter(pCritSect, VERR_SEM_BUSY, NULL);
# else
        RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
        rc = pdmCritSectEnter(pCritSect, VERR_SEM_BUSY, &SrcPos);
# endif
        if (rc == VERR_SEM_BUSY)
        {
            /* The caller asked for the busy case to be handled by a ring-3 call. */
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
            rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
        }
    }
    else
#endif /* !IN_RING3 */
    {
#ifndef PDMCRITSECT_STRICT
        rc = pdmCritSectEnter(pCritSect, rcBusy, NULL);
#else
        RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
        rc = pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#endif
    }
    return rc;
}
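

/*
 * Illustrative usage sketch (not from the original sources): a device callback
 * typically brackets access to shared state with PDMCritSectEnter/Leave and
 * propagates the supplied busy status back in RC/R0.  EXAMPLEDEV, pThis and
 * u32Reg are hypothetical names used only for this example.
 */
#if 0
static int exampleDevMmioWrite(PEXAMPLEDEV pThis, uint32_t u32Value)
{
    int rc = PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
    if (rc != VINF_SUCCESS)
        return rc;                  /* VERR_SEM_BUSY in RC/R0 when contended. */

    pThis->u32Reg = u32Value;       /* ... touch device state ... */

    PDMCritSectLeave(&pThis->CritSect);
    return VINF_SUCCESS;
}
#endif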


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   rcBusy              The status code to return when we're in GC or R0
 *                              and the section is busy.  Pass VINF_SUCCESS to
 *                              acquire the critical section through a ring-3
 *                              call if necessary.
 * @param   uId                 Some kind of locking location ID.  Typically a
 *                              return address up the stack.  Optional (0).
 * @param   pszFile             The file where the lock is being acquired from.
 *                              Optional.
 * @param   iLine               The line number in that file.  Optional (0).
 * @param   pszFunction         The function where the lock is being acquired
 *                              from.  Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#else
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#endif
}
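

/*
 * Sketch of the debug variant at a hypothetical call site: the caller passes
 * its return address and source position so the lock validator can report
 * where the section was taken.
 */
#if 0
    rc = PDMCritSectEnterDebug(&pThis->CritSect, VERR_SEM_BUSY,
                               (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
#endif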


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#endif
}
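

/*
 * Illustrative try-enter pattern at a hypothetical call site: attempt the
 * lock without blocking and defer the work if somebody else owns it.
 */
#if 0
    int rc = PDMCritSectTryEnter(&pThis->CritSect);
    if (rc == VINF_SUCCESS)
    {
        /* ... do the work that needs the lock ... */
        PDMCritSectLeave(&pThis->CritSect);
    }
    else
        Assert(rc == VERR_SEM_BUSY);    /* try again later */
#endif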


/**
 * Try enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect           The critical section.
 * @param   uId                 Some kind of locking location ID.  Typically a
 *                              return address up the stack.  Optional (0).
 * @param   pszFile             The file where the lock is being acquired from.
 *                              Optional.
 * @param   iLine               The line number in that file.  Optional (0).
 * @param   pszFunction         The function where the lock is being acquired
 *                              from.  Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#else
    return pdmCritSectTryEnter(pCritSect, NULL);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   fCallRing3          Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect           The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return;

    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);              AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu           The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Checks the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}
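

/*
 * Typical use of the ownership check at a hypothetical call site: assert that
 * a helper is only reached with the device's section already entered.
 */
#if 0
    Assert(PDMCritSectIsOwner(&pThis->CritSect));
#endif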


/**
 * Checks the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       VCPU id.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCpus);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}