VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@ 90329

Last change on this file since 90329 was 90329, checked in by vboxsync, 3 years ago

PDMDrvHlp: Put the PDMCritSect API into driver helpers just like we've got for devices. bugref:10074

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 30.0 KB
/* $Id: PDMAllCritSect.cpp 90329 2021-07-26 12:47:26Z vboxsync $ */
/** @file
 * PDM - Write-Only Critical Section, All Contexts.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vmcc.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif

/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256


/** Skips some of the overly paranoid atomic updates.
 * Makes some assumptions about cache coherence, though not brave enough not to
 * always end with an atomic update. */
#define PDMCRITSECT_WITH_LESS_ATOMIC_STUFF

/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter

/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVMCC           pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPUCC        pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect   The critical section.
 * @param   hNativeSelf The native handle of this thread.
 * @param   pSrcPos     The source position of the lock operation.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
    pCritSect->s.Core.cNestings = 1;
# else
    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
# endif
    Assert(pCritSect->s.Core.cNestings == 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# else
    NOREF(pSrcPos);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_DESTROYED if destroyed.
 *
 * @param   pCritSect   The critsect.
 * @param   hNativeSelf The native thread handle.
 * @param   pSrcPos     The source position of the lock operation.
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD        hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
        /*
         * Do the wait.
         *
         * In ring-3 this gets cluttered by lock validation and thread state
         * maintenance.
         *
         * In ring-0 we have to deal with the possibility that the thread has
         * been signalled and the interruptible wait function returning
         * immediately.  In that case we do normal R0/RC rcBusy handling.
         */
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
#  else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
#  endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# else  /* IN_RING0 */
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# endif /* IN_RING0 */

        /*
         * Deal with the return code and critsect destruction.
         */
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));

# ifdef IN_RING0
        /* Something is pending (signal, APC, debugger, whatever), just go back
           to ring-3 so the kernel can deal with it when leaving kernel context.

           Note! We've incremented cLockers already and cannot safely decrement
                 it without creating a race with PDMCritSectLeave, resulting in
                 spurious wakeups. */
        PVMCC    pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPUCC pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
        AssertRC(rc);
# endif
    }
    /* won't get here */
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 * @param   pSrcPos     The source position of the lock operation.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
    { /* We're more likely to end up here with real critsects than a NOP one. */ }
    else
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        Assert(pCritSect->s.Core.cNestings >= 1);
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        pCritSect->s.Core.cNestings += 1;
# else
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
# endif
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    NOREF(rcBusy);
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /*
     * In ring-0 context we have to take the special VT-x/AMD-V HM context into
     * account when waiting on contended locks.
     *
     * While we usually have the option, via the rcBusy parameter (which can be
     * VINF_SUCCESS), of going back to ring-3 and restarting the work there, it's
     * almost always more efficient to try waiting for the lock here.  The rcBusy
     * status will be used if we encounter a VERR_INTERRUPTED situation though.
     *
     * We must never block if VMMRZCallRing3Disable is active.
     */

    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jump back to ring-3 just to
     *        get the lock, the ring-3 code will then call the kernel to do
     *        the lock wait and when the call returns it will call ring-0
     *        again and resume in setjmp style.  Not very efficient. */
#  if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVMCC    pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPUCC pVCpu = VMMGetCpu(pVM);
            HMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, XXX);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, XXX);
            HMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
#  else
    /*
     * If preemption hasn't been disabled, we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#  endif
# endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVMCC    pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPUCC pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in RC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in RC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section through a ring-3
 *                      call if necessary.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#endif
}
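
/*
 * Usage sketch (illustrative only, not part of the API surface of this file):
 * a typical guarded-access pattern built on PDMCritSectEnter/PDMCritSectLeave.
 * The helper name and the choice of VERR_SEM_BUSY as the rcBusy policy are
 * assumptions made for the example.
 */
#if 0 /* example */
static int exampleTouchGuardedState(PPDMCRITSECT pCritSect)
{
    /* In R0/RC a busy section makes this return VERR_SEM_BUSY instead of
       blocking; passing VINF_SUCCESS would acquire it via a ring-3 call. */
    int rc = PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);
    if (rc == VINF_SUCCESS)
    {
        /* ... manipulate the state guarded by pCritSect ... */
        rc = PDMCritSectLeave(pCritSect);
    }
    return rc;
}
#endif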


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in RC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in RC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section through a ring-3
 *                      call if necessary.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   SRC_POS     The source position where the lock is being
 *                      acquired from.  Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#endif
}


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The critical section.
 * @param   pSrcPos     The source position of the lock operation.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
    { /* We're more likely to end up here with real critsects than a NOP one. */ }
    else
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        Assert(pCritSect->s.Core.cNestings >= 1);
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        pCritSect->s.Core.cNestings += 1;
# else
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
# endif
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#endif
}
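
/*
 * Usage sketch (illustrative only): polling a section with PDMCritSectTryEnter
 * and deferring the work when it is busy.  The surrounding helper and its
 * fallback behaviour are assumptions made for the example.
 */
#if 0 /* example */
static void examplePollGuardedState(PPDMCRITSECT pCritSect)
{
    if (PDMCritSectTryEnter(pCritSect) == VINF_SUCCESS)
    {
        /* ... brief access to the guarded state ... */
        PDMCritSectLeave(pCritSect);
    }
    /* else: VERR_SEM_BUSY - defer the work rather than blocking here. */
}
#endif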


/**
 * Try enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   SRC_POS     The source position where the lock is being
 *                      acquired from.  Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectTryEnter(pCritSect, NULL);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @returns Indication whether we really exited the critical section.
 * @retval  VINF_SUCCESS if we really exited.
 * @retval  VINF_SEM_NESTED if we only reduced the nesting count.
 * @retval  VERR_NOT_OWNER if you somehow ignore release assertions.
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(int) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
    { /* We're more likely to end up here with real critsects than a NOP one. */ }
    else
        return VINF_SUCCESS;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                           ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                            pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                            pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
                           VERR_NOT_OWNER);

    /*
     * Nested leave.
     */
    int32_t const cNestings = pCritSect->s.Core.cNestings;
    Assert(cNestings >= 1);
    if (cNestings > 1)
    {
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        pCritSect->s.Core.cNestings = cNestings - 1;
# else
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, cNestings - 1);
# endif
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return VINF_SEM_NESTED;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
        SUPSEMEVENT hEventToSignal = pCritSect->s.hEventToSignal;
        pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
# ifdef IN_RING3
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        //pCritSect->s.Core.cNestings = 0; /* not really needed */
        pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
# else
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) < 0)
        { /* hopefully likely */ }
        else
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

        /* Signal exit event. */
        if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
        { /* likely */ }
        else
        {
            Log8(("Signalling %#p\n", hEventToSignal));
            int rc = SUPSemEventSignal(pCritSect->s.CTX_SUFF(pVM)->pSession, hEventToSignal);
            AssertRC(rc);
        }

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
            //pCritSect->s.Core.cNestings = 0; /* not really needed */
# else
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
# endif
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return VINF_SUCCESS;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
            //pCritSect->s.Core.cNestings = 1;
            Assert(pCritSect->s.Core.cNestings == 1);
# else
            //Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
# endif
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVMCC    pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPUCC pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        uint32_t i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
        pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */

    return VINF_SUCCESS;
}
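
/*
 * Usage sketch (illustrative only): nested enter/leave on the same thread.
 * The second enter only bumps the nesting count, so the first leave returns
 * VINF_SEM_NESTED and only the final leave really releases the section.
 */
#if 0 /* example */
static void exampleNestedUse(PPDMCRITSECT pCritSect)
{
    PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);
    PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);     /* nests, no blocking */
    int rc = PDMCritSectLeave(pCritSect);           /* returns VINF_SEM_NESTED */
    rc     = PDMCritSectLeave(pCritSect);           /* returns VINF_SUCCESS */
    NOREF(rc);
}
#endif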


#if defined(IN_RING0) || defined(IN_RING3)
/**
 * Schedule an event semaphore for signalling upon critsect exit.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_TOO_MANY_SEMAPHORES if an event was already scheduled.
 * @returns VERR_NOT_OWNER if we're not the critsect owner (ring-3 only).
 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect       The critical section.
 * @param   hEventToSignal  The support driver event semaphore that should be
 *                          signalled.
 */
VMMDECL(int) PDMHCCritSectScheduleExitEvent(PPDMCRITSECT pCritSect, SUPSEMEVENT hEventToSignal)
{
    AssertPtr(pCritSect);
    Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));
    Assert(hEventToSignal != NIL_SUPSEMEVENT);
# ifdef IN_RING3
    if (RT_UNLIKELY(!RTCritSectIsOwner(&pCritSect->s.Core)))
        return VERR_NOT_OWNER;
# endif
    if (RT_LIKELY(   pCritSect->s.hEventToSignal == NIL_RTSEMEVENT
                  || pCritSect->s.hEventToSignal == hEventToSignal))
    {
        pCritSect->s.hEventToSignal = hEventToSignal;
        return VINF_SUCCESS;
    }
    return VERR_TOO_MANY_SEMAPHORES;
}
#endif /* IN_RING0 || IN_RING3 */
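
/*
 * Usage sketch (illustrative only): having an event signalled when the section
 * is left, e.g. to wake a worker thread without signalling while still holding
 * the lock.  The hEvtWorker handle and helper name are assumptions made for
 * the example.
 */
#if 0 /* example */
static int exampleQueueWorkAndKickWorker(PPDMCRITSECT pCritSect, SUPSEMEVENT hEvtWorker)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);
    if (rc == VINF_SUCCESS)
    {
        /* ... queue work for the worker thread ... */
        rc = PDMHCCritSectScheduleExitEvent(pCritSect, hEvtWorker);
        AssertRC(rc);
        PDMCritSectLeave(pCritSect);    /* hEvtWorker is signalled on the way out */
    }
    return rc;
}
#endif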


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVMCC    pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPUCC pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}
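
/*
 * Usage sketch (illustrative only): helpers that rely on their caller holding
 * the section typically assert ownership up front.  The helper name is an
 * assumption made for the example.
 */
#if 0 /* example */
static void exampleRequiresLockedSection(PPDMCRITSECT pCritSect)
{
    Assert(PDMCritSectIsOwner(pCritSect));
    /* ... work on the guarded state ... */
}
#endif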


/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, PVMCPUCC pVCpu)
{
#ifdef IN_RING3
    NOREF(pVCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    Assert(VMCC_GET_CPU(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu) == pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}